repo_name
string
path
string
copies
string
size
string
content
string
license
string
maz-1/android_kernel_lge_msm8974
arch/sh/kernel/cpu/sh4a/clock-sh7343.c
4424
9815
/* * arch/sh/kernel/cpu/sh4a/clock-sh7343.c * * SH7343 clock framework support * * Copyright (C) 2009 Magnus Damm * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/io.h> #include <linux/clkdev.h> #include <asm/clock.h> /* SH7343 registers */ #define FRQCR 0xa4150000 #define VCLKCR 0xa4150004 #define SCLKACR 0xa4150008 #define SCLKBCR 0xa415000c #define PLLCR 0xa4150024 #define MSTPCR0 0xa4150030 #define MSTPCR1 0xa4150034 #define MSTPCR2 0xa4150038 #define DLLFRQ 0xa4150050 /* Fixed 32 KHz root clock for RTC and Power Management purposes */ static struct clk r_clk = { .rate = 32768, }; /* * Default rate for the root input clock, reset this with clk_set_rate() * from the platform code. 
*/ struct clk extal_clk = { .rate = 33333333, }; /* The dll block multiplies the 32khz r_clk, may be used instead of extal */ static unsigned long dll_recalc(struct clk *clk) { unsigned long mult; if (__raw_readl(PLLCR) & 0x1000) mult = __raw_readl(DLLFRQ); else mult = 0; return clk->parent->rate * mult; } static struct sh_clk_ops dll_clk_ops = { .recalc = dll_recalc, }; static struct clk dll_clk = { .ops = &dll_clk_ops, .parent = &r_clk, .flags = CLK_ENABLE_ON_INIT, }; static unsigned long pll_recalc(struct clk *clk) { unsigned long mult = 1; if (__raw_readl(PLLCR) & 0x4000) mult = (((__raw_readl(FRQCR) >> 24) & 0x1f) + 1); return clk->parent->rate * mult; } static struct sh_clk_ops pll_clk_ops = { .recalc = pll_recalc, }; static struct clk pll_clk = { .ops = &pll_clk_ops, .flags = CLK_ENABLE_ON_INIT, }; struct clk *main_clks[] = { &r_clk, &extal_clk, &dll_clk, &pll_clk, }; static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 }; static struct clk_div_mult_table div4_div_mult_table = { .divisors = divisors, .nr_divisors = ARRAY_SIZE(divisors), .multipliers = multipliers, .nr_multipliers = ARRAY_SIZE(multipliers), }; static struct clk_div4_table div4_table = { .div_mult_table = &div4_div_mult_table, }; enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, DIV4_SIUA, DIV4_SIUB, DIV4_NR }; #define DIV4(_reg, _bit, _mask, _flags) \ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags) struct clk div4_clks[DIV4_NR] = { [DIV4_I] = DIV4(FRQCR, 20, 0x1fff, CLK_ENABLE_ON_INIT), [DIV4_U] = DIV4(FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT), [DIV4_SH] = DIV4(FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT), [DIV4_B] = DIV4(FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT), [DIV4_B3] = DIV4(FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT), [DIV4_P] = DIV4(FRQCR, 0, 0x1fff, 0), [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x1fff, 0), [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x1fff, 0), }; enum { DIV6_V, DIV6_NR }; struct clk div6_clks[DIV6_NR] = { [DIV6_V] = 
SH_CLK_DIV6(&pll_clk, VCLKCR, 0), }; #define MSTP(_parent, _reg, _bit, _flags) \ SH_CLK_MSTP32(_parent, _reg, _bit, _flags) enum { MSTP031, MSTP030, MSTP029, MSTP028, MSTP026, MSTP023, MSTP022, MSTP021, MSTP020, MSTP019, MSTP018, MSTP017, MSTP016, MSTP015, MSTP014, MSTP013, MSTP012, MSTP011, MSTP010, MSTP007, MSTP006, MSTP005, MSTP004, MSTP003, MSTP002, MSTP001, MSTP109, MSTP108, MSTP100, MSTP225, MSTP224, MSTP218, MSTP217, MSTP216, MSTP214, MSTP213, MSTP212, MSTP211, MSTP208, MSTP206, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, MSTP_NR }; static struct clk mstp_clks[MSTP_NR] = { [MSTP031] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT), [MSTP030] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT), [MSTP029] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT), [MSTP028] = MSTP(&div4_clks[DIV4_U], MSTPCR0, 28, CLK_ENABLE_ON_INIT), [MSTP026] = MSTP(&div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT), [MSTP023] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 23, 0), [MSTP022] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 22, 0), [MSTP021] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 21, 0), [MSTP020] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 20, 0), [MSTP019] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 19, 0), [MSTP017] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 17, 0), [MSTP015] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 15, 0), [MSTP014] = MSTP(&r_clk, MSTPCR0, 14, 0), [MSTP013] = MSTP(&r_clk, MSTPCR0, 13, 0), [MSTP011] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 11, 0), [MSTP010] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 10, 0), [MSTP007] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 7, 0), [MSTP006] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 6, 0), [MSTP005] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 5, 0), [MSTP004] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 4, 0), [MSTP003] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 3, 0), [MSTP002] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 2, 0), [MSTP001] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 1, 0), [MSTP109] = MSTP(&div4_clks[DIV4_P], MSTPCR1, 9, 0), [MSTP108] = MSTP(&div4_clks[DIV4_P], MSTPCR1, 8, 0), 
[MSTP225] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 25, 0), [MSTP224] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 24, 0), [MSTP218] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 18, 0), [MSTP217] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 17, 0), [MSTP216] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 16, 0), [MSTP214] = MSTP(&r_clk, MSTPCR2, 14, 0), [MSTP213] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 13, 0), [MSTP212] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 12, 0), [MSTP211] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 11, 0), [MSTP208] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 8, 0), [MSTP206] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 6, CLK_ENABLE_ON_INIT), [MSTP205] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 5, 0), [MSTP204] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 4, 0), [MSTP203] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 3, 0), [MSTP202] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT), [MSTP201] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT), [MSTP200] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 0, 0), }; static struct clk_lookup lookups[] = { /* main clocks */ CLKDEV_CON_ID("rclk", &r_clk), CLKDEV_CON_ID("extal", &extal_clk), CLKDEV_CON_ID("dll_clk", &dll_clk), CLKDEV_CON_ID("pll_clk", &pll_clk), /* DIV4 clocks */ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]), CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]), CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]), CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]), CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]), CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]), CLKDEV_CON_ID("siua_clk", &div4_clks[DIV4_SIUA]), CLKDEV_CON_ID("siub_clk", &div4_clks[DIV4_SIUB]), /* DIV6 clocks */ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]), /* MSTP32 clocks */ CLKDEV_CON_ID("tlb0", &mstp_clks[MSTP031]), CLKDEV_CON_ID("ic0", &mstp_clks[MSTP030]), CLKDEV_CON_ID("oc0", &mstp_clks[MSTP029]), CLKDEV_CON_ID("uram0", &mstp_clks[MSTP028]), CLKDEV_CON_ID("xymem0", &mstp_clks[MSTP026]), CLKDEV_CON_ID("intc3", &mstp_clks[MSTP023]), CLKDEV_CON_ID("intc0", &mstp_clks[MSTP022]), CLKDEV_CON_ID("dmac0", &mstp_clks[MSTP021]), 
CLKDEV_CON_ID("sh0", &mstp_clks[MSTP020]), CLKDEV_CON_ID("hudi0", &mstp_clks[MSTP019]), CLKDEV_CON_ID("ubc0", &mstp_clks[MSTP017]), CLKDEV_CON_ID("tmu_fck", &mstp_clks[MSTP015]), CLKDEV_CON_ID("cmt_fck", &mstp_clks[MSTP014]), CLKDEV_CON_ID("rwdt0", &mstp_clks[MSTP013]), CLKDEV_CON_ID("mfi0", &mstp_clks[MSTP011]), CLKDEV_CON_ID("flctl0", &mstp_clks[MSTP010]), CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP007]), CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP006]), CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP005]), CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP004]), CLKDEV_CON_ID("sio0", &mstp_clks[MSTP003]), CLKDEV_CON_ID("siof0", &mstp_clks[MSTP002]), CLKDEV_CON_ID("siof1", &mstp_clks[MSTP001]), CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP109]), CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP108]), CLKDEV_CON_ID("tpu0", &mstp_clks[MSTP225]), CLKDEV_CON_ID("irda0", &mstp_clks[MSTP224]), CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP218]), CLKDEV_CON_ID("mmcif0", &mstp_clks[MSTP217]), CLKDEV_CON_ID("sim0", &mstp_clks[MSTP216]), CLKDEV_CON_ID("keysc0", &mstp_clks[MSTP214]), CLKDEV_CON_ID("tsif0", &mstp_clks[MSTP213]), CLKDEV_CON_ID("s3d40", &mstp_clks[MSTP212]), CLKDEV_CON_ID("usbf0", &mstp_clks[MSTP211]), CLKDEV_CON_ID("siu0", &mstp_clks[MSTP208]), CLKDEV_CON_ID("jpu0", &mstp_clks[MSTP206]), CLKDEV_CON_ID("vou0", &mstp_clks[MSTP205]), CLKDEV_CON_ID("beu0", &mstp_clks[MSTP204]), CLKDEV_CON_ID("ceu0", &mstp_clks[MSTP203]), CLKDEV_CON_ID("veu0", &mstp_clks[MSTP202]), CLKDEV_CON_ID("vpu0", &mstp_clks[MSTP201]), CLKDEV_CON_ID("lcdc0", &mstp_clks[MSTP200]), }; int __init arch_clk_init(void) { int k, ret = 0; /* autodetect extal or dll configuration */ if (__raw_readl(PLLCR) & 0x1000) pll_clk.parent = &dll_clk; else pll_clk.parent = &extal_clk; for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++) ret = clk_register(main_clks[k]); clkdev_add_table(lookups, ARRAY_SIZE(lookups)); if (!ret) ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table); if 
(!ret) ret = sh_clk_div6_register(div6_clks, DIV6_NR); if (!ret) ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR); return ret; }
gpl-2.0
AnguisCaptor/PwnKernel_Shamu_M
fs/jffs2/background.c
7752
4311
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/jffs2.h> #include <linux/mtd/mtd.h> #include <linux/completion.h> #include <linux/sched.h> #include <linux/freezer.h> #include <linux/kthread.h> #include "nodelist.h" static int jffs2_garbage_collect_thread(void *); void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c) { assert_spin_locked(&c->erase_completion_lock); if (c->gc_task && jffs2_thread_should_wake(c)) send_sig(SIGHUP, c->gc_task, 1); } /* This must only ever be called when no GC thread is currently running */ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c) { struct task_struct *tsk; int ret = 0; BUG_ON(c->gc_task); init_completion(&c->gc_thread_start); init_completion(&c->gc_thread_exit); tsk = kthread_run(jffs2_garbage_collect_thread, c, "jffs2_gcd_mtd%d", c->mtd->index); if (IS_ERR(tsk)) { pr_warn("fork failed for JFFS2 garbage collect thread: %ld\n", -PTR_ERR(tsk)); complete(&c->gc_thread_exit); ret = PTR_ERR(tsk); } else { /* Wait for it... 
*/ jffs2_dbg(1, "Garbage collect thread is pid %d\n", tsk->pid); wait_for_completion(&c->gc_thread_start); ret = tsk->pid; } return ret; } void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c) { int wait = 0; spin_lock(&c->erase_completion_lock); if (c->gc_task) { jffs2_dbg(1, "Killing GC task %d\n", c->gc_task->pid); send_sig(SIGKILL, c->gc_task, 1); wait = 1; } spin_unlock(&c->erase_completion_lock); if (wait) wait_for_completion(&c->gc_thread_exit); } static int jffs2_garbage_collect_thread(void *_c) { struct jffs2_sb_info *c = _c; allow_signal(SIGKILL); allow_signal(SIGSTOP); allow_signal(SIGCONT); c->gc_task = current; complete(&c->gc_thread_start); set_user_nice(current, 10); set_freezable(); for (;;) { allow_signal(SIGHUP); again: spin_lock(&c->erase_completion_lock); if (!jffs2_thread_should_wake(c)) { set_current_state (TASK_INTERRUPTIBLE); spin_unlock(&c->erase_completion_lock); jffs2_dbg(1, "%s(): sleeping...\n", __func__); schedule(); } else spin_unlock(&c->erase_completion_lock); /* Problem - immediately after bootup, the GCD spends a lot * of time in places like jffs2_kill_fragtree(); so much so * that userspace processes (like gdm and X) are starved * despite plenty of cond_resched()s and renicing. Yield() * doesn't help, either (presumably because userspace and GCD * are generally competing for a higher latency resource - * disk). * This forces the GCD to slow the hell down. Pulling an * inode in with read_inode() is much preferable to having * the GC thread get there first. */ schedule_timeout_interruptible(msecs_to_jiffies(50)); if (kthread_should_stop()) { jffs2_dbg(1, "%s(): kthread_stop() called\n", __func__); goto die; } /* Put_super will send a SIGKILL and then wait on the sem. 
*/ while (signal_pending(current) || freezing(current)) { siginfo_t info; unsigned long signr; if (try_to_freeze()) goto again; signr = dequeue_signal_lock(current, &current->blocked, &info); switch(signr) { case SIGSTOP: jffs2_dbg(1, "%s(): SIGSTOP received\n", __func__); set_current_state(TASK_STOPPED); schedule(); break; case SIGKILL: jffs2_dbg(1, "%s(): SIGKILL received\n", __func__); goto die; case SIGHUP: jffs2_dbg(1, "%s(): SIGHUP received\n", __func__); break; default: jffs2_dbg(1, "%s(): signal %ld received\n", __func__, signr); } } /* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */ disallow_signal(SIGHUP); jffs2_dbg(1, "%s(): pass\n", __func__); if (jffs2_garbage_collect_pass(c) == -ENOSPC) { pr_notice("No space for garbage collection. Aborting GC thread\n"); goto die; } } die: spin_lock(&c->erase_completion_lock); c->gc_task = NULL; spin_unlock(&c->erase_completion_lock); complete_and_exit(&c->gc_thread_exit, 0); }
gpl-2.0
lloydchang/ubuntu-oneiric
arch/arm/mach-iop33x/irq.c
11848
2404
/* * arch/arm/mach-iop33x/irq.c * * Generic IOP331 IRQ handling functionality * * Author: Dave Jiang <dave.jiang@intel.com> * Copyright (C) 2003 Intel Corp. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/list.h> #include <asm/mach/irq.h> #include <asm/irq.h> #include <mach/hardware.h> #include <asm/mach-types.h> static u32 iop33x_mask0; static u32 iop33x_mask1; static void intctl0_write(u32 val) { asm volatile("mcr p6, 0, %0, c0, c0, 0" : : "r" (val)); } static void intctl1_write(u32 val) { asm volatile("mcr p6, 0, %0, c1, c0, 0" : : "r" (val)); } static void intstr0_write(u32 val) { asm volatile("mcr p6, 0, %0, c2, c0, 0" : : "r" (val)); } static void intstr1_write(u32 val) { asm volatile("mcr p6, 0, %0, c3, c0, 0" : : "r" (val)); } static void intbase_write(u32 val) { asm volatile("mcr p6, 0, %0, c12, c0, 0" : : "r" (val)); } static void intsize_write(u32 val) { asm volatile("mcr p6, 0, %0, c13, c0, 0" : : "r" (val)); } static void iop33x_irq_mask1 (struct irq_data *d) { iop33x_mask0 &= ~(1 << d->irq); intctl0_write(iop33x_mask0); } static void iop33x_irq_mask2 (struct irq_data *d) { iop33x_mask1 &= ~(1 << (d->irq - 32)); intctl1_write(iop33x_mask1); } static void iop33x_irq_unmask1(struct irq_data *d) { iop33x_mask0 |= 1 << d->irq; intctl0_write(iop33x_mask0); } static void iop33x_irq_unmask2(struct irq_data *d) { iop33x_mask1 |= (1 << (d->irq - 32)); intctl1_write(iop33x_mask1); } struct irq_chip iop33x_irqchip1 = { .name = "IOP33x-1", .irq_ack = iop33x_irq_mask1, .irq_mask = iop33x_irq_mask1, .irq_unmask = iop33x_irq_unmask1, }; struct irq_chip iop33x_irqchip2 = { .name = "IOP33x-2", .irq_ack = iop33x_irq_mask2, .irq_mask = iop33x_irq_mask2, .irq_unmask = iop33x_irq_unmask2, }; void __init iop33x_init_irq(void) { int i; iop_init_cp6_handler(); 
intctl0_write(0); intctl1_write(0); intstr0_write(0); intstr1_write(0); intbase_write(0); intsize_write(1); if (machine_is_iq80331()) *IOP3XX_PCIIRSR = 0x0f; for (i = 0; i < NR_IRQS; i++) { irq_set_chip_and_handler(i, (i < 32) ? &iop33x_irqchip1 : &iop33x_irqchip2, handle_level_irq); set_irq_flags(i, IRQF_VALID | IRQF_PROBE); } }
gpl-2.0
HinTak/linux
drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
1097
43941
/* * Copyright (c) 2010 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/slab.h> #include <linux/delay.h> #include <linux/pci.h> #include <net/cfg80211.h> #include <net/mac80211.h> #include <brcmu_utils.h> #include <aiutils.h> #include "types.h" #include "main.h" #include "dma.h" #include "soc.h" #include "scb.h" #include "ampdu.h" #include "debug.h" #include "brcms_trace_events.h" /* * dma register field offset calculation */ #define DMA64REGOFFS(field) offsetof(struct dma64regs, field) #define DMA64TXREGOFFS(di, field) (di->d64txregbase + DMA64REGOFFS(field)) #define DMA64RXREGOFFS(di, field) (di->d64rxregbase + DMA64REGOFFS(field)) /* * DMA hardware requires each descriptor ring to be 8kB aligned, and fit within * a contiguous 8kB physical address. 
*/ #define D64RINGALIGN_BITS 13 #define D64MAXRINGSZ (1 << D64RINGALIGN_BITS) #define D64RINGALIGN (1 << D64RINGALIGN_BITS) #define D64MAXDD (D64MAXRINGSZ / sizeof(struct dma64desc)) /* transmit channel control */ #define D64_XC_XE 0x00000001 /* transmit enable */ #define D64_XC_SE 0x00000002 /* transmit suspend request */ #define D64_XC_LE 0x00000004 /* loopback enable */ #define D64_XC_FL 0x00000010 /* flush request */ #define D64_XC_PD 0x00000800 /* parity check disable */ #define D64_XC_AE 0x00030000 /* address extension bits */ #define D64_XC_AE_SHIFT 16 /* transmit descriptor table pointer */ #define D64_XP_LD_MASK 0x00000fff /* last valid descriptor */ /* transmit channel status */ #define D64_XS0_CD_MASK 0x00001fff /* current descriptor pointer */ #define D64_XS0_XS_MASK 0xf0000000 /* transmit state */ #define D64_XS0_XS_SHIFT 28 #define D64_XS0_XS_DISABLED 0x00000000 /* disabled */ #define D64_XS0_XS_ACTIVE 0x10000000 /* active */ #define D64_XS0_XS_IDLE 0x20000000 /* idle wait */ #define D64_XS0_XS_STOPPED 0x30000000 /* stopped */ #define D64_XS0_XS_SUSP 0x40000000 /* suspend pending */ #define D64_XS1_AD_MASK 0x00001fff /* active descriptor */ #define D64_XS1_XE_MASK 0xf0000000 /* transmit errors */ #define D64_XS1_XE_SHIFT 28 #define D64_XS1_XE_NOERR 0x00000000 /* no error */ #define D64_XS1_XE_DPE 0x10000000 /* descriptor protocol error */ #define D64_XS1_XE_DFU 0x20000000 /* data fifo underrun */ #define D64_XS1_XE_DTE 0x30000000 /* data transfer error */ #define D64_XS1_XE_DESRE 0x40000000 /* descriptor read error */ #define D64_XS1_XE_COREE 0x50000000 /* core error */ /* receive channel control */ /* receive enable */ #define D64_RC_RE 0x00000001 /* receive frame offset */ #define D64_RC_RO_MASK 0x000000fe #define D64_RC_RO_SHIFT 1 /* direct fifo receive (pio) mode */ #define D64_RC_FM 0x00000100 /* separate rx header descriptor enable */ #define D64_RC_SH 0x00000200 /* overflow continue */ #define D64_RC_OC 0x00000400 /* parity check disable */ 
#define D64_RC_PD 0x00000800 /* address extension bits */ #define D64_RC_AE 0x00030000 #define D64_RC_AE_SHIFT 16 /* flags for dma controller */ /* partity enable */ #define DMA_CTRL_PEN (1 << 0) /* rx overflow continue */ #define DMA_CTRL_ROC (1 << 1) /* allow rx scatter to multiple descriptors */ #define DMA_CTRL_RXMULTI (1 << 2) /* Unframed Rx/Tx data */ #define DMA_CTRL_UNFRAMED (1 << 3) /* receive descriptor table pointer */ #define D64_RP_LD_MASK 0x00000fff /* last valid descriptor */ /* receive channel status */ #define D64_RS0_CD_MASK 0x00001fff /* current descriptor pointer */ #define D64_RS0_RS_MASK 0xf0000000 /* receive state */ #define D64_RS0_RS_SHIFT 28 #define D64_RS0_RS_DISABLED 0x00000000 /* disabled */ #define D64_RS0_RS_ACTIVE 0x10000000 /* active */ #define D64_RS0_RS_IDLE 0x20000000 /* idle wait */ #define D64_RS0_RS_STOPPED 0x30000000 /* stopped */ #define D64_RS0_RS_SUSP 0x40000000 /* suspend pending */ #define D64_RS1_AD_MASK 0x0001ffff /* active descriptor */ #define D64_RS1_RE_MASK 0xf0000000 /* receive errors */ #define D64_RS1_RE_SHIFT 28 #define D64_RS1_RE_NOERR 0x00000000 /* no error */ #define D64_RS1_RE_DPO 0x10000000 /* descriptor protocol error */ #define D64_RS1_RE_DFU 0x20000000 /* data fifo overflow */ #define D64_RS1_RE_DTE 0x30000000 /* data transfer error */ #define D64_RS1_RE_DESRE 0x40000000 /* descriptor read error */ #define D64_RS1_RE_COREE 0x50000000 /* core error */ /* fifoaddr */ #define D64_FA_OFF_MASK 0xffff /* offset */ #define D64_FA_SEL_MASK 0xf0000 /* select */ #define D64_FA_SEL_SHIFT 16 #define D64_FA_SEL_XDD 0x00000 /* transmit dma data */ #define D64_FA_SEL_XDP 0x10000 /* transmit dma pointers */ #define D64_FA_SEL_RDD 0x40000 /* receive dma data */ #define D64_FA_SEL_RDP 0x50000 /* receive dma pointers */ #define D64_FA_SEL_XFD 0x80000 /* transmit fifo data */ #define D64_FA_SEL_XFP 0x90000 /* transmit fifo pointers */ #define D64_FA_SEL_RFD 0xc0000 /* receive fifo data */ #define D64_FA_SEL_RFP 0xd0000 /* 
receive fifo pointers */ #define D64_FA_SEL_RSD 0xe0000 /* receive frame status data */ #define D64_FA_SEL_RSP 0xf0000 /* receive frame status pointers */ /* descriptor control flags 1 */ #define D64_CTRL_COREFLAGS 0x0ff00000 /* core specific flags */ #define D64_CTRL1_EOT ((u32)1 << 28) /* end of descriptor table */ #define D64_CTRL1_IOC ((u32)1 << 29) /* interrupt on completion */ #define D64_CTRL1_EOF ((u32)1 << 30) /* end of frame */ #define D64_CTRL1_SOF ((u32)1 << 31) /* start of frame */ /* descriptor control flags 2 */ /* buffer byte count. real data len must <= 16KB */ #define D64_CTRL2_BC_MASK 0x00007fff /* address extension bits */ #define D64_CTRL2_AE 0x00030000 #define D64_CTRL2_AE_SHIFT 16 /* parity bit */ #define D64_CTRL2_PARITY 0x00040000 /* control flags in the range [27:20] are core-specific and not defined here */ #define D64_CTRL_CORE_MASK 0x0ff00000 #define D64_RX_FRM_STS_LEN 0x0000ffff /* frame length mask */ #define D64_RX_FRM_STS_OVFL 0x00800000 /* RxOverFlow */ #define D64_RX_FRM_STS_DSCRCNT 0x0f000000 /* no. of descriptors used - 1 */ #define D64_RX_FRM_STS_DATATYPE 0xf0000000 /* core-dependent data type */ /* * packet headroom necessary to accommodate the largest header * in the system, (i.e TXOFF). By doing, we avoid the need to * allocate an extra buffer for the header when bridging to WL. * There is a compile time check in wlc.c which ensure that this * value is at least as big as TXOFF. This value is used in * dma_rxfill(). */ #define BCMEXTRAHDROOM 172 #define MAXNAMEL 8 /* 8 char names */ /* macros to convert between byte offsets and indexes */ #define B2I(bytes, type) ((bytes) / sizeof(type)) #define I2B(index, type) ((index) * sizeof(type)) #define PCI32ADDR_HIGH 0xc0000000 /* address[31:30] */ #define PCI32ADDR_HIGH_SHIFT 30 /* address[31:30] */ #define PCI64ADDR_HIGH 0x80000000 /* address[63] */ #define PCI64ADDR_HIGH_SHIFT 31 /* address[63] */ /* * DMA Descriptor * Descriptors are only read by the hardware, never written back. 
*/ struct dma64desc { __le32 ctrl1; /* misc control bits & bufcount */ __le32 ctrl2; /* buffer count and address extension */ __le32 addrlow; /* memory address of the date buffer, bits 31:0 */ __le32 addrhigh; /* memory address of the date buffer, bits 63:32 */ }; /* dma engine software state */ struct dma_info { struct dma_pub dma; /* exported structure */ char name[MAXNAMEL]; /* callers name for diag msgs */ struct bcma_device *core; struct device *dmadev; /* session information for AMPDU */ struct brcms_ampdu_session ampdu_session; bool dma64; /* this dma engine is operating in 64-bit mode */ bool addrext; /* this dma engine supports DmaExtendedAddrChanges */ /* 64-bit dma tx engine registers */ uint d64txregbase; /* 64-bit dma rx engine registers */ uint d64rxregbase; /* pointer to dma64 tx descriptor ring */ struct dma64desc *txd64; /* pointer to dma64 rx descriptor ring */ struct dma64desc *rxd64; u16 dmadesc_align; /* alignment requirement for dma descriptors */ u16 ntxd; /* # tx descriptors tunable */ u16 txin; /* index of next descriptor to reclaim */ u16 txout; /* index of next descriptor to post */ /* pointer to parallel array of pointers to packets */ struct sk_buff **txp; /* Aligned physical address of descriptor ring */ dma_addr_t txdpa; /* Original physical address of descriptor ring */ dma_addr_t txdpaorig; u16 txdalign; /* #bytes added to alloc'd mem to align txd */ u32 txdalloc; /* #bytes allocated for the ring */ u32 xmtptrbase; /* When using unaligned descriptors, the ptr register * is not just an index, it needs all 13 bits to be * an offset from the addr register. 
*/ u16 nrxd; /* # rx descriptors tunable */ u16 rxin; /* index of next descriptor to reclaim */ u16 rxout; /* index of next descriptor to post */ /* pointer to parallel array of pointers to packets */ struct sk_buff **rxp; /* Aligned physical address of descriptor ring */ dma_addr_t rxdpa; /* Original physical address of descriptor ring */ dma_addr_t rxdpaorig; u16 rxdalign; /* #bytes added to alloc'd mem to align rxd */ u32 rxdalloc; /* #bytes allocated for the ring */ u32 rcvptrbase; /* Base for ptr reg when using unaligned descriptors */ /* tunables */ unsigned int rxbufsize; /* rx buffer size in bytes, not including * the extra headroom */ uint rxextrahdrroom; /* extra rx headroom, reverseved to assist upper * stack, e.g. some rx pkt buffers will be * bridged to tx side without byte copying. * The extra headroom needs to be large enough * to fit txheader needs. Some dongle driver may * not need it. */ uint nrxpost; /* # rx buffers to keep posted */ unsigned int rxoffset; /* rxcontrol offset */ /* add to get dma address of descriptor ring, low 32 bits */ uint ddoffsetlow; /* high 32 bits */ uint ddoffsethigh; /* add to get dma address of data buffer, low 32 bits */ uint dataoffsetlow; /* high 32 bits */ uint dataoffsethigh; /* descriptor base need to be aligned or not */ bool aligndesc_4k; }; /* Check for odd number of 1's */ static u32 parity32(__le32 data) { /* no swap needed for counting 1's */ u32 par_data = *(u32 *)&data; par_data ^= par_data >> 16; par_data ^= par_data >> 8; par_data ^= par_data >> 4; par_data ^= par_data >> 2; par_data ^= par_data >> 1; return par_data & 1; } static bool dma64_dd_parity(struct dma64desc *dd) { return parity32(dd->addrlow ^ dd->addrhigh ^ dd->ctrl1 ^ dd->ctrl2); } /* descriptor bumping functions */ static uint xxd(uint x, uint n) { return x & (n - 1); /* faster than %, but n must be power of 2 */ } static uint txd(struct dma_info *di, uint x) { return xxd(x, di->ntxd); } static uint rxd(struct dma_info *di, uint x) { 
return xxd(x, di->nrxd); } static uint nexttxd(struct dma_info *di, uint i) { return txd(di, i + 1); } static uint prevtxd(struct dma_info *di, uint i) { return txd(di, i - 1); } static uint nextrxd(struct dma_info *di, uint i) { return rxd(di, i + 1); } static uint ntxdactive(struct dma_info *di, uint h, uint t) { return txd(di, t-h); } static uint nrxdactive(struct dma_info *di, uint h, uint t) { return rxd(di, t-h); } static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags) { uint dmactrlflags; if (di == NULL) return 0; dmactrlflags = di->dma.dmactrlflags; dmactrlflags &= ~mask; dmactrlflags |= flags; /* If trying to enable parity, check if parity is actually supported */ if (dmactrlflags & DMA_CTRL_PEN) { u32 control; control = bcma_read32(di->core, DMA64TXREGOFFS(di, control)); bcma_write32(di->core, DMA64TXREGOFFS(di, control), control | D64_XC_PD); if (bcma_read32(di->core, DMA64TXREGOFFS(di, control)) & D64_XC_PD) /* We *can* disable it so it is supported, * restore control register */ bcma_write32(di->core, DMA64TXREGOFFS(di, control), control); else /* Not supported, don't allow it to be enabled */ dmactrlflags &= ~DMA_CTRL_PEN; } di->dma.dmactrlflags = dmactrlflags; return dmactrlflags; } static bool _dma64_addrext(struct dma_info *di, uint ctrl_offset) { u32 w; bcma_set32(di->core, ctrl_offset, D64_XC_AE); w = bcma_read32(di->core, ctrl_offset); bcma_mask32(di->core, ctrl_offset, ~D64_XC_AE); return (w & D64_XC_AE) == D64_XC_AE; } /* * return true if this dma engine supports DmaExtendedAddrChanges, * otherwise false */ static bool _dma_isaddrext(struct dma_info *di) { /* DMA64 supports full 32- or 64-bit operation. 
AE is always valid */ /* not all tx or rx channel are available */ if (di->d64txregbase != 0) { if (!_dma64_addrext(di, DMA64TXREGOFFS(di, control))) brcms_dbg_dma(di->core, "%s: DMA64 tx doesn't have AE set\n", di->name); return true; } else if (di->d64rxregbase != 0) { if (!_dma64_addrext(di, DMA64RXREGOFFS(di, control))) brcms_dbg_dma(di->core, "%s: DMA64 rx doesn't have AE set\n", di->name); return true; } return false; } static bool _dma_descriptor_align(struct dma_info *di) { u32 addrl; /* Check to see if the descriptors need to be aligned on 4K/8K or not */ if (di->d64txregbase != 0) { bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow), 0xff0); addrl = bcma_read32(di->core, DMA64TXREGOFFS(di, addrlow)); if (addrl != 0) return false; } else if (di->d64rxregbase != 0) { bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow), 0xff0); addrl = bcma_read32(di->core, DMA64RXREGOFFS(di, addrlow)); if (addrl != 0) return false; } return true; } /* * Descriptor table must start at the DMA hardware dictated alignment, so * allocated memory must be large enough to support this requirement. */ static void *dma_alloc_consistent(struct dma_info *di, uint size, u16 align_bits, uint *alloced, dma_addr_t *pap) { if (align_bits) { u16 align = (1 << align_bits); if (!IS_ALIGNED(PAGE_SIZE, align)) size += align; *alloced = size; } return dma_alloc_coherent(di->dmadev, size, pap, GFP_ATOMIC); } static u8 dma_align_sizetobits(uint size) { u8 bitpos = 0; while (size >>= 1) bitpos++; return bitpos; } /* This function ensures that the DMA descriptor ring will not get allocated * across Page boundary. If the allocation is done across the page boundary * at the first time, then it is freed and the allocation is done at * descriptor ring size aligned location. 
This will ensure that the ring will
 * not cross page boundary */
/*
 * Allocate a descriptor ring that does not cross the given address
 * boundary. If the first allocation straddles the boundary, it is
 * freed and reallocated once with a tighter alignment derived from
 * the ring size; *alignbits is updated in that case.
 */
static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
			   u16 *alignbits, uint *alloced,
			   dma_addr_t *descpa)
{
	void *va;
	u32 desc_strtaddr;
	u32 alignbytes = 1 << *alignbits;

	va = dma_alloc_consistent(di, size, *alignbits, alloced, descpa);

	if (NULL == va)
		return NULL;

	/* start address after rounding up to the requested alignment */
	desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
	if (((desc_strtaddr + size - 1) & boundary) !=
	    (desc_strtaddr & boundary)) {
		/* ring crosses the boundary: retry with size-based alignment */
		*alignbits = dma_align_sizetobits(size);
		dma_free_coherent(di->dmadev, size, va, *descpa);
		va = dma_alloc_consistent(di, size, *alignbits,
					  alloced, descpa);
	}
	return va;
}

/*
 * Allocate the 64-bit descriptor ring for one direction (DMA_TX or
 * DMA_RX) and record the aligned virtual/physical addresses plus the
 * alignment slack in @di. Returns false on allocation failure.
 */
static bool dma64_alloc(struct dma_info *di, uint direction)
{
	u16 size;
	uint ddlen;
	void *va;
	uint alloced = 0;
	u16 align;
	u16 align_bits;

	ddlen = sizeof(struct dma64desc);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
	align_bits = di->dmadesc_align;
	align = (1 << align_bits);

	if (direction == DMA_TX) {
		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->txdpaorig);
		if (va == NULL) {
			brcms_dbg_dma(di->core,
				      "%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
				      di->name);
			return false;
		}
		/* dma_ringalloc may have tightened the alignment */
		align = (1 << align_bits);
		di->txd64 = (struct dma64desc *)
				roundup((unsigned long)va, align);
		/* bytes skipped between the raw va and the aligned ring */
		di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
		di->txdpa = di->txdpaorig + di->txdalign;
		di->txdalloc = alloced;
	} else {
		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->rxdpaorig);
		if (va == NULL) {
			brcms_dbg_dma(di->core,
				      "%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
				      di->name);
			return false;
		}
		align = (1 << align_bits);
		di->rxd64 = (struct dma64desc *)
				roundup((unsigned long)va, align);
		di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
		di->rxdpa = di->rxdpaorig + di->rxdalign;
		di->rxdalloc = alloced;
	}

	return true;
}

/* Thin wrapper; only the 64-bit descriptor format is supported here */
static bool _dma_alloc(struct dma_info *di, uint direction)
{
	return dma64_alloc(di, direction);
}

/*
 * Attach and initialize a DMA engine instance.
 *
 * Allocates the private dma_info, programs default control flags,
 * computes backplane address offsets, allocates tx/rx packet pointer
 * vectors and descriptor rings, and returns the public handle.
 * Returns NULL on failure (everything already allocated is released
 * via dma_detach()).
 */
struct dma_pub *dma_attach(char *name, struct brcms_c_info *wlc,
			   uint txregbase, uint rxregbase,
			   uint ntxd, uint nrxd,
			   uint rxbufsize, int rxextheadroom,
			   uint nrxpost, uint rxoffset)
{
	struct si_pub *sih = wlc->hw->sih;
	struct bcma_device *core = wlc->hw->d11core;
	struct dma_info *di;
	u8 rev = core->id.rev;
	uint size;
	struct si_info *sii = container_of(sih, struct si_info, pub);

	/* allocate private info structure */
	di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC);
	if (di == NULL)
		return NULL;

	di->dma64 =
		((bcma_aread32(core, BCMA_IOST) & SISF_DMA64) == SISF_DMA64);

	/* init dma reg info */
	di->core = core;
	di->d64txregbase = txregbase;
	di->d64rxregbase = rxregbase;

	/*
	 * Default flags (which can be changed by the driver calling
	 * dma_ctrlflags before enable): For backwards compatibility
	 * both Rx Overflow Continue and Parity are DISABLED.
	 */
	_dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);

	brcms_dbg_dma(di->core, "%s: %s flags 0x%x ntxd %d nrxd %d "
		      "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
		      "txregbase %u rxregbase %u\n", name, "DMA64",
		      di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
		      rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase);

	/* make a private copy of our callers name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL - 1] = '\0';	/* strncpy may not terminate */

	di->dmadev = core->dma_dev;

	/* save tunables */
	di->ntxd = (u16) ntxd;
	di->nrxd = (u16) nrxd;

	/* the actual dma size doesn't include the extra headroom */
	di->rxextrahdrroom =
	    (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
	else
		di->rxbufsize = (u16) rxbufsize;

	di->nrxpost = (u16) nrxpost;
	di->rxoffset = (u8) rxoffset;

	/*
	 * figure out the DMA physical address offset for dd and data
	 * PCI/PCIE: they map silicon backplace address to zero
	 * based memory, need offset
	 * Other bus: use zero SI_BUS BIGENDIAN kludge: use sdram
	 * swapped region for data buffer, not descriptor
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* for pci bus, add offset */
	if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI) {
		/* add offset for pcie with DMA64 bus */
		di->ddoffsetlow = 0;
		di->ddoffsethigh = SI_PCIE_DMA_H32;
	}
	di->dataoffsetlow = di->ddoffsetlow;
	di->dataoffsethigh = di->ddoffsethigh;

	/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
	if ((core->id.id == BCMA_CORE_SDIO_DEV)
	    && ((rev > 0) && (rev <= 2)))
		di->addrext = false;
	else if ((core->id.id == BCMA_CORE_I2S) &&
		 ((rev == 0) || (rev == 1)))
		di->addrext = false;
	else
		di->addrext = _dma_isaddrext(di);

	/* does the descriptor need to be aligned and if yes, on 4K/8K or not */
	di->aligndesc_4k = _dma_descriptor_align(di);
	if (di->aligndesc_4k) {
		di->dmadesc_align = D64RINGALIGN_BITS;
		if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2))
			/* for smaller dd table, HW relax alignment reqmnt */
			di->dmadesc_align = D64RINGALIGN_BITS - 1;
	} else {
		di->dmadesc_align = 4;	/* 16 byte alignment */
	}

	brcms_dbg_dma(di->core, "DMA descriptor align_needed %d, align %d\n",
		      di->aligndesc_4k, di->dmadesc_align);

	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		di->txp = kzalloc(size, GFP_ATOMIC);
		if (di->txp == NULL)
			goto fail;
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		di->rxp = kzalloc(size, GFP_ATOMIC);
		if (di->rxp == NULL)
			goto fail;
	}

	/*
	 * allocate transmit descriptor ring, only need ntxd descriptors
	 * but it must be aligned
	 */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/*
	 * allocate receive descriptor ring, only need nrxd descriptors
	 * but it must be aligned
	 */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}

	/* without address extension the rings must sit below SI_PCI_DMA_SZ */
	if ((di->ddoffsetlow != 0) && !di->addrext) {
		if (di->txdpa > SI_PCI_DMA_SZ) {
			brcms_dbg_dma(di->core,
				      "%s: txdpa 0x%x: addrext not supported\n",
				      di->name, (u32)di->txdpa);
			goto fail;
		}
		if (di->rxdpa > SI_PCI_DMA_SZ) {
			brcms_dbg_dma(di->core,
				      "%s: rxdpa 0x%x: addrext not supported\n",
				      di->name, (u32)di->rxdpa);
			goto fail;
		}
	}

	/* Initialize AMPDU session */
	brcms_c_ampdu_reset_session(&di->ampdu_session, wlc);

	brcms_dbg_dma(di->core,
		      "ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n",
		      di->ddoffsetlow, di->ddoffsethigh,
		      di->dataoffsetlow, di->dataoffsethigh, di->addrext);

	return (struct dma_pub *) di;

 fail:
	dma_detach((struct dma_pub *)di);
	return NULL;
}

/*
 * Fill one descriptor slot with buffer address, flags and byte count.
 * For 32-bit PCI physical addresses above PCI32ADDR_HIGH the high bits
 * are moved into the descriptor's address-extension (AE) field.
 * When parity generation is enabled, the parity bit is set so the
 * descriptor's overall parity is even (per dma64_dd_parity()).
 */
static inline void
dma64_dd_upd(struct dma_info *di, struct dma64desc *ddring,
	     dma_addr_t pa, uint outidx, u32 *flags, u32 bufcount)
{
	u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

	/* PCI bus with big(>1G) physical address, use address extension */
	if ((di->dataoffsetlow == 0) || !(pa & PCI32ADDR_HIGH)) {
		ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
		ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
		ddring[outidx].ctrl1 = cpu_to_le32(*flags);
		ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
	} else {
		/* address extension for 32-bit PCI */
		u32 ae;

		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		pa &= ~PCI32ADDR_HIGH;

		ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
		ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
		ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
		ddring[outidx].ctrl1 = cpu_to_le32(*flags);
		ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
	}
	if (di->dma.dmactrlflags & DMA_CTRL_PEN) {
		if (dma64_dd_parity(&ddring[outidx]))
			ddring[outidx].ctrl2 =
			     cpu_to_le32(ctrl2 | D64_CTRL2_PARITY);
	}
}

/* !!
may be called with core in reset */
/*
 * Tear down a DMA engine instance: free both descriptor rings, the
 * packet pointer vectors and the private dma_info itself. Does not
 * touch hardware registers, so it is safe while the core is in reset.
 */
void dma_detach(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);

	brcms_dbg_dma(di->core, "%s:\n", di->name);

	/* free dma descriptor rings */
	if (di->txd64)
		dma_free_coherent(di->dmadev, di->txdalloc,
				  ((s8 *)di->txd64 - di->txdalign),
				  (di->txdpaorig));
	if (di->rxd64)
		dma_free_coherent(di->dmadev, di->rxdalloc,
				  ((s8 *)di->rxd64 - di->rxdalign),
				  (di->rxdpaorig));

	/* free packet pointer vectors */
	kfree(di->txp);
	kfree(di->rxp);

	/* free our private info structure */
	kfree(di);
}

/* initialize descriptor table base address */
static void
_dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
{
	if (!di->aligndesc_4k) {
		/* unaligned rings: remember the base so ring offsets can
		 * be computed relative to it later
		 */
		if (direction == DMA_TX)
			di->xmtptrbase = pa;
		else
			di->rcvptrbase = pa;
	}

	if ((di->ddoffsetlow == 0) || !(pa & PCI32ADDR_HIGH)) {
		if (direction == DMA_TX) {
			bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
				     pa + di->ddoffsetlow);
			bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
				     di->ddoffsethigh);
		} else {
			bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
				     pa + di->ddoffsetlow);
			bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
				     di->ddoffsethigh);
		}
	} else {
		/* DMA64 32bits address extension */
		u32 ae;

		/* shift the high bit(s) from pa to ae */
		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		pa &= ~PCI32ADDR_HIGH;

		if (direction == DMA_TX) {
			bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
				     pa + di->ddoffsetlow);
			bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
				     di->ddoffsethigh);
			bcma_maskset32(di->core, DMA64TXREGOFFS(di, control),
				       D64_XC_AE, (ae << D64_XC_AE_SHIFT));
		} else {
			bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
				     pa + di->ddoffsetlow);
			bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
				     di->ddoffsethigh);
			bcma_maskset32(di->core, DMA64RXREGOFFS(di, control),
				       D64_RC_AE, (ae << D64_RC_AE_SHIFT));
		}
	}
}

/*
 * Enable the receive engine: set RE plus the configured rx byte offset,
 * preserving the address-extension bits already in the control register.
 * Parity detection (PD) and overflow-continue (OC) follow dmactrlflags.
 */
static void _dma_rxenable(struct dma_info *di)
{
	uint dmactrlflags = di->dma.dmactrlflags;
	u32 control;

	brcms_dbg_dma(di->core, "%s:\n", di->name);

	control = D64_RC_RE | (bcma_read32(di->core,
					   DMA64RXREGOFFS(di, control)) &
			       D64_RC_AE);

	if ((dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_RC_PD;

	if (dmactrlflags & DMA_CTRL_ROC)
		control |= D64_RC_OC;

	bcma_write32(di->core, DMA64RXREGOFFS(di, control),
		     ((di->rxoffset << D64_RC_RO_SHIFT) | control));
}

/*
 * Reset rx state and (re)program the rx descriptor table base.
 * Table-init ordering relative to enable depends on whether the
 * descriptors have the 4K alignment requirement.
 */
void dma_rxinit(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);

	brcms_dbg_dma(di->core, "%s:\n", di->name);

	if (di->nrxd == 0)
		return;

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	memset(di->rxd64, '\0', di->nrxd * sizeof(struct dma64desc));

	/* DMA engine with out alignment requirement requires table to be inited
	 * before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);

	_dma_rxenable(di);

	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
}

/*
 * Reclaim the next completed rx buffer (or any posted buffer when
 * @forceall) and unmap it. Returns NULL when nothing is reclaimable.
 */
static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
{
	uint i, curr;
	struct sk_buff *rxp;
	dma_addr_t pa;

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return NULL;

	/* hardware's current descriptor index, relative to the ring base */
	curr =
	    B2I(((bcma_read32(di->core,
			      DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) -
		 di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);

	/* ignore curr if forceall */
	if (!forceall && (i == curr))
		return NULL;

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	di->rxp[i] = NULL;

	pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow;

	/* clear this packet from the descriptor ring */
	dma_unmap_single(di->dmadev, pa, di->rxbufsize, DMA_FROM_DEVICE);

	/* poison the stale descriptor addresses */
	di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef);
	di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef);

	di->rxin = nextrxd(di, i);

	return rxp;
}

/* Wrapper that guards against an unconfigured (nrxd == 0) rx ring */
static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
{
	if (di->nrxd == 0)
		return NULL;

	return dma64_getnextrxp(di, forceall);
}

/*
 * !!
rx entry routine
 * returns the number packages in the next frame, or 0 if there are no more
 * if DMA_CTRL_RXMULTI is defined, DMA scattering(multiple buffers) is
 * supported with pkts chain
 * otherwise, it's treated as giant pkt and will be tossed.
 * The DMA scattering starts with normal DMA header, followed by first
 * buffer data. After it reaches the max size of buffer, the data continues
 * in next DMA descriptor buffer WITHOUT DMA header
 */
int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);
	struct sk_buff_head dma_frames;
	struct sk_buff *p, *next;
	uint len;
	uint pkt_len;
	int resid = 0;
	int pktcnt = 1;

	skb_queue_head_init(&dma_frames);
 next_frame:
	p = _dma_getnextrxp(di, false);
	if (p == NULL)
		return 0;

	/* frame length is written by hw into the first 2 bytes of the buf */
	len = le16_to_cpu(*(__le16 *) (p->data));
	brcms_dbg_dma(di->core, "%s: dma_rx len %d\n", di->name, len);
	dma_spin_for_len(len, p);

	/* set actual length */
	pkt_len = min((di->rxoffset + len), di->rxbufsize);
	__skb_trim(p, pkt_len);
	skb_queue_tail(&dma_frames, p);
	/* bytes of the frame that did not fit in the first buffer */
	resid = len - (di->rxbufsize - di->rxoffset);

	/* check for single or multi-buffer rx */
	if (resid > 0) {
		while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
			pkt_len = min_t(uint, resid, di->rxbufsize);
			__skb_trim(p, pkt_len);
			skb_queue_tail(&dma_frames, p);
			resid -= di->rxbufsize;
			pktcnt++;
		}

#ifdef DEBUG
		if (resid > 0) {
			uint cur;
			cur =
			    B2I(((bcma_read32(di->core,
					      DMA64RXREGOFFS(di, status0)) &
				  D64_RS0_CD_MASK) - di->rcvptrbase) &
				D64_RS0_CD_MASK, struct dma64desc);
			brcms_dbg_dma(di->core,
				      "rxin %d rxout %d, hw_curr %d\n",
				      di->rxin, di->rxout, cur);
		}
#endif				/* DEBUG */

		if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
			/* multi-buffer rx not enabled: toss the giant frame */
			brcms_dbg_dma(di->core, "%s: bad frame length (%d)\n",
				      di->name, len);
			skb_queue_walk_safe(&dma_frames, p, next) {
				skb_unlink(p, &dma_frames);
				brcmu_pkt_buf_free_skb(p);
			}
			di->dma.rxgiants++;
			pktcnt = 1;
			goto next_frame;
		}
	}

	skb_queue_splice_tail(&dma_frames, skb_list);
	return pktcnt;
}

/* true when the rx engine's current descriptor equals the last posted one */
static bool dma64_rxidle(struct dma_info *di)
{
	brcms_dbg_dma(di->core, "%s:\n", di->name);

	if (di->nrxd == 0)
		return true;

	return ((bcma_read32(di->core,
			     DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) ==
		(bcma_read32(di->core, DMA64RXREGOFFS(di, ptr)) &
		 D64_RS0_CD_MASK));
}

/* true when the tx engine's current descriptor equals the last posted one */
static bool dma64_txidle(struct dma_info *di)
{
	if (di->ntxd == 0)
		return true;

	return ((bcma_read32(di->core,
			     DMA64TXREGOFFS(di, status0)) & D64_XS0_CD_MASK) ==
		(bcma_read32(di->core, DMA64TXREGOFFS(di, ptr)) &
		 D64_XS0_CD_MASK));
}

/*
 * post receive buffers
 * Return false if refill failed completely or dma mapping failed. The ring
 * is empty, which will stall the rx dma and user might want to call rxfill
 * again asap. This is unlikely to happen on a memory-rich NIC, but often on
 * memory-constrained dongle.
 */
bool dma_rxfill(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);
	struct sk_buff *p;
	u16 rxin, rxout;
	u32 flags = 0;
	uint n;
	uint i;
	dma_addr_t pa;
	uint extra_offset = 0;
	bool ring_empty;

	ring_empty = false;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */

	rxin = di->rxin;
	rxout = di->rxout;

	n = di->nrxpost - nrxdactive(di, rxin, rxout);

	brcms_dbg_dma(di->core, "%s: post %d\n", di->name, n);

	if (di->rxbufsize > BCMEXTRAHDROOM)
		extra_offset = di->rxextrahdrroom;

	for (i = 0; i < n; i++) {
		/*
		 * the di->rxbufsize doesn't include the extra headroom,
		 * we need to add it to the size to be allocated
		 */
		p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);

		if (p == NULL) {
			brcms_dbg_dma(di->core, "%s: out of rxbufs\n",
				      di->name);
			if (i == 0 && dma64_rxidle(di)) {
				brcms_dbg_dma(di->core, "%s: ring is empty !\n",
					      di->name);
				ring_empty = true;
			}
			di->dma.rxnobuf++;
			break;
		}
		/* reserve an extra headroom, if applicable */
		if (extra_offset)
			skb_pull(p, extra_offset);

		/* Do a cached write instead of uncached write since DMA_MAP
		 * will flush the cache.
		 */
		*(u32 *) (p->data) = 0;

		pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
				    DMA_FROM_DEVICE);
		if (dma_mapping_error(di->dmadev, pa)) {
			brcmu_pkt_buf_free_skb(p);
			return false;
		}

		/* save the free packet pointer */
		di->rxp[rxout] = p;

		/* reset flags for each descriptor */
		flags = 0;
		if (rxout == (di->nrxd - 1))
			flags = D64_CTRL1_EOT;

		dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
			     di->rxbufsize);
		rxout = nextrxd(di, rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	bcma_write32(di->core, DMA64RXREGOFFS(di, ptr),
		     di->rcvptrbase + I2B(rxout, struct dma64desc));

	return ring_empty;
}

/* Free every posted rx buffer (forceall reclaim) */
void dma_rxreclaim(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);
	struct sk_buff *p;

	brcms_dbg_dma(di->core, "%s:\n", di->name);

	while ((p = _dma_getnextrxp(di, true)))
		brcmu_pkt_buf_free_skb(p);
}

void dma_counterreset(struct dma_pub *pub)
{
	/* reset all software counters */
	pub->rxgiants = 0;
	pub->rxnobuf = 0;
	pub->txnobuf = 0;
}

/* get the address of the var in order to change later */
unsigned long dma_getvar(struct dma_pub *pub, const char *name)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);

	/* only "&txavail" is currently recognized */
	if (!strcmp(name, "&txavail"))
		return (unsigned long)&(di->dma.txavail);
	return 0;
}

/* 64-bit DMA functions */

/*
 * Reset tx state, clear the ring and enable the transmit engine.
 * Table-init ordering relative to enable depends on whether the
 * descriptors have the 4K alignment requirement.
 */
void dma_txinit(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);
	u32 control = D64_XC_XE;

	brcms_dbg_dma(di->core, "%s:\n", di->name);

	if (di->ntxd == 0)
		return;

	di->txin = di->txout = 0;
	di->dma.txavail = di->ntxd - 1;

	/* clear tx descriptor ring */
	memset(di->txd64, '\0', (di->ntxd * sizeof(struct dma64desc)));

	/* DMA engine with out alignment requirement requires table to be inited
	 * before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);

	if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_XC_PD;
	bcma_set32(di->core, DMA64TXREGOFFS(di, control), control);

	/* DMA engine with alignment requirement requires table to be inited
	 * before enabling the engine
	 */
	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);
}

/* Set the tx suspend-enable bit */
void dma_txsuspend(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);

	brcms_dbg_dma(di->core, "%s:\n", di->name);

	if (di->ntxd == 0)
		return;

	bcma_set32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
}

/* Clear the tx suspend-enable bit */
void dma_txresume(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);

	brcms_dbg_dma(di->core, "%s:\n", di->name);

	if (di->ntxd == 0)
		return;

	bcma_mask32(di->core, DMA64TXREGOFFS(di, control), ~D64_XC_SE);
}

/* true if tx is unconfigured or the suspend-enable bit is set */
bool dma_txsuspended(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);

	return (di->ntxd == 0) ||
	       ((bcma_read32(di->core,
			     DMA64TXREGOFFS(di, control)) & D64_XC_SE) ==
		D64_XC_SE);
}

/* Reclaim and free tx packets in the given range (see dma_getnexttxp) */
void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);
	struct sk_buff *p;

	brcms_dbg_dma(di->core, "%s: %s\n",
		      di->name,
		      range == DMA_RANGE_ALL ? "all" :
		      range == DMA_RANGE_TRANSMITTED ? "transmitted" :
		      "transferred");

	if (di->txin == di->txout)
		return;

	while ((p = dma_getnexttxp(pub, range))) {
		/* For unframed data, we don't have any packets to free */
		if (!(di->dma.dmactrlflags & DMA_CTRL_UNFRAMED))
			brcmu_pkt_buf_free_skb(p);
	}
}

/*
 * Stop the transmit engine: suspend, wait for a quiescent status,
 * then disable and wait for the disabled state. Returns true when the
 * engine reports disabled.
 */
bool dma_txreset(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);
	u32 status;

	if (di->ntxd == 0)
		return true;

	/* suspend tx DMA first */
	bcma_write32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
	SPINWAIT(((status =
		   (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
		    D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED) &&
		 (status != D64_XS0_XS_IDLE) &&
		 (status != D64_XS0_XS_STOPPED), 10000);

	bcma_write32(di->core, DMA64TXREGOFFS(di, control), 0);
	SPINWAIT(((status =
		   (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
		    D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED), 10000);

	/* wait for the last transaction to complete */
	udelay(300);

	return status == D64_XS0_XS_DISABLED;
}

/* Disable the receive engine and wait for the disabled state */
bool dma_rxreset(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);
	u32 status;

	if (di->nrxd == 0)
		return true;

	bcma_write32(di->core, DMA64RXREGOFFS(di, control), 0);
	SPINWAIT(((status =
		   (bcma_read32(di->core, DMA64RXREGOFFS(di, status0)) &
		    D64_RS0_RS_MASK)) != D64_RS0_RS_DISABLED), 10000);

	return status == D64_RS0_RS_DISABLED;
}

/*
 * Map one skb and post it as a single tx descriptor (SOF|EOF|IOC).
 * On mapping failure the skb is freed and the descriptor is not
 * consumed. Does NOT kick the hardware pointer; callers do that.
 */
static void dma_txenq(struct dma_info *di, struct sk_buff *p)
{
	unsigned char *data;
	uint len;
	u16 txout;
	u32 flags = 0;
	dma_addr_t pa;

	txout = di->txout;

	if (WARN_ON(nexttxd(di, txout) == di->txin))
		return;

	/*
	 * obtain and initialize transmit descriptor entry.
	 */
	data = p->data;
	len = p->len;

	/* get physical address of buffer start */
	pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
	/* if mapping failed, free skb */
	if (dma_mapping_error(di->dmadev, pa)) {
		brcmu_pkt_buf_free_skb(p);
		return;
	}
	/* With a DMA segment list, Descriptor table is filled
	 * using the segment list instead of looping over
	 * buffers in multi-chain DMA. Therefore, EOF for SGLIST
	 * is when end of segment list is reached.
	 */
	flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;
	if (txout == (di->ntxd - 1))
		flags |= D64_CTRL1_EOT;

	dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);

	txout = nexttxd(di, txout);

	/* save the packet */
	di->txp[prevtxd(di, txout)] = p;

	/* bump the tx descriptor index */
	di->txout = txout;
}

/*
 * Flush the queued AMPDU session: finalize it, enqueue every pending
 * skb, kick the hardware tx pointer and reset the session state.
 */
static void ampdu_finalize(struct dma_info *di)
{
	struct brcms_ampdu_session *session = &di->ampdu_session;
	struct sk_buff *p;

	trace_brcms_ampdu_session(&session->wlc->hw->d11core->dev,
				  session->max_ampdu_len,
				  session->max_ampdu_frames,
				  session->ampdu_len,
				  skb_queue_len(&session->skb_list),
				  session->dma_len);

	if (WARN_ON(skb_queue_empty(&session->skb_list)))
		return;

	brcms_c_ampdu_finalize(session);

	while (!skb_queue_empty(&session->skb_list)) {
		p = skb_dequeue(&session->skb_list);
		dma_txenq(di, p);
	}

	bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
		     di->xmtptrbase + I2B(di->txout, struct dma64desc));
	brcms_c_ampdu_reset_session(session, session->wlc);
}

/* Add a frame to the AMPDU session, finalizing first if it is full */
static void prep_ampdu_frame(struct dma_info *di, struct sk_buff *p)
{
	struct brcms_ampdu_session *session = &di->ampdu_session;
	int ret;

	ret = brcms_c_ampdu_add_frame(session, p);
	if (ret == -ENOSPC) {
		/*
		 * AMPDU cannot accomodate this frame. Close out the in-
		 * progress AMPDU session and start a new one.
		 */
		ampdu_finalize(di);
		ret = brcms_c_ampdu_add_frame(session, p);
	}

	WARN_ON(ret);
}

/* Update count of available tx descriptors based on current DMA state */
static void dma_update_txavail(struct dma_info *di)
{
	/*
	 * Available space is number of descriptors less the number of
	 * active descriptors and the number of queued AMPDU frames.
	 */
	di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) -
			  skb_queue_len(&di->ampdu_session.skb_list) - 1;
}

/*
 * !! tx entry routine
 * WARNING: call must check the return value for error.
 * the error(toss frames) could be fatal and cause many subsequent hard
 * to debug problems
 */
int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub,
	       struct sk_buff *p)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);
	struct brcms_ampdu_session *session = &di->ampdu_session;
	struct ieee80211_tx_info *tx_info;
	bool is_ampdu;

	/* no use to transmit a zero length packet */
	if (p->len == 0)
		return 0;

	/* return nonzero if out of tx descriptors */
	if (di->dma.txavail == 0 || nexttxd(di, di->txout) == di->txin)
		goto outoftxd;

	tx_info = IEEE80211_SKB_CB(p);
	is_ampdu = tx_info->flags & IEEE80211_TX_CTL_AMPDU;
	if (is_ampdu)
		prep_ampdu_frame(di, p);
	else
		dma_txenq(di, p);

	/* tx flow control */
	dma_update_txavail(di);

	/* kick the chip */
	if (is_ampdu) {
		/*
		 * Start sending data if we've got a full AMPDU, there's
		 * no more space in the DMA ring, or the ring isn't
		 * currently transmitting.
		 */
		if (skb_queue_len(&session->skb_list) ==
		    session->max_ampdu_frames ||
		    di->dma.txavail == 0 || dma64_txidle(di))
			ampdu_finalize(di);
	} else {
		bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
			     di->xmtptrbase +
			     I2B(di->txout, struct dma64desc));
	}

	return 0;

 outoftxd:
	brcms_dbg_dma(di->core, "%s: out of txds !!!\n", di->name);
	brcmu_pkt_buf_free_skb(p);
	di->dma.txavail = 0;
	di->dma.txnobuf++;
	return -ENOSPC;
}

/* Force out any queued AMPDU frames */
void dma_txflush(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);
	struct brcms_ampdu_session *session = &di->ampdu_session;

	if (!skb_queue_empty(&session->skb_list))
		ampdu_finalize(di);
}

/* Number of descriptors currently in flight between txin and txout */
int dma_txpending(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);

	return ntxdactive(di, di->txin, di->txout);
}

/*
 * If we have an active AMPDU session and are not transmitting,
 * this function will force tx to start.
 */
void dma_kick_tx(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);
	struct brcms_ampdu_session *session = &di->ampdu_session;

	if (!skb_queue_empty(&session->skb_list) && dma64_txidle(di))
		ampdu_finalize(di);
}

/*
 * Reclaim next completed txd (txds if using chained buffers) in the range
 * specified and return associated packet.
 * If range is DMA_RANGE_TRANSMITTED, reclaim descriptors that have be
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is DMA_RANGE_TRANSFERED, reclaim descriptors that have be
 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is DMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return associated packet regardless of the value of hardware pointers.
 */
struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);
	u16 start, end, i;
	u16 active_desc;
	struct sk_buff *txp;

	brcms_dbg_dma(di->core, "%s: %s\n",
		      di->name,
		      range == DMA_RANGE_ALL ? "all" :
		      range == DMA_RANGE_TRANSMITTED ? "transmitted" :
		      "transferred");

	if (di->ntxd == 0)
		return NULL;

	txp = NULL;

	start = di->txin;
	if (range == DMA_RANGE_ALL)
		end = di->txout;
	else {
		/* hardware's current descriptor index, ring-relative */
		end = (u16) (B2I(((bcma_read32(di->core,
					       DMA64TXREGOFFS(di, status0)) &
				   D64_XS0_CD_MASK) - di->xmtptrbase) &
				 D64_XS0_CD_MASK, struct dma64desc));

		if (range == DMA_RANGE_TRANSFERED) {
			active_desc =
			    (u16)(bcma_read32(di->core,
					      DMA64TXREGOFFS(di, status1)) &
				  D64_XS1_AD_MASK);
			active_desc =
			    (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
			active_desc = B2I(active_desc, struct dma64desc);
			if (end != active_desc)
				end = prevtxd(di, active_desc);
		}
	}

	if ((start == 0) && (end > di->txout))
		goto bogus;

	for (i = start; i != end && !txp; i = nexttxd(di, i)) {
		dma_addr_t pa;
		uint size;

		pa = le32_to_cpu(di->txd64[i].addrlow) - di->dataoffsetlow;

		size = (le32_to_cpu(di->txd64[i].ctrl2) &
			D64_CTRL2_BC_MASK);

		/* poison the stale descriptor addresses */
		di->txd64[i].addrlow = cpu_to_le32(0xdeadbeef);
		di->txd64[i].addrhigh = cpu_to_le32(0xdeadbeef);

		txp = di->txp[i];
		di->txp[i] = NULL;

		dma_unmap_single(di->dmadev, pa, size, DMA_TO_DEVICE);
	}

	di->txin = i;

	/* tx flow control */
	dma_update_txavail(di);

	return txp;

 bogus:
	brcms_dbg_dma(di->core, "bogus curr: start %d end %d txout %d\n",
		      start, end, di->txout);
	return NULL;
}

/*
 * Mac80211 initiated actions sometimes require packets in the DMA queue to be
 * modified. The modified portion of the packet is not under control of the DMA
 * engine. This function calls a caller-supplied function for each packet in
 * the caller specified dma chain.
 */
void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
		      (void *pkt, void *arg_a), void *arg_a)
{
	struct dma_info *di = container_of(dmah, struct dma_info, dma);
	uint i =   di->txin;
	uint end = di->txout;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;

	while (i != end) {
		skb = di->txp[i];
		if (skb != NULL) {
			tx_info = (struct ieee80211_tx_info *)skb->cb;
			(callback_fnc)(tx_info, arg_a);
		}
		i = nexttxd(di, i);
	}
}
gpl-2.0
shabinp555/https-github.com-torvalds-linux
arch/x86/um/sysrq_64.c
2121
1268
/* * Copyright 2003 PathScale, Inc. * * Licensed under the GPL */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/utsname.h> #include <asm/current.h> #include <asm/ptrace.h> #include <asm/sysrq.h> void show_regs(struct pt_regs *regs) { printk("\n"); print_modules(); printk(KERN_INFO "Pid: %d, comm: %.20s %s %s\n", task_pid_nr(current), current->comm, print_tainted(), init_utsname()->release); printk(KERN_INFO "RIP: %04lx:[<%016lx>]\n", PT_REGS_CS(regs) & 0xffff, PT_REGS_IP(regs)); printk(KERN_INFO "RSP: %016lx EFLAGS: %08lx\n", PT_REGS_SP(regs), PT_REGS_EFLAGS(regs)); printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n", PT_REGS_AX(regs), PT_REGS_BX(regs), PT_REGS_CX(regs)); printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n", PT_REGS_DX(regs), PT_REGS_SI(regs), PT_REGS_DI(regs)); printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n", PT_REGS_BP(regs), PT_REGS_R8(regs), PT_REGS_R9(regs)); printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n", PT_REGS_R10(regs), PT_REGS_R11(regs), PT_REGS_R12(regs)); printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n", PT_REGS_R13(regs), PT_REGS_R14(regs), PT_REGS_R15(regs)); }
gpl-2.0
Fusion-Devices/android_kernel_mediatek_sprout
drivers/staging/keucr/init.c
2377
9679
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>

#include "usb.h"
#include "scsiglue.h"
#include "transport.h"
#include "init.h"

/*
 * ENE_InitMedia() - probe the ENE reader for an inserted card and bring
 * the SmartMedia side up if one is present and not yet initialised.
 * @us: per-device usb-storage state.
 *
 * Reads the card-status register; bit 0x02 appears to signal "SM card
 * present" (NOTE(review): inferred from the code path — confirm against
 * the ENE datasheet).  Returns USB_STOR_TRANSPORT_ERROR on any transport
 * failure, otherwise propagates the last transfer result.
 */
int ENE_InitMedia(struct us_data *us)
{
	int result;
	BYTE MiscReg03 = 0;

	printk(KERN_INFO "--- Init Media ---\n");
	result = ENE_Read_BYTE(us, REG_CARD_STATUS, &MiscReg03);
	if (result != USB_STOR_XFER_GOOD) {
		printk(KERN_ERR "Read register fail !!\n");
		return USB_STOR_TRANSPORT_ERROR;
	}
	printk(KERN_INFO "MiscReg03 = %x\n", MiscReg03);

	if (MiscReg03 & 0x02) {
		/* Only initialise once: skip if either media type is ready. */
		if (!us->SM_Status.Ready && !us->MS_Status.Ready) {
			result = ENE_SMInit(us);
			if (result != USB_STOR_XFER_GOOD)
				return USB_STOR_TRANSPORT_ERROR;
		}
	}
	return result;
}

/*
 * ENE_Read_BYTE() - read one byte from an internal ENE register.
 * @us:    per-device usb-storage state.
 * @index: 16-bit register index, split into CDB[2] (high) / CDB[3] (low).
 * @buf:   destination for the single byte read.
 *
 * Builds a vendor-specific CDB (opcode 0xED, data-in, length 1) in the
 * shared iobuf CBW and issues it via ENE_SendScsiCmd().  Returns the
 * transfer result from ENE_SendScsiCmd().
 */
int ENE_Read_BYTE(struct us_data *us, WORD index, void *buf)
{
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
	int result;

	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->DataTransferLength = 0x01;
	bcb->Flags = 0x80;	/* direction: device-to-host */
	bcb->CDB[0] = 0xED;	/* vendor opcode: register read */
	bcb->CDB[2] = (BYTE)(index>>8);
	bcb->CDB[3] = (BYTE)index;

	result = ENE_SendScsiCmd(us, FDIR_READ, buf, 0);
	return result;
}

/*
 * ENE_SMInit() - download and run the SmartMedia init firmware, then
 * capture the card's status/ID bytes.
 * @us: per-device usb-storage state.
 *
 * Sequence: load SM_INIT_PATTERN firmware, issue vendor command 0xF1/0x01
 * reading 0x200 status bytes, then latch SM_Status (byte 0), SM_DeviceID
 * (byte 1) and SM_CardID (byte 2).  On a ready card, flags MediaChange
 * and triggers format detection via Check_D_MediaFmt().
 *
 * NOTE(review): 0x200 bytes of 'buf' live on the kernel stack — large but
 * within limits; also '&buf' is passed where 'buf' is meant (same address
 * for an array, so behaviour is unchanged, but it is stylistically wrong).
 */
int ENE_SMInit(struct us_data *us)
{
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
	int result;
	BYTE buf[0x200];

	printk(KERN_INFO "transport --- ENE_SMInit\n");

	result = ENE_LoadBinCode(us, SM_INIT_PATTERN);
	if (result != USB_STOR_XFER_GOOD) {
		printk(KERN_INFO "Load SM Init Code Fail !!\n");
		return USB_STOR_TRANSPORT_ERROR;
	}

	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->DataTransferLength = 0x200;
	bcb->Flags = 0x80;	/* data-in */
	bcb->CDB[0] = 0xF1;	/* vendor opcode: execute loaded firmware */
	bcb->CDB[1] = 0x01;

	result = ENE_SendScsiCmd(us, FDIR_READ, &buf, 0);
	if (result != USB_STOR_XFER_GOOD) {
		printk(KERN_ERR "Execution SM Init Code Fail !! result = %x\n", result);
		return USB_STOR_TRANSPORT_ERROR;
	}

	/* Byte 0 is a packed status bitfield; reinterpret in place. */
	us->SM_Status = *(PSM_STATUS)&buf[0];

	us->SM_DeviceID = buf[1];
	us->SM_CardID = buf[2];

	if (us->SM_Status.Insert && us->SM_Status.Ready) {
		printk(KERN_INFO "Insert = %x\n", us->SM_Status.Insert);
		printk(KERN_INFO "Ready = %x\n", us->SM_Status.Ready);
		printk(KERN_INFO "WtP = %x\n", us->SM_Status.WtP);
		printk(KERN_INFO "DeviceID = %x\n", us->SM_DeviceID);
		printk(KERN_INFO "CardID = %x\n", us->SM_CardID);
		MediaChange = 1;
		Check_D_MediaFmt(us);
	} else {
		printk(KERN_ERR "SM Card Not Ready --- %x\n", buf[0]);
		return USB_STOR_TRANSPORT_ERROR;
	}

	return USB_STOR_TRANSPORT_GOOD;
}

/*
 * ENE_LoadBinCode() - download a 0x800-byte firmware blob into the reader.
 * @us:   per-device usb-storage state.
 * @flag: which blob to send (SM_INIT_PATTERN or SM_RW_PATTERN).
 *
 * Caches the last-downloaded pattern in us->BIN_FLAG and short-circuits
 * if it is already resident.  The blob is copied to a kmalloc'd bounce
 * buffer and written with vendor opcode 0xEF.
 *
 * NOTE(review): us->BIN_FLAG is updated even when the transfer failed, so
 * a failed download will not be retried on the next call — confirm this
 * is intentional.  Also, an unknown 'flag' falls through the switch and
 * sends an uninitialised buffer; callers currently only pass the two
 * known patterns.
 */
int ENE_LoadBinCode(struct us_data *us, BYTE flag)
{
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
	int result;
	/* void *buf; */
	PBYTE buf;

	/* printk(KERN_INFO "transport --- ENE_LoadBinCode\n"); */
	if (us->BIN_FLAG == flag)
		return USB_STOR_TRANSPORT_GOOD;

	buf = kmalloc(0x800, GFP_KERNEL);
	if (buf == NULL)
		return USB_STOR_TRANSPORT_ERROR;
	switch (flag) {
	/* For SS */
	case SM_INIT_PATTERN:
		printk(KERN_INFO "SM_INIT_PATTERN\n");
		memcpy(buf, SM_Init, 0x800);
		break;
	case SM_RW_PATTERN:
		printk(KERN_INFO "SM_RW_PATTERN\n");
		memcpy(buf, SM_Rdwr, 0x800);
		break;
	}

	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->DataTransferLength = 0x800;
	bcb->Flags = 0x00;	/* data-out */
	bcb->CDB[0] = 0xEF;	/* vendor opcode: firmware download */

	result = ENE_SendScsiCmd(us, FDIR_WRITE, buf, 0);

	kfree(buf);
	us->BIN_FLAG = flag;
	return result;
}

/*
 * ENE_SendScsiCmd() - run one full bulk-only transport cycle:
 * CBW out -> optional data stage -> CSW in.
 * @us:     per-device usb-storage state (CBW must already be built in iobuf).
 * @fDir:   FDIR_READ or FDIR_WRITE; selects the data-stage pipe.
 * @buf:    data-stage buffer, or NULL for no data stage.
 * @use_sg: nonzero to transfer via the srb's scatter-gather list instead
 *          of the flat buffer.
 *
 * Mirrors usb_stor_Bulk_transport(): retries a 0-length CSW once, retries
 * once more after a stall, and folds the reported residue into the srb
 * (unless US_FL_IGNORE_RESIDUE).  Returns USB_STOR_TRANSPORT_GOOD only if
 * every stage succeeded and the CSW status is US_BULK_STAT_OK.
 *
 * NOTE(review): on a failed data stage this returns without draining the
 * CSW, which can leave the device's BOT state machine out of step.
 */
int ENE_SendScsiCmd(struct us_data *us, BYTE fDir, void *buf, int use_sg)
{
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
	struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf;
	int result;
	unsigned int transfer_length = bcb->DataTransferLength,
		     cswlen = 0, partial = 0;
	unsigned int residue;

	/* printk(KERN_INFO "transport --- ENE_SendScsiCmd\n"); */
	/* send cmd to out endpoint */
	result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
					    bcb, US_BULK_CB_WRAP_LEN, NULL);
	if (result != USB_STOR_XFER_GOOD) {
		printk(KERN_ERR "send cmd to out endpoint fail ---\n");
		return USB_STOR_TRANSPORT_ERROR;
	}

	if (buf) {
		unsigned int pipe = fDir;

		if (fDir == FDIR_READ)
			pipe = us->recv_bulk_pipe;
		else
			pipe = us->send_bulk_pipe;

		/* Bulk */
		if (use_sg)
			result = usb_stor_bulk_srb(us, pipe, us->srb);
		else
			result = usb_stor_bulk_transfer_sg(us, pipe, buf,
						transfer_length, 0, &partial);
		if (result != USB_STOR_XFER_GOOD) {
			printk(KERN_ERR "data transfer fail ---\n");
			return USB_STOR_TRANSPORT_ERROR;
		}
	}

	/* Get CSW for device status */
	result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs,
					    US_BULK_CS_WRAP_LEN, &cswlen);

	if (result == USB_STOR_XFER_SHORT && cswlen == 0) {
		printk(KERN_WARNING "Received 0-length CSW; retrying...\n");
		result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
					bcs, US_BULK_CS_WRAP_LEN, &cswlen);
	}

	if (result == USB_STOR_XFER_STALLED) {
		/* get the status again */
		printk(KERN_WARNING "Attempting to get CSW (2nd try)...\n");
		result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
						bcs, US_BULK_CS_WRAP_LEN, NULL);
	}

	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	/* check bulk status */
	residue = le32_to_cpu(bcs->Residue);

	/*
	 * try to compute the actual residue, based on how much data
	 * was really transferred and what the device tells us
	 */
	if (residue && !(us->fflags & US_FL_IGNORE_RESIDUE)) {
		residue = min(residue, transfer_length);
		if (us->srb)
			scsi_set_resid(us->srb, max(scsi_get_resid(us->srb),
							(int) residue));
	}

	if (bcs->Status != US_BULK_STAT_OK)
		return USB_STOR_TRANSPORT_ERROR;

	return USB_STOR_TRANSPORT_GOOD;
}

/*
 * ENE_Read_Data() - fetch a data block from the reader (vendor opcode
 * 0xED, fixed sub-address FF81) with an open-coded CBW/data/CSW cycle.
 * @us:     per-device usb-storage state.
 * @buf:    destination buffer.
 * @length: number of bytes to read.
 *
 * Unlike ENE_SendScsiCmd() this has no short-CSW or stall retry logic.
 * Returns USB_STOR_TRANSPORT_GOOD only on a fully clean cycle.
 */
int ENE_Read_Data(struct us_data *us, void *buf, unsigned int length)
{
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
	struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf;
	int result;

	/* printk(KERN_INFO "transport --- ENE_Read_Data\n"); */
	/* set up the command wrapper */
	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->DataTransferLength = length;
	bcb->Flags = 0x80;	/* data-in */
	bcb->CDB[0] = 0xED;
	bcb->CDB[2] = 0xFF;
	bcb->CDB[3] = 0x81;

	/* send cmd to out endpoint */
	result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb,
					    US_BULK_CB_WRAP_LEN, NULL);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	/* R/W data */
	result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
					    buf, length, NULL);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	/* Get CSW for device status */
	result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs,
					    US_BULK_CS_WRAP_LEN, NULL);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;
	if (bcs->Status != US_BULK_STAT_OK)
		return USB_STOR_TRANSPORT_ERROR;

	return USB_STOR_TRANSPORT_GOOD;
}

/*
 * ENE_Write_Data() - mirror of ENE_Read_Data() for the host-to-device
 * direction (vendor opcode 0xEE, sub-address FF81, data stage on the
 * send pipe).
 * @us:     per-device usb-storage state.
 * @buf:    source buffer.
 * @length: number of bytes to write.
 */
int ENE_Write_Data(struct us_data *us, void *buf, unsigned int length)
{
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
	struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf;
	int result;

	/* printk("transport --- ENE_Write_Data\n"); */
	/* set up the command wrapper */
	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->DataTransferLength = length;
	bcb->Flags = 0x00;	/* data-out */
	bcb->CDB[0] = 0xEE;
	bcb->CDB[2] = 0xFF;
	bcb->CDB[3] = 0x81;

	/* send cmd to out endpoint */
	result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb,
					    US_BULK_CB_WRAP_LEN, NULL);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	/* R/W data */
	result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
					    buf, length, NULL);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	/* Get CSW for device status */
	result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs,
					    US_BULK_CS_WRAP_LEN, NULL);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;
	if (bcs->Status != US_BULK_STAT_OK)
		return USB_STOR_TRANSPORT_ERROR;

	return USB_STOR_TRANSPORT_GOOD;
}

/*
 * usb_stor_print_cmd() - debug helper: log the SCSI opcode of a request.
 * @srb: the SCSI command to describe.
 *
 * Decodes the CDB's block number (bn) and block length (blen) for the
 * READ/WRITE cases, though those printks are currently commented out, so
 * bn/blen are effectively unused.  The trailing 'bn = 0; blen = 0;' only
 * silences set-but-unused warnings; it has no other effect.
 */
void usb_stor_print_cmd(struct scsi_cmnd *srb)
{
	PBYTE Cdb = srb->cmnd;
	DWORD cmd = Cdb[0];
	DWORD bn = ((Cdb[2] << 24) & 0xff000000) |
		   ((Cdb[3] << 16) & 0x00ff0000) |
		   ((Cdb[4] << 8) & 0x0000ff00) |
		   ((Cdb[5] << 0) & 0x000000ff);
	WORD blen = ((Cdb[7] << 8) & 0xff00) | ((Cdb[8] << 0) & 0x00ff);

	switch (cmd) {
	case TEST_UNIT_READY:
		/* printk(KERN_INFO "scsi cmd %X --- SCSIOP_TEST_UNIT_READY\n", cmd); */
		break;
	case INQUIRY:
		printk(KERN_INFO "scsi cmd %X --- SCSIOP_INQUIRY\n", cmd);
		break;
	case MODE_SENSE:
		printk(KERN_INFO "scsi cmd %X --- SCSIOP_MODE_SENSE\n", cmd);
		break;
	case START_STOP:
		printk(KERN_INFO "scsi cmd %X --- SCSIOP_START_STOP\n", cmd);
		break;
	case READ_CAPACITY:
		printk(KERN_INFO "scsi cmd %X --- SCSIOP_READ_CAPACITY\n", cmd);
		break;
	case READ_10:
		/* printk(KERN_INFO "scsi cmd %X --- SCSIOP_READ,bn = %X, blen = %X\n" ,cmd, bn, blen); */
		break;
	case WRITE_10:
		/* printk(KERN_INFO "scsi cmd %X --- SCSIOP_WRITE, bn = %X, blen = %X\n" , cmd, bn, blen); */
		break;
	case ALLOW_MEDIUM_REMOVAL:
		printk(KERN_INFO "scsi cmd %X --- SCSIOP_ALLOW_MEDIUM_REMOVAL\n", cmd);
		break;
	default:
		printk(KERN_INFO "scsi cmd %X --- Other cmd\n", cmd);
		break;
	}
	bn = 0;
	blen = 0;
}
gpl-2.0
evnit/android_kernel_samsung_msm8660-common-10.2
drivers/target/target_core_tmr.c
2377
13390
/*******************************************************************************
 * Filename:  target_core_tmr.c
 *
 * This file contains SPC-3 task management infrastructure
 *
 * Copyright (c) 2009,2010 Rising Tide Systems
 * Copyright (c) 2009,2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/version.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tmr.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>

#include "target_core_alua.h"
#include "target_core_pr.h"

/* Compile-time toggle for verbose LUN RESET tracing via printk. */
#define DEBUG_LUN_RESET
#ifdef DEBUG_LUN_RESET
#define DEBUG_LR(x...) printk(KERN_INFO x)
#else
#define DEBUG_LR(x...)
#endif

/*
 * core_tmr_alloc_req() - allocate and initialise a task-management request.
 * @se_cmd:         the se_cmd carrying the TMR (may be associated later).
 * @fabric_tmr_ptr: opaque fabric-module cookie stored for the caller.
 * @function:       TMR function code (e.g. LUN_RESET).
 *
 * Uses GFP_ATOMIC when called from interrupt context, GFP_KERNEL otherwise.
 * Returns the zeroed request, or ERR_PTR(-ENOMEM) on allocation failure.
 */
struct se_tmr_req *core_tmr_alloc_req(
	struct se_cmd *se_cmd,
	void *fabric_tmr_ptr,
	u8 function)
{
	struct se_tmr_req *tmr;

	tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ?
					GFP_ATOMIC : GFP_KERNEL);
	if (!(tmr)) {
		printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
		return ERR_PTR(-ENOMEM);
	}
	tmr->task_cmd = se_cmd;
	tmr->fabric_tmr_ptr = fabric_tmr_ptr;
	tmr->function = function;
	INIT_LIST_HEAD(&tmr->tmr_list);

	return tmr;
}
EXPORT_SYMBOL(core_tmr_alloc_req);

/*
 * core_tmr_release_req() - unlink a TMR from its device list and free it.
 * @tmr: request to release; tmr->tmr_dev may be NULL if the TMR was never
 *       attached to a device, in which case it is freed directly.
 */
void core_tmr_release_req(
	struct se_tmr_req *tmr)
{
	struct se_device *dev = tmr->tmr_dev;

	if (!dev) {
		kmem_cache_free(se_tmr_req_cache, tmr);
		return;
	}

	spin_lock(&dev->se_tmr_lock);
	list_del(&tmr->tmr_list);
	spin_unlock(&dev->se_tmr_lock);

	kmem_cache_free(se_tmr_req_cache, tmr);
}

/*
 * core_tmr_handle_tas_abort() - finish an aborted command, optionally
 * sending TASK ABORTED status per the TAS rules.
 * @tmr_nacl: initiator node that issued the TMR (may be NULL).
 * @cmd:      the command being aborted.
 * @tas:      the device's emulate_tas setting.
 * @fe_count: frontend reference count; zero means no fabric reference
 *            remains and the command can be torn down immediately.
 */
static void core_tmr_handle_tas_abort(
	struct se_node_acl *tmr_nacl,
	struct se_cmd *cmd,
	int tas,
	int fe_count)
{
	if (!(fe_count)) {
		transport_cmd_finish_abort(cmd, 1);
		return;
	}
	/*
	 * TASK ABORTED status (TAS) bit support
	 */
	if (((tmr_nacl != NULL) &&
	     (tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
		transport_send_task_abort(cmd);

	transport_cmd_finish_abort(cmd, 0);
}

/*
 * core_tmr_lun_reset() - perform a LUN RESET (or PROUT PREEMPT_AND_ABORT)
 * against @dev, aborting every outstanding TMR, task and queued command.
 * @dev:                    target device being reset.
 * @tmr:                    the initiating TMR, or NULL for passthrough;
 *                          it is skipped so it can complete normally.
 * @preempt_and_abort_list: when non-NULL, restricts the abort to commands
 *                          matching the PR registration list (PREEMPT_AND_ABORT
 *                          semantics) and suppresses the SPC-2 release.
 * @prout_cmd:              the PROUT command itself, never aborted.
 *
 * Three passes, each under its own lock with careful drop/retake around
 * blocking or re-entrant calls: (1) dev->se_tmr_lock over dev_tmr_list,
 * (2) dev->execute_task_lock over state_task_list, (3) qobj->cmd_queue_lock
 * over the device queue.  Always returns 0.
 */
int core_tmr_lun_reset(
	struct se_device *dev,
	struct se_tmr_req *tmr,
	struct list_head *preempt_and_abort_list,
	struct se_cmd *prout_cmd)
{
	struct se_cmd *cmd;
	struct se_queue_req *qr, *qr_tmp;
	struct se_node_acl *tmr_nacl = NULL;
	struct se_portal_group *tmr_tpg = NULL;
	struct se_queue_obj *qobj = dev->dev_queue_obj;
	struct se_tmr_req *tmr_p, *tmr_pp;
	struct se_task *task, *task_tmp;
	unsigned long flags;
	int fe_count, state, tas;
	/*
	 * TASK_ABORTED status bit, this is configurable via ConfigFS
	 * struct se_device attributes.  spc4r17 section 7.4.6 Control mode page
	 *
	 * A task aborted status (TAS) bit set to zero specifies that aborted
	 * tasks shall be terminated by the device server without any response
	 * to the application client. A TAS bit set to one specifies that tasks
	 * aborted by the actions of an I_T nexus other than the I_T nexus on
	 * which the command was received shall be completed with TASK ABORTED
	 * status (see SAM-4).
	 */
	tas = DEV_ATTRIB(dev)->emulate_tas;
	/*
	 * Determine if this se_tmr is coming from a $FABRIC_MOD
	 * or struct se_device passthrough..
	 */
	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
		tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
		if (tmr_nacl && tmr_tpg) {
			DEBUG_LR("LUN_RESET: TMR caller fabric: %s"
				" initiator port %s\n",
				TPG_TFO(tmr_tpg)->get_fabric_name(),
				tmr_nacl->initiatorname);
		}
	}
	DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n",
		(preempt_and_abort_list) ? "Preempt" : "TMR",
		TRANSPORT(dev)->name, tas);
	/*
	 * Release all pending and outgoing TMRs aside from the received
	 * LUN_RESET tmr..
	 */
	spin_lock(&dev->se_tmr_lock);
	list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
		/*
		 * Allow the received TMR to return with FUNCTION_COMPLETE.
		 */
		if (tmr && (tmr_p == tmr))
			continue;

		cmd = tmr_p->task_cmd;
		if (!(cmd)) {
			printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n");
			continue;
		}
		/*
		 * If this function was called with a valid pr_res_key
		 * parameter (eg: for PROUT PREEMPT_AND_ABORT service action),
		 * skip non registration key matching TMRs.
		 */
		if ((preempt_and_abort_list != NULL) &&
		    (core_scsi3_check_cdb_abort_and_preempt(
					preempt_and_abort_list, cmd) != 0))
			continue;
		/* se_tmr_lock is dropped while inspecting/aborting the cmd
		 * and retaken before the next list iteration. */
		spin_unlock(&dev->se_tmr_lock);

		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
		if (!(atomic_read(&T_TASK(cmd)->t_transport_active))) {
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
			spin_lock(&dev->se_tmr_lock);
			continue;
		}
		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
			spin_lock(&dev->se_tmr_lock);
			continue;
		}
		DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
			" Response: 0x%02x, t_state: %d\n",
			(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
			tmr_p->function, tmr_p->response, cmd->t_state);
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		transport_cmd_finish_abort_tmr(cmd);
		spin_lock(&dev->se_tmr_lock);
	}
	spin_unlock(&dev->se_tmr_lock);
	/*
	 * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
	 * This is following sam4r17, section 5.6 Aborting commands, Table 38
	 * for TMR LUN_RESET:
	 *
	 * a) "Yes" indicates that each command that is aborted on an I_T nexus
	 * other than the one that caused the SCSI device condition is
	 * completed with TASK ABORTED status, if the TAS bit is set to one in
	 * the Control mode page (see SPC-4). "No" indicates that no status is
	 * returned for aborted commands.
	 *
	 * d) If the logical unit reset is caused by a particular I_T nexus
	 * (e.g., by a LOGICAL UNIT RESET task management function), then "yes"
	 * (TASK_ABORTED status) applies.
	 *
	 * Otherwise (e.g., if triggered by a hard reset), "no"
	 * (no TASK_ABORTED SAM status) applies.
	 *
	 * Note that this seems to be independent of TAS (Task Aborted Status)
	 * in the Control Mode Page.
	 */
	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
				t_state_list) {
		if (!(TASK_CMD(task))) {
			printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
			continue;
		}
		cmd = TASK_CMD(task);

		if (!T_TASK(cmd)) {
			printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
				" %p ITT: 0x%08x\n", task, cmd,
				CMD_TFO(cmd)->get_task_tag(cmd));
			continue;
		}
		/*
		 * For PREEMPT_AND_ABORT usage, only process commands
		 * with a matching reservation key.
		 */
		if ((preempt_and_abort_list != NULL) &&
		    (core_scsi3_check_cdb_abort_and_preempt(
					preempt_and_abort_list, cmd) != 0))
			continue;
		/*
		 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
		 */
		if (prout_cmd == cmd)
			continue;

		list_del(&task->t_state_list);
		atomic_set(&task->task_state_active, 0);
		/* NOTE: 'flags' is reused for both execute_task_lock and
		 * t_state_lock; safe only because the locks never nest here. */
		spin_unlock_irqrestore(&dev->execute_task_lock, flags);

		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
		DEBUG_LR("LUN_RESET: %s cmd: %p task: %p"
			" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
			"def_t_state: %d/%d cdb: 0x%02x\n",
			(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
			CMD_TFO(cmd)->get_task_tag(cmd), 0,
			CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
			cmd->deferred_t_state, T_TASK(cmd)->t_task_cdb[0]);
		DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
			" t_task_cdbs: %d t_task_cdbs_left: %d"
			" t_task_cdbs_sent: %d -- t_transport_active: %d"
			" t_transport_stop: %d t_transport_sent: %d\n",
			CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
			T_TASK(cmd)->t_task_cdbs,
			atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
			atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
			atomic_read(&T_TASK(cmd)->t_transport_active),
			atomic_read(&T_TASK(cmd)->t_transport_stop),
			atomic_read(&T_TASK(cmd)->t_transport_sent));

		if (atomic_read(&task->task_active)) {
			/* Task is executing: request stop and block until the
			 * backend signals task_stop_comp. */
			atomic_set(&task->task_stop, 1);
			spin_unlock_irqrestore(
				&T_TASK(cmd)->t_state_lock, flags);

			DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown"
				" for dev: %p\n", task, dev);
			wait_for_completion(&task->task_stop_comp);
			DEBUG_LR("LUN_RESET Completed task: %p shutdown for"
				" dev: %p\n", task, dev);
			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
			atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);

			atomic_set(&task->task_active, 0);
			atomic_set(&task->task_stop, 0);
		} else {
			if (atomic_read(&task->task_execute_queue) != 0)
				transport_remove_task_from_execute_queue(task, dev);
		}
		__transport_stop_task_timer(task, &flags);

		if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
			/* Other tasks of this cmd are still outstanding;
			 * defer the abort completion to the last one. */
			spin_unlock_irqrestore(
					&T_TASK(cmd)->t_state_lock, flags);
			DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for"
				" t_task_cdbs_ex_left: %d\n", task, dev,
				atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));

			spin_lock_irqsave(&dev->execute_task_lock, flags);
			continue;
		}
		fe_count = atomic_read(&T_TASK(cmd)->t_fe_count);

		if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
			DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
				" task: %p, t_fe_count: %d dev: %p\n", task,
				fe_count, dev);
			atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
						flags);
			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);

			spin_lock_irqsave(&dev->execute_task_lock, flags);
			continue;
		}
		DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
			" t_fe_count: %d dev: %p\n", task, fe_count, dev);
		atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);

		spin_lock_irqsave(&dev->execute_task_lock, flags);
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
	/*
	 * Release all commands remaining in the struct se_device cmd queue.
	 *
	 * This follows the same logic as above for the struct se_device
	 * struct se_task state list, where commands are returned with
	 * TASK_ABORTED status, if there is an outstanding $FABRIC_MOD
	 * reference, otherwise the struct se_cmd is released.
	 */
	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	list_for_each_entry_safe(qr, qr_tmp, &qobj->qobj_list, qr_list) {
		cmd = (struct se_cmd *)qr->cmd;
		if (!(cmd)) {
			/*
			 * Skip these for non PREEMPT_AND_ABORT usage..
			 */
			if (preempt_and_abort_list != NULL)
				continue;

			atomic_dec(&qobj->queue_cnt);
			list_del(&qr->qr_list);
			kfree(qr);
			continue;
		}
		/*
		 * For PREEMPT_AND_ABORT usage, only process commands
		 * with a matching reservation key.
		 */
		if ((preempt_and_abort_list != NULL) &&
		    (core_scsi3_check_cdb_abort_and_preempt(
					preempt_and_abort_list, cmd) != 0))
			continue;
		/*
		 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
		 */
		if (prout_cmd == cmd)
			continue;

		atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
		atomic_dec(&qobj->queue_cnt);
		list_del(&qr->qr_list);
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

		state = qr->state;
		kfree(qr);

		DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
			" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
			"Preempt" : "", cmd, state,
			atomic_read(&T_TASK(cmd)->t_fe_count));
		/*
		 * Signal that the command has failed via cmd->se_cmd_flags,
		 * and call TFO->new_cmd_failure() to wakeup any fabric
		 * dependent code used to wait for unsolicited data out
		 * allocation to complete.  The fabric module is expected
		 * to dump any remaining unsolicited data out for the aborted
		 * command at this point.
		 */
		transport_new_cmd_failure(cmd);

		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
				atomic_read(&T_TASK(cmd)->t_fe_count));
		spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	}
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
	/*
	 * Clear any legacy SPC-2 reservation when called during
	 * LOGICAL UNIT RESET
	 */
	if (!(preempt_and_abort_list) &&
	     (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
		spin_lock(&dev->dev_reservation_lock);
		dev->dev_reserved_node_acl = NULL;
		dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
		spin_unlock(&dev->dev_reservation_lock);
		printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
	}

	spin_lock_irq(&dev->stats_lock);
	dev->num_resets++;
	spin_unlock_irq(&dev->stats_lock);

	DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
		(preempt_and_abort_list) ? "Preempt" : "TMR",
		TRANSPORT(dev)->name);
	return 0;
}
gpl-2.0
BigBot96/android_kernel_samsung_espressovzw-jb
arch/powerpc/platforms/85xx/ksi8560.c
2889
6194
/*
 * Board setup routines for the Emerson KSI8560
 *
 * Author: Alexandr Smirnov <asmirnov@ru.mvista.com>
 *
 * Based on mpc85xx_ads.c maintained by Kumar Gala
 *
 * 2008 (c) MontaVista, Software, Inc.  This file is licensed under
 * the terms of the GNU General Public License version 2.  This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 *
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>

#include <asm/system.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/mpic.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
#include <asm/prom.h>

#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>

#include <asm/cpm2.h>
#include <sysdev/cpm2_pic.h>

/* Offsets into the board CPLD register window (mapped in setup_arch). */
#define KSI8560_CPLD_HVR		0x04 /* Hardware Version Register */
#define KSI8560_CPLD_PVR		0x08 /* PLD Version Register */
#define KSI8560_CPLD_RCR1		0x30 /* Reset Command Register 1 */

#define KSI8560_CPLD_RCR1_CPUHR		0x80 /* CPU Hard Reset */

/* Mapped CPLD registers; NULL until ksi8560_setup_arch() finds the node. */
static void __iomem *cpld_base = NULL;

/*
 * machine_restart() - board reset hook: ask the CPLD for a CPU hard reset.
 * Spins forever afterwards (or immediately, if the CPLD was never mapped).
 */
static void machine_restart(char *cmd)
{
	if (cpld_base)
		out_8(cpld_base + KSI8560_CPLD_RCR1, KSI8560_CPLD_RCR1_CPUHR);
	else
		printk(KERN_ERR "Can't find CPLD base, hang forever\n");

	for (;;);
}

/*
 * cpm2_cascade() - chained-IRQ handler: drain every pending CPM2 interrupt
 * behind the cascade line, then EOI the parent.
 */
static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	int cascade_irq;

	while ((cascade_irq = cpm2_get_irq()) >= 0)
		generic_handle_irq(cascade_irq);

	chip->irq_eoi(&desc->irq_data);
}

/*
 * ksi8560_pic_init() - locate the MPIC in the device tree, initialise it,
 * and (with CONFIG_CPM2) hook the CPM2 PIC behind it via cpm2_cascade().
 * Errors are logged and abort the remaining init; nothing is returned.
 */
static void __init ksi8560_pic_init(void)
{
	struct mpic *mpic;
	struct resource r;
	struct device_node *np;
#ifdef CONFIG_CPM2
	int irq;
#endif

	np = of_find_node_by_type(NULL, "open-pic");
	if (np == NULL) {
		printk(KERN_ERR "Could not find open-pic node\n");
		return;
	}

	if (of_address_to_resource(np, 0, &r)) {
		printk(KERN_ERR "Could not map mpic register space\n");
		of_node_put(np);
		return;
	}

	mpic = mpic_alloc(np, r.start,
			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
			0, 256, " OpenPIC  ");
	BUG_ON(mpic == NULL);
	of_node_put(np);

	mpic_init(mpic);

#ifdef CONFIG_CPM2
	/* Setup CPM2 PIC */
	np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic");
	if (np == NULL) {
		printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n");
		return;
	}
	irq = irq_of_parse_and_map(np, 0);

	cpm2_pic_init(np);
	of_node_put(np);
	irq_set_chained_handler(irq, cpm2_cascade);
#endif
}

#ifdef CONFIG_CPM2
/*
 * Setup I/O ports
 */
struct cpm_pin {
	int port, pin, flags;
};

/* Static pin-mux table: SCC1/SCC2 serial and FCC1 ethernet pins. */
static struct cpm_pin __initdata ksi8560_pins[] = {
	/* SCC1 */
	{3, 29, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{3, 30, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{3, 31, CPM_PIN_INPUT | CPM_PIN_PRIMARY},

	/* SCC2 */
	{3, 26, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{3, 27, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{3, 28, CPM_PIN_INPUT | CPM_PIN_PRIMARY},

	/* FCC1 */
	{0, 14, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{0, 15, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{0, 16, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{0, 17, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{0, 18, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{0, 19, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{0, 20, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{0, 21, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{0, 26, CPM_PIN_INPUT | CPM_PIN_SECONDARY},
	{0, 27, CPM_PIN_INPUT | CPM_PIN_SECONDARY},
	{0, 28, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{0, 29, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{0, 30, CPM_PIN_INPUT | CPM_PIN_SECONDARY},
	{0, 31, CPM_PIN_INPUT | CPM_PIN_SECONDARY},
	{2, 23, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* CLK9 */
	{2, 22, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* CLK10 */
};

/*
 * init_ioports() - apply the pin-mux table and route the SCC/FCC clocks
 * (SCCs from their baud-rate generators, FCC1 from CLK9/CLK10).
 */
static void __init init_ioports(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ksi8560_pins); i++) {
		struct cpm_pin *pin = &ksi8560_pins[i];
		cpm2_set_pin(pin->port, pin->pin, pin->flags);
	}

	cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_RX);
	cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_TX);
	cpm2_clk_setup(CPM_CLK_SCC2, CPM_BRG2, CPM_CLK_RX);
	cpm2_clk_setup(CPM_CLK_SCC2, CPM_BRG2, CPM_CLK_TX);
	cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK9, CPM_CLK_RX);
	cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK10, CPM_CLK_TX);
}
#endif

/*
 * Setup the architecture
 */
static void __init ksi8560_setup_arch(void)
{
	struct device_node *cpld;

	cpld = of_find_compatible_node(NULL, NULL, "emerson,KSI8560-cpld");
	if (cpld)
		cpld_base = of_iomap(cpld, 0);
	else
		printk(KERN_ERR "Can't find CPLD in device tree\n");

	if (ppc_md.progress)
		ppc_md.progress("ksi8560_setup_arch()", 0);

#ifdef CONFIG_CPM2
	cpm2_reset();
	init_ioports();
#endif
}

/*
 * ksi8560_show_cpuinfo() - /proc/cpuinfo hook: board identity, CPLD and
 * hardware revisions (if mapped), PVR/SVR and the CPU PLL setting.
 */
static void ksi8560_show_cpuinfo(struct seq_file *m)
{
	uint pvid, svid, phid1;

	pvid = mfspr(SPRN_PVR);
	svid = mfspr(SPRN_SVR);

	seq_printf(m, "Vendor\t\t: Emerson Network Power\n");
	seq_printf(m, "Board\t\t: KSI8560\n");

	if (cpld_base) {
		seq_printf(m, "Hardware rev\t: %d\n",
					in_8(cpld_base + KSI8560_CPLD_HVR));
		seq_printf(m, "CPLD rev\t: %d\n",
					in_8(cpld_base + KSI8560_CPLD_PVR));
	} else
		seq_printf(m, "Unknown Hardware and CPLD revs\n");

	seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
	seq_printf(m, "SVR\t\t: 0x%x\n", svid);

	/* Display cpu Pll setting */
	phid1 = mfspr(SPRN_HID1);
	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
}

/* Bus types/nodes probed for platform devices at boot. */
static struct of_device_id __initdata of_bus_ids[] = {
	{ .type = "soc", },
	{ .type = "simple-bus", },
	{ .name = "cpm", },
	{ .name = "localbus", },
	{ .compatible = "gianfar", },
	{},
};

static int __init declare_of_platform_devices(void)
{
	of_platform_bus_probe(NULL, of_bus_ids, NULL);

	return 0;
}
machine_device_initcall(ksi8560, declare_of_platform_devices);

/*
 * Called very early, device-tree isn't unflattened
 */
static int __init ksi8560_probe(void)
{
	unsigned long root = of_get_flat_dt_root();

	return of_flat_dt_is_compatible(root, "emerson,KSI8560");
}

define_machine(ksi8560) {
	.name			= "KSI8560",
	.probe			= ksi8560_probe,
	.setup_arch		= ksi8560_setup_arch,
	.init_IRQ		= ksi8560_pic_init,
	.show_cpuinfo		= ksi8560_show_cpuinfo,
	.get_irq		= mpic_get_irq,
	.restart		= machine_restart,
	.calibrate_decr		= generic_calibrate_decr,
};
gpl-2.0
TheBootloader/android_kernel_shooter
arch/m68k/platform/68VZ328/config.c
3145
4571
/***************************************************************************/

/*
 *	linux/arch/m68knommu/platform/68VZ328/config.c
 *
 *	Copyright (C) 1993 Hamish Macdonald
 *	Copyright (C) 1999 D. Jeff Dionne
 *	Copyright (C) 2001 Georges Menie, Ken Desmet
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

/***************************************************************************/

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kd.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/MC68VZ328.h>
#include <asm/bootstd.h>

#ifdef CONFIG_INIT_LCD
#include "bootlogo.h"
#endif

/***************************************************************************/

/* RTC read hook, implemented elsewhere; installed in config_BSP(). */
void m68328_timer_gettod(int *year, int *mon, int *day, int *hour,
		int *min, int *sec);

/***************************************************************************/
/*                        Init Drangon Engine hardware                     */
/***************************************************************************/
#if defined(CONFIG_DRAGEN2)

/*
 * m68vz328_reset() - DragonEngine2 reboot: disable IRQs (and the LCD/CCFL
 * if configured), issue the CPU 'reset', then re-enter the boot vector at
 * 0x04000000 by reloading SP/PC from its first two longwords.
 */
static void m68vz328_reset(void)
{
	local_irq_disable();

#ifdef CONFIG_INIT_LCD
	PBDATA |= 0x20;				/* disable CCFL light */
	PKDATA |= 0x4;				/* disable LCD controller */
	LCKCON = 0;
#endif

	__asm__ __volatile__(
		"reset\n\t"
		"moveal #0x04000000, %a0\n\t"
		"moveal 0(%a0), %sp\n\t"
		"moveal 4(%a0), %a0\n\t"
		"jmp (%a0)"
	);
}

/*
 * init_hardware() - DragonEngine2 board bring-up: chip selects, CS8900
 * ethernet control pins and IRQ line, and (optionally) the LCD controller.
 * @command/@size are unused on this board.  Register write order follows
 * the original bring-up sequence — do not reorder.
 */
static void init_hardware(char *command, int size)
{
#ifdef CONFIG_DIRECT_IO_ACCESS
	SCR = 0x10;					/* allow user access to internal registers */
#endif

	/* CSGB Init */
	CSGBB = 0x4000;
	CSB = 0x1a1;

	/* CS8900 init */
	/* PK3: hardware sleep function pin, active low */
	PKSEL |= PK(3);				/* select pin as I/O */
	PKDIR |= PK(3);				/* select pin as output */
	PKDATA |= PK(3);			/* set pin high */

	/* PF5: hardware reset function pin, active high */
	PFSEL |= PF(5);				/* select pin as I/O */
	PFDIR |= PF(5);				/* select pin as output */
	PFDATA &= ~PF(5);			/* set pin low */

	/* cs8900 hardware reset: pulse PF5 high with a crude busy-wait */
	PFDATA |= PF(5);
	{ int i; for (i = 0; i < 32000; ++i); }
	PFDATA &= ~PF(5);

	/* INT1 enable (cs8900 IRQ) */
	PDPOL &= ~PD(1);			/* active high signal */
	PDIQEG &= ~PD(1);
	PDIRQEN |= PD(1);			/* IRQ enabled */

#ifdef CONFIG_INIT_LCD
	/* initialize LCD controller */
	LSSA = (long) screen_bits;
	LVPW = 0x14;
	LXMAX = 0x140;
	LYMAX = 0xef;
	LRRA = 0;
	LPXCD = 3;
	LPICF = 0x08;
	LPOLCF = 0;
	LCKCON = 0x80;
	PCPDEN = 0xff;
	PCSEL = 0;

	/* Enable LCD controller */
	PKDIR |= 0x4;
	PKSEL |= 0x4;
	PKDATA &= ~0x4;

	/* Enable CCFL backlighting circuit */
	PBDIR |= 0x20;
	PBSEL |= 0x20;
	PBDATA &= ~0x20;

	/* contrast control register */
	PFDIR |= 0x1;
	PFSEL &= ~0x1;
	PWMR = 0x037F;
#endif
}

/***************************************************************************/
/*                      Init RT-Control uCdimm hardware                    */
/***************************************************************************/
#elif defined(CONFIG_UCDIMM)

/*
 * m68vz328_reset() - uCdimm reboot: disable IRQs, poke the watchdog/reset
 * register at 0xFFFFF300, then re-enter the boot vector at 0x10c00000.
 */
static void m68vz328_reset(void)
{
	local_irq_disable();
	asm volatile (
		"moveal #0x10c00000, %a0;\n\t"
		"moveb #0, 0xFFFFF300;\n\t"
		"moveal 0(%a0), %sp;\n\t"
		"moveal 4(%a0), %a0;\n\t"
		"jmp (%a0);\n"
	);
}

/* Ethernet MAC address fetched from the bootloader; used by the cs8900a
 * driver. */
unsigned char *cs8900a_hwaddr;
static int errno;

/* Bootloader system-call stubs (bootstd): serial number, MAC, env vars. */
_bsc0(char *, getserialnum)
_bsc1(unsigned char *, gethwaddr, int, a)
_bsc1(char *, getbenv, char *, a)

/*
 * init_hardware() - uCdimm bring-up: report the module serial number and
 * MAC address, and replace the kernel command line with the bootloader's
 * APPEND variable (or clear it if unset).
 * @command: kernel command line buffer, overwritten in place.
 * @size:    buffer size (unused here — NOTE(review): strcpy is unbounded;
 *           relies on APPEND fitting in the buffer).
 */
static void init_hardware(char *command, int size)
{
	char *p;

	printk(KERN_INFO "uCdimm serial string [%s]\n", getserialnum());
	p = cs8900a_hwaddr = gethwaddr(0);
	printk(KERN_INFO "uCdimm hwaddr %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
		p[0], p[1], p[2], p[3], p[4], p[5]);

	p = getbenv("APPEND");
	if (p)
		strcpy(p, command);
	else
		command[0] = 0;
}

/***************************************************************************/
#else

/* Fallback no-op implementations for unsupported board configurations. */
static void m68vz328_reset(void)
{
}

static void init_hardware(char *command, int size)
{
}

/***************************************************************************/
#endif
/***************************************************************************/

/*
 * config_BSP() - board-support entry point: run the board-specific
 * hardware init and install the machine RTC-read and reset hooks.
 * @command: kernel command line buffer (board code may rewrite it).
 * @size:    size of that buffer.
 */
void config_BSP(char *command, int size)
{
	printk(KERN_INFO "68VZ328 DragonBallVZ support (c) 2001 Lineo, Inc.\n");

	init_hardware(command, size);

	mach_gettod = m68328_timer_gettod;
	mach_reset = m68vz328_reset;
}

/***************************************************************************/
gpl-2.0
LeJay/android_kernel_samsung_jactiveltexx_stock
net/sunrpc/xprtrdma/svc_rdma_transport.c
4937
38881
/* * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the BSD-type * license below: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Network Appliance, Inc. nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/export.h>
#include "xprt_rdma.h"

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

/* Forward declarations for the svc_xprt_ops table below. */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void dto_tasklet_func(unsigned long data);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static void rq_cq_reap(struct svcxprt_rdma *xprt);
static void sq_cq_reap(struct svcxprt_rdma *xprt);

/*
 * Single tasklet shared by all svcrdma transports; transports with
 * pending completions are chained on dto_xprt_q under dto_lock.
 */
static DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
static DEFINE_SPINLOCK(dto_lock);
static LIST_HEAD(dto_xprt_q);

static struct svc_xprt_ops svc_rdma_ops = {
	.xpo_create = svc_rdma_create,
	.xpo_recvfrom = svc_rdma_recvfrom,
	.xpo_sendto = svc_rdma_sendto,
	.xpo_release_rqst = svc_rdma_release_rqst,
	.xpo_detach = svc_rdma_detach,
	.xpo_free = svc_rdma_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_has_wspace = svc_rdma_has_wspace,
	.xpo_accept = svc_rdma_accept,
};

struct svc_xprt_class svc_rdma_class = {
	.xcl_name = "rdma",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
};

/*
 * Allocate a per-WR operation context for @xprt.
 *
 * May sleep: uses GFP_KERNEL and, on allocation failure, backs off for
 * 500ms and retries forever rather than returning NULL.  The returned
 * context is accounted in sc_ctxt_used (balanced by
 * svc_rdma_put_context()).
 */
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt;

	while (1) {
		ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep, GFP_KERNEL);
		if (ctxt)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	ctxt->xprt = xprt;
	INIT_LIST_HEAD(&ctxt->dto_q);
	ctxt->count = 0;
	ctxt->frmr = NULL;
	atomic_inc(&xprt->sc_ctxt_used);
	return ctxt;
}

/*
 * Unmap every DMA-mapped SGE attached to @ctxt.  Iteration stops at
 * the first zero-length SGE or after ctxt->count entries.
 */
void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	int i;
	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
		/*
		 * Unmap the DMA addr in the SGE if the lkey matches
		 * the sc_dma_lkey, otherwise, ignore it since it is
		 * an FRMR lkey and will be unmapped later when the
		 * last WR that uses it completes.
		 */
		if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
			atomic_dec(&xprt->sc_dma_used);
			ib_dma_unmap_page(xprt->sc_cm_id->device,
					  ctxt->sge[i].addr,
					  ctxt->sge[i].length,
					  ctxt->direction);
		}
	}
}

/*
 * Release a context obtained from svc_rdma_get_context().
 *
 * @free_pages: when non-zero, also drop the reference on each page in
 *              ctxt->pages[0..count).  Callers pass 0 when the pages
 *              are still owned elsewhere (e.g. RDMA_WRITE completions).
 */
void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
	struct svcxprt_rdma *xprt;
	int i;

	BUG_ON(!ctxt);
	xprt = ctxt->xprt;
	if (free_pages)
		for (i = 0; i < ctxt->count; i++)
			put_page(ctxt->pages[i]);

	kmem_cache_free(svc_rdma_ctxt_cachep, ctxt);
	atomic_dec(&xprt->sc_ctxt_used);
}

/*
 * Temporary NFS req mappings are shared across all transport
 * instances. These are short lived and should be bounded by the number
 * of concurrent server threads * depth of the SQ.
 */
/*
 * Allocate a request map from the shared cache.  Like
 * svc_rdma_get_context(), this sleeps and retries forever on
 * allocation failure instead of returning NULL.
 */
struct svc_rdma_req_map *svc_rdma_get_req_map(void)
{
	struct svc_rdma_req_map *map;
	while (1) {
		map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL);
		if (map)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	map->count = 0;
	map->frmr = NULL;
	return map;
}

/* Return a request map to the shared cache. */
void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
{
	kmem_cache_free(svc_rdma_map_cachep, map);
}

/* ib_cq event handler: any CQ async event is treated as fatal and
 * marks the transport for close. */
static void cq_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;
	dprintk("svcrdma: received CQ event id=%d, context=%p\n",
		event->event, context);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
}

/* QP event handler: benign events are only logged; everything else
 * (including unknown events) marks the transport for close. */
static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		dprintk("svcrdma: QP event %d received for QP=%p\n",
			event->event, event->element.qp);
		break;
	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		dprintk("svcrdma: QP ERROR event %d received for QP=%p, "
			"closing transport\n",
			event->event, event->element.qp);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		break;
	}
}

/*
 * Data Transfer Operation Tasklet
 *
 * Walks a list of transports with I/O pending, removing entries as
 * they are added to the server's I/O pending list. Two bits indicate
 * if SQ, RQ, or both have I/O pending. The dto_lock is an irqsave
 * spinlock that serializes access to the transport list with the RQ
 * and SQ interrupt handlers.
 */
static void dto_tasklet_func(unsigned long data)
{
	struct svcxprt_rdma *xprt;
	unsigned long flags;

	spin_lock_irqsave(&dto_lock, flags);
	while (!list_empty(&dto_xprt_q)) {
		xprt = list_entry(dto_xprt_q.next,
				  struct svcxprt_rdma, sc_dto_q);
		/* list_del_init so the comp handlers' list_empty()
		 * check sees this transport as off the queue. */
		list_del_init(&xprt->sc_dto_q);
		/* Drop the lock while reaping: the reap functions take
		 * bh locks and may enqueue work. */
		spin_unlock_irqrestore(&dto_lock, flags);

		rq_cq_reap(xprt);
		sq_cq_reap(xprt);

		/* Drops the reference taken when the transport was
		 * queued by the completion handler. */
		svc_xprt_put(&xprt->sc_xprt);
		spin_lock_irqsave(&dto_lock, flags);
	}
	spin_unlock_irqrestore(&dto_lock, flags);
}

/*
 * Receive Queue Completion Handler
 *
 * Since an RQ completion handler is called on interrupt context, we
 * need to defer the handling of the I/O to a tasklet
 */
static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount)==0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an SQ
	 * completion.
	 */
	set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		/* Reference pairs with the svc_xprt_put() in
		 * dto_tasklet_func(). */
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}

/*
 * rq_cq_reap - Process the RQ CQ.
 *
 * Take all completing WC off the CQE and enqueue the associated DTO
 * context on the dto_q for the transport.
 *
 * Note that caller must hold a transport reference.
 */
static void rq_cq_reap(struct svcxprt_rdma *xprt)
{
	int ret;
	struct ib_wc wc;
	struct svc_rdma_op_ctxt *ctxt = NULL;

	if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
		return;

	/* Re-arm the CQ before polling so no completion is missed
	 * between the final poll and the next interrupt. */
	ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_rq_poll);

	while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
		/* wr_id carries the op context posted with the recv WR. */
		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		ctxt->wc_status = wc.status;
		ctxt->byte_len = wc.byte_len;
		svc_rdma_unmap_dma(ctxt);
		if (wc.status != IB_WC_SUCCESS) {
			/* Close the transport */
			dprintk("svcrdma: transport closing putting ctxt %p\n",
				ctxt);
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			svc_rdma_put_context(ctxt, 1);
			/* Drops the per-recv-WR reference taken in
			 * svc_rdma_post_recv(). */
			svc_xprt_put(&xprt->sc_xprt);
			continue;
		}
		spin_lock_bh(&xprt->sc_rq_dto_lock);
		list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
		spin_unlock_bh(&xprt->sc_rq_dto_lock);
		svc_xprt_put(&xprt->sc_xprt);
	}

	/* ctxt is non-NULL iff at least one WC was reaped above. */
	if (ctxt)
		atomic_inc(&rdma_stat_rq_prod);

	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
	/*
	 * If data arrived before established event,
	 * don't enqueue. This defers RPC I/O until the
	 * RDMA connection is complete.
	 */
	if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
		svc_xprt_enqueue(&xprt->sc_xprt);
}

/*
 * Process a completion context: dispatch on the work-request opcode
 * recorded in ctxt->wr_op and release the context's resources
 * accordingly.
 */
static void process_context(struct svcxprt_rdma *xprt,
			    struct svc_rdma_op_ctxt *ctxt)
{
	svc_rdma_unmap_dma(ctxt);

	switch (ctxt->wr_op) {
	case IB_WR_SEND:
		if (test_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags))
			svc_rdma_put_frmr(xprt, ctxt->frmr);
		/* Send owns its pages: drop them with the context. */
		svc_rdma_put_context(ctxt, 1);
		break;

	case IB_WR_RDMA_WRITE:
		/* Pages still belong to the reply path: keep them. */
		svc_rdma_put_context(ctxt, 0);
		break;

	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_READ_WITH_INV:
		if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
			/* Last read of a chunk list: hand the saved
			 * header context to the read-complete queue so
			 * svc_rdma_recvfrom can finish the RPC. */
			struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
			BUG_ON(!read_hdr);
			if (test_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags))
				svc_rdma_put_frmr(xprt, ctxt->frmr);
			spin_lock_bh(&xprt->sc_rq_dto_lock);
			set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
			list_add_tail(&read_hdr->dto_q,
				      &xprt->sc_read_complete_q);
			spin_unlock_bh(&xprt->sc_rq_dto_lock);
			svc_xprt_enqueue(&xprt->sc_xprt);
		}
		svc_rdma_put_context(ctxt, 0);
		break;

	default:
		printk(KERN_ERR "svcrdma: unexpected completion type, "
		       "opcode=%d\n",
		       ctxt->wr_op);
		break;
	}
}

/*
 * Send Queue Completion Handler - potentially called on interrupt context.
 *
 * Note that caller must hold a transport reference.
*/ static void sq_cq_reap(struct svcxprt_rdma *xprt) { struct svc_rdma_op_ctxt *ctxt = NULL; struct ib_wc wc; struct ib_cq *cq = xprt->sc_sq_cq; int ret; if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags)) return; ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP); atomic_inc(&rdma_stat_sq_poll); while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) { if (wc.status != IB_WC_SUCCESS) /* Close the transport */ set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); /* Decrement used SQ WR count */ atomic_dec(&xprt->sc_sq_count); wake_up(&xprt->sc_send_wait); ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id; if (ctxt) process_context(xprt, ctxt); svc_xprt_put(&xprt->sc_xprt); } if (ctxt) atomic_inc(&rdma_stat_sq_prod); } static void sq_comp_handler(struct ib_cq *cq, void *cq_context) { struct svcxprt_rdma *xprt = cq_context; unsigned long flags; /* Guard against unconditional flush call for destroyed QP */ if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount)==0) return; /* * Set the bit regardless of whether or not it's on the list * because it may be on the list already due to an RQ * completion. */ set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags); /* * If this transport is not already on the DTO transport queue, * add it */ spin_lock_irqsave(&dto_lock, flags); if (list_empty(&xprt->sc_dto_q)) { svc_xprt_get(&xprt->sc_xprt); list_add_tail(&xprt->sc_dto_q, &dto_xprt_q); } spin_unlock_irqrestore(&dto_lock, flags); /* Tasklet does all the work to avoid irqsave locks. 
*/ tasklet_schedule(&dto_tasklet); } static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv, int listener) { struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL); if (!cma_xprt) return NULL; svc_xprt_init(&init_net, &svc_rdma_class, &cma_xprt->sc_xprt, serv); INIT_LIST_HEAD(&cma_xprt->sc_accept_q); INIT_LIST_HEAD(&cma_xprt->sc_dto_q); INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q); INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q); INIT_LIST_HEAD(&cma_xprt->sc_frmr_q); init_waitqueue_head(&cma_xprt->sc_send_wait); spin_lock_init(&cma_xprt->sc_lock); spin_lock_init(&cma_xprt->sc_rq_dto_lock); spin_lock_init(&cma_xprt->sc_frmr_q_lock); cma_xprt->sc_ord = svcrdma_ord; cma_xprt->sc_max_req_size = svcrdma_max_req_size; cma_xprt->sc_max_requests = svcrdma_max_requests; cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT; atomic_set(&cma_xprt->sc_sq_count, 0); atomic_set(&cma_xprt->sc_ctxt_used, 0); if (listener) set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags); return cma_xprt; } struct page *svc_rdma_get_page(void) { struct page *page; while ((page = alloc_page(GFP_KERNEL)) == NULL) { /* If we can't get memory, wait a bit and try again */ printk(KERN_INFO "svcrdma: out of memory...retrying in 1000 " "jiffies.\n"); schedule_timeout_uninterruptible(msecs_to_jiffies(1000)); } return page; } int svc_rdma_post_recv(struct svcxprt_rdma *xprt) { struct ib_recv_wr recv_wr, *bad_recv_wr; struct svc_rdma_op_ctxt *ctxt; struct page *page; dma_addr_t pa; int sge_no; int buflen; int ret; ctxt = svc_rdma_get_context(xprt); buflen = 0; ctxt->direction = DMA_FROM_DEVICE; for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) { BUG_ON(sge_no >= xprt->sc_max_sge); page = svc_rdma_get_page(); ctxt->pages[sge_no] = page; pa = ib_dma_map_page(xprt->sc_cm_id->device, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa)) goto err_put_ctxt; atomic_inc(&xprt->sc_dma_used); ctxt->sge[sge_no].addr = pa; 
ctxt->sge[sge_no].length = PAGE_SIZE; ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey; ctxt->count = sge_no + 1; buflen += PAGE_SIZE; } recv_wr.next = NULL; recv_wr.sg_list = &ctxt->sge[0]; recv_wr.num_sge = ctxt->count; recv_wr.wr_id = (u64)(unsigned long)ctxt; svc_xprt_get(&xprt->sc_xprt); ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr); if (ret) { svc_rdma_unmap_dma(ctxt); svc_rdma_put_context(ctxt, 1); svc_xprt_put(&xprt->sc_xprt); } return ret; err_put_ctxt: svc_rdma_unmap_dma(ctxt); svc_rdma_put_context(ctxt, 1); return -ENOMEM; } /* * This function handles the CONNECT_REQUEST event on a listening * endpoint. It is passed the cma_id for the _new_ connection. The context in * this cma_id is inherited from the listening cma_id and is the svc_xprt * structure for the listening endpoint. * * This function creates a new xprt for the new connection and enqueues it on * the accept queue for the listent xprt. When the listen thread is kicked, it * will call the recvfrom method on the listen xprt which will accept the new * connection. */ static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird) { struct svcxprt_rdma *listen_xprt = new_cma_id->context; struct svcxprt_rdma *newxprt; struct sockaddr *sa; /* Create a new transport */ newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0); if (!newxprt) { dprintk("svcrdma: failed to create new transport\n"); return; } newxprt->sc_cm_id = new_cma_id; new_cma_id->context = newxprt; dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n", newxprt, newxprt->sc_cm_id, listen_xprt); /* Save client advertised inbound read limit for use later in accept. 
*/ newxprt->sc_ord = client_ird; /* Set the local and remote addresses in the transport */ sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr; svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa)); sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr; svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa)); /* * Enqueue the new transport on the accept queue of the listening * transport */ spin_lock_bh(&listen_xprt->sc_lock); list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q); spin_unlock_bh(&listen_xprt->sc_lock); /* * Can't use svc_xprt_received here because we are not on a * rqstp thread */ set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags); svc_xprt_enqueue(&listen_xprt->sc_xprt); } /* * Handles events generated on the listening endpoint. These events will be * either be incoming connect requests or adapter removal events. */ static int rdma_listen_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) { struct svcxprt_rdma *xprt = cma_id->context; int ret = 0; switch (event->event) { case RDMA_CM_EVENT_CONNECT_REQUEST: dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, " "event=%d\n", cma_id, cma_id->context, event->event); handle_connect_req(cma_id, event->param.conn.initiator_depth); break; case RDMA_CM_EVENT_ESTABLISHED: /* Accept complete */ dprintk("svcrdma: Connection completed on LISTEN xprt=%p, " "cm_id=%p\n", xprt, cma_id); break; case RDMA_CM_EVENT_DEVICE_REMOVAL: dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n", xprt, cma_id); if (xprt) set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); break; default: dprintk("svcrdma: Unexpected event on listening endpoint %p, " "event=%d\n", cma_id, event->event); break; } return ret; } static int rdma_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) { struct svc_xprt *xprt = cma_id->context; struct svcxprt_rdma *rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt); switch (event->event) { case RDMA_CM_EVENT_ESTABLISHED: /* 
Accept complete */ svc_xprt_get(xprt); dprintk("svcrdma: Connection completed on DTO xprt=%p, " "cm_id=%p\n", xprt, cma_id); clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags); svc_xprt_enqueue(xprt); break; case RDMA_CM_EVENT_DISCONNECTED: dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n", xprt, cma_id); if (xprt) { set_bit(XPT_CLOSE, &xprt->xpt_flags); svc_xprt_enqueue(xprt); svc_xprt_put(xprt); } break; case RDMA_CM_EVENT_DEVICE_REMOVAL: dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, " "event=%d\n", cma_id, xprt, event->event); if (xprt) { set_bit(XPT_CLOSE, &xprt->xpt_flags); svc_xprt_enqueue(xprt); } break; default: dprintk("svcrdma: Unexpected event on DTO endpoint %p, " "event=%d\n", cma_id, event->event); break; } return 0; } /* * Create a listening RDMA service endpoint. */ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, struct net *net, struct sockaddr *sa, int salen, int flags) { struct rdma_cm_id *listen_id; struct svcxprt_rdma *cma_xprt; struct svc_xprt *xprt; int ret; dprintk("svcrdma: Creating RDMA socket\n"); if (sa->sa_family != AF_INET) { dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family); return ERR_PTR(-EAFNOSUPPORT); } cma_xprt = rdma_create_xprt(serv, 1); if (!cma_xprt) return ERR_PTR(-ENOMEM); xprt = &cma_xprt->sc_xprt; listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(listen_id)) { ret = PTR_ERR(listen_id); dprintk("svcrdma: rdma_create_id failed = %d\n", ret); goto err0; } ret = rdma_bind_addr(listen_id, sa); if (ret) { dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret); goto err1; } cma_xprt->sc_cm_id = listen_id; ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG); if (ret) { dprintk("svcrdma: rdma_listen failed = %d\n", ret); goto err1; } /* * We need to use the address from the cm_id in case the * caller specified 0 for the port number. 
*/ sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr; svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen); return &cma_xprt->sc_xprt; err1: rdma_destroy_id(listen_id); err0: kfree(cma_xprt); return ERR_PTR(ret); } static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt) { struct ib_mr *mr; struct ib_fast_reg_page_list *pl; struct svc_rdma_fastreg_mr *frmr; frmr = kmalloc(sizeof(*frmr), GFP_KERNEL); if (!frmr) goto err; mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES); if (IS_ERR(mr)) goto err_free_frmr; pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device, RPCSVC_MAXPAGES); if (IS_ERR(pl)) goto err_free_mr; frmr->mr = mr; frmr->page_list = pl; INIT_LIST_HEAD(&frmr->frmr_list); return frmr; err_free_mr: ib_dereg_mr(mr); err_free_frmr: kfree(frmr); err: return ERR_PTR(-ENOMEM); } static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt) { struct svc_rdma_fastreg_mr *frmr; while (!list_empty(&xprt->sc_frmr_q)) { frmr = list_entry(xprt->sc_frmr_q.next, struct svc_rdma_fastreg_mr, frmr_list); list_del_init(&frmr->frmr_list); ib_dereg_mr(frmr->mr); ib_free_fast_reg_page_list(frmr->page_list); kfree(frmr); } } struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma) { struct svc_rdma_fastreg_mr *frmr = NULL; spin_lock_bh(&rdma->sc_frmr_q_lock); if (!list_empty(&rdma->sc_frmr_q)) { frmr = list_entry(rdma->sc_frmr_q.next, struct svc_rdma_fastreg_mr, frmr_list); list_del_init(&frmr->frmr_list); frmr->map_len = 0; frmr->page_list_len = 0; } spin_unlock_bh(&rdma->sc_frmr_q_lock); if (frmr) return frmr; return rdma_alloc_frmr(rdma); } static void frmr_unmap_dma(struct svcxprt_rdma *xprt, struct svc_rdma_fastreg_mr *frmr) { int page_no; for (page_no = 0; page_no < frmr->page_list_len; page_no++) { dma_addr_t addr = frmr->page_list->page_list[page_no]; if (ib_dma_mapping_error(frmr->mr->device, addr)) continue; atomic_dec(&xprt->sc_dma_used); ib_dma_unmap_page(frmr->mr->device, addr, PAGE_SIZE, frmr->direction); } } 
void svc_rdma_put_frmr(struct svcxprt_rdma *rdma, struct svc_rdma_fastreg_mr *frmr) { if (frmr) { frmr_unmap_dma(rdma, frmr); spin_lock_bh(&rdma->sc_frmr_q_lock); BUG_ON(!list_empty(&frmr->frmr_list)); list_add(&frmr->frmr_list, &rdma->sc_frmr_q); spin_unlock_bh(&rdma->sc_frmr_q_lock); } } /* * This is the xpo_recvfrom function for listening endpoints. Its * purpose is to accept incoming connections. The CMA callback handler * has already created a new transport and attached it to the new CMA * ID. * * There is a queue of pending connections hung on the listening * transport. This queue contains the new svc_xprt structure. This * function takes svc_xprt structures off the accept_q and completes * the connection. */ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) { struct svcxprt_rdma *listen_rdma; struct svcxprt_rdma *newxprt = NULL; struct rdma_conn_param conn_param; struct ib_qp_init_attr qp_attr; struct ib_device_attr devattr; int uninitialized_var(dma_mr_acc); int need_dma_mr; int ret; int i; listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt); clear_bit(XPT_CONN, &xprt->xpt_flags); /* Get the next entry off the accept list */ spin_lock_bh(&listen_rdma->sc_lock); if (!list_empty(&listen_rdma->sc_accept_q)) { newxprt = list_entry(listen_rdma->sc_accept_q.next, struct svcxprt_rdma, sc_accept_q); list_del_init(&newxprt->sc_accept_q); } if (!list_empty(&listen_rdma->sc_accept_q)) set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags); spin_unlock_bh(&listen_rdma->sc_lock); if (!newxprt) return NULL; dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n", newxprt, newxprt->sc_cm_id); ret = ib_query_device(newxprt->sc_cm_id->device, &devattr); if (ret) { dprintk("svcrdma: could not query device attributes on " "device %p, rc=%d\n", newxprt->sc_cm_id->device, ret); goto errout; } /* Qualify the transport resource defaults with the * capabilities of this particular device */ newxprt->sc_max_sge = min((size_t)devattr.max_sge, 
(size_t)RPCSVC_MAXPAGES); newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr, (size_t)svcrdma_max_requests); newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests; /* * Limit ORD based on client limit, local device limit, and * configured svcrdma limit. */ newxprt->sc_ord = min_t(size_t, devattr.max_qp_rd_atom, newxprt->sc_ord); newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord); newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device); if (IS_ERR(newxprt->sc_pd)) { dprintk("svcrdma: error creating PD for connect request\n"); goto errout; } newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device, sq_comp_handler, cq_event_handler, newxprt, newxprt->sc_sq_depth, 0); if (IS_ERR(newxprt->sc_sq_cq)) { dprintk("svcrdma: error creating SQ CQ for connect request\n"); goto errout; } newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device, rq_comp_handler, cq_event_handler, newxprt, newxprt->sc_max_requests, 0); if (IS_ERR(newxprt->sc_rq_cq)) { dprintk("svcrdma: error creating RQ CQ for connect request\n"); goto errout; } memset(&qp_attr, 0, sizeof qp_attr); qp_attr.event_handler = qp_event_handler; qp_attr.qp_context = &newxprt->sc_xprt; qp_attr.cap.max_send_wr = newxprt->sc_sq_depth; qp_attr.cap.max_recv_wr = newxprt->sc_max_requests; qp_attr.cap.max_send_sge = newxprt->sc_max_sge; qp_attr.cap.max_recv_sge = newxprt->sc_max_sge; qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; qp_attr.qp_type = IB_QPT_RC; qp_attr.send_cq = newxprt->sc_sq_cq; qp_attr.recv_cq = newxprt->sc_rq_cq; dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n" " cm_id->device=%p, sc_pd->device=%p\n" " cap.max_send_wr = %d\n" " cap.max_recv_wr = %d\n" " cap.max_send_sge = %d\n" " cap.max_recv_sge = %d\n", newxprt->sc_cm_id, newxprt->sc_pd, newxprt->sc_cm_id->device, newxprt->sc_pd->device, qp_attr.cap.max_send_wr, qp_attr.cap.max_recv_wr, qp_attr.cap.max_send_sge, qp_attr.cap.max_recv_sge); ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr); if (ret) 
{ /* * XXX: This is a hack. We need a xx_request_qp interface * that will adjust the qp_attr's with a best-effort * number */ qp_attr.cap.max_send_sge -= 2; qp_attr.cap.max_recv_sge -= 2; ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr); if (ret) { dprintk("svcrdma: failed to create QP, ret=%d\n", ret); goto errout; } newxprt->sc_max_sge = qp_attr.cap.max_send_sge; newxprt->sc_max_sge = qp_attr.cap.max_recv_sge; newxprt->sc_sq_depth = qp_attr.cap.max_send_wr; newxprt->sc_max_requests = qp_attr.cap.max_recv_wr; } newxprt->sc_qp = newxprt->sc_cm_id->qp; /* * Use the most secure set of MR resources based on the * transport type and available memory management features in * the device. Here's the table implemented below: * * Fast Global DMA Remote WR * Reg LKEY MR Access * Sup'd Sup'd Needed Needed * * IWARP N N Y Y * N Y Y Y * Y N Y N * Y Y N - * * IB N N Y N * N Y N - * Y N Y N * Y Y N - * * NB: iWARP requires remote write access for the data sink * of an RDMA_READ. IB does not. 
*/ if (devattr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { newxprt->sc_frmr_pg_list_len = devattr.max_fast_reg_page_list_len; newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG; } /* * Determine if a DMA MR is required and if so, what privs are required */ switch (rdma_node_get_transport(newxprt->sc_cm_id->device->node_type)) { case RDMA_TRANSPORT_IWARP: newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV; if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) { need_dma_mr = 1; dma_mr_acc = (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE); } else if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) { need_dma_mr = 1; dma_mr_acc = IB_ACCESS_LOCAL_WRITE; } else need_dma_mr = 0; break; case RDMA_TRANSPORT_IB: if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) { need_dma_mr = 1; dma_mr_acc = IB_ACCESS_LOCAL_WRITE; } else need_dma_mr = 0; break; default: goto errout; } /* Create the DMA MR if needed, otherwise, use the DMA LKEY */ if (need_dma_mr) { /* Register all of physical memory */ newxprt->sc_phys_mr = ib_get_dma_mr(newxprt->sc_pd, dma_mr_acc); if (IS_ERR(newxprt->sc_phys_mr)) { dprintk("svcrdma: Failed to create DMA MR ret=%d\n", ret); goto errout; } newxprt->sc_dma_lkey = newxprt->sc_phys_mr->lkey; } else newxprt->sc_dma_lkey = newxprt->sc_cm_id->device->local_dma_lkey; /* Post receive buffers */ for (i = 0; i < newxprt->sc_max_requests; i++) { ret = svc_rdma_post_recv(newxprt); if (ret) { dprintk("svcrdma: failure posting receive buffers\n"); goto errout; } } /* Swap out the handler */ newxprt->sc_cm_id->event_handler = rdma_cma_handler; /* * Arm the CQs for the SQ and RQ before accepting so we can't * miss the first message */ ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP); ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP); /* Accept Connection */ set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags); memset(&conn_param, 0, sizeof conn_param); conn_param.responder_resources = 0; conn_param.initiator_depth = newxprt->sc_ord; ret = 
rdma_accept(newxprt->sc_cm_id, &conn_param); if (ret) { dprintk("svcrdma: failed to accept new connection, ret=%d\n", ret); goto errout; } dprintk("svcrdma: new connection %p accepted with the following " "attributes:\n" " local_ip : %pI4\n" " local_port : %d\n" " remote_ip : %pI4\n" " remote_port : %d\n" " max_sge : %d\n" " sq_depth : %d\n" " max_requests : %d\n" " ord : %d\n", newxprt, &((struct sockaddr_in *)&newxprt->sc_cm_id-> route.addr.src_addr)->sin_addr.s_addr, ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id-> route.addr.src_addr)->sin_port), &((struct sockaddr_in *)&newxprt->sc_cm_id-> route.addr.dst_addr)->sin_addr.s_addr, ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id-> route.addr.dst_addr)->sin_port), newxprt->sc_max_sge, newxprt->sc_sq_depth, newxprt->sc_max_requests, newxprt->sc_ord); return &newxprt->sc_xprt; errout: dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret); /* Take a reference in case the DTO handler runs */ svc_xprt_get(&newxprt->sc_xprt); if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp)) ib_destroy_qp(newxprt->sc_qp); rdma_destroy_id(newxprt->sc_cm_id); /* This call to put will destroy the transport */ svc_xprt_put(&newxprt->sc_xprt); return NULL; } static void svc_rdma_release_rqst(struct svc_rqst *rqstp) { } /* * When connected, an svc_xprt has at least two references: * * - A reference held by the cm_id between the ESTABLISHED and * DISCONNECTED events. If the remote peer disconnected first, this * reference could be gone. * * - A reference held by the svc_recv code that called this function * as part of close processing. * * At a minimum one references should still be held. 
*/ static void svc_rdma_detach(struct svc_xprt *xprt) { struct svcxprt_rdma *rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt); dprintk("svc: svc_rdma_detach(%p)\n", xprt); /* Disconnect and flush posted WQE */ rdma_disconnect(rdma->sc_cm_id); } static void __svc_rdma_free(struct work_struct *work) { struct svcxprt_rdma *rdma = container_of(work, struct svcxprt_rdma, sc_work); dprintk("svcrdma: svc_rdma_free(%p)\n", rdma); /* We should only be called from kref_put */ BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0); /* * Destroy queued, but not processed read completions. Note * that this cleanup has to be done before destroying the * cm_id because the device ptr is needed to unmap the dma in * svc_rdma_put_context. */ while (!list_empty(&rdma->sc_read_complete_q)) { struct svc_rdma_op_ctxt *ctxt; ctxt = list_entry(rdma->sc_read_complete_q.next, struct svc_rdma_op_ctxt, dto_q); list_del_init(&ctxt->dto_q); svc_rdma_put_context(ctxt, 1); } /* Destroy queued, but not processed recv completions */ while (!list_empty(&rdma->sc_rq_dto_q)) { struct svc_rdma_op_ctxt *ctxt; ctxt = list_entry(rdma->sc_rq_dto_q.next, struct svc_rdma_op_ctxt, dto_q); list_del_init(&ctxt->dto_q); svc_rdma_put_context(ctxt, 1); } /* Warn if we leaked a resource or under-referenced */ WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0); WARN_ON(atomic_read(&rdma->sc_dma_used) != 0); /* De-allocate fastreg mr */ rdma_dealloc_frmr_q(rdma); /* Destroy the QP if present (not a listener) */ if (rdma->sc_qp && !IS_ERR(rdma->sc_qp)) ib_destroy_qp(rdma->sc_qp); if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq)) ib_destroy_cq(rdma->sc_sq_cq); if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq)) ib_destroy_cq(rdma->sc_rq_cq); if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr)) ib_dereg_mr(rdma->sc_phys_mr); if (rdma->sc_pd && !IS_ERR(rdma->sc_pd)) ib_dealloc_pd(rdma->sc_pd); /* Destroy the CM ID */ rdma_destroy_id(rdma->sc_cm_id); kfree(rdma); } static void svc_rdma_free(struct svc_xprt *xprt) { struct 
svcxprt_rdma *rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt); INIT_WORK(&rdma->sc_work, __svc_rdma_free); queue_work(svc_rdma_wq, &rdma->sc_work); } static int svc_rdma_has_wspace(struct svc_xprt *xprt) { struct svcxprt_rdma *rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt); /* * If there are fewer SQ WR available than required to send a * simple response, return false. */ if ((rdma->sc_sq_depth - atomic_read(&rdma->sc_sq_count) < 3)) return 0; /* * ...or there are already waiters on the SQ, * return false. */ if (waitqueue_active(&rdma->sc_send_wait)) return 0; /* Otherwise return true. */ return 1; } /* * Attempt to register the kvec representing the RPC memory with the * device. * * Returns: * NULL : The device does not support fastreg or there were no more * fastreg mr. * frmr : The kvec register request was successfully posted. * <0 : An error was encountered attempting to register the kvec. */ int svc_rdma_fastreg(struct svcxprt_rdma *xprt, struct svc_rdma_fastreg_mr *frmr) { struct ib_send_wr fastreg_wr; u8 key; /* Bump the key */ key = (u8)(frmr->mr->lkey & 0x000000FF); ib_update_fast_reg_key(frmr->mr, ++key); /* Prepare FASTREG WR */ memset(&fastreg_wr, 0, sizeof fastreg_wr); fastreg_wr.opcode = IB_WR_FAST_REG_MR; fastreg_wr.send_flags = IB_SEND_SIGNALED; fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva; fastreg_wr.wr.fast_reg.page_list = frmr->page_list; fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len; fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT; fastreg_wr.wr.fast_reg.length = frmr->map_len; fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags; fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey; return svc_rdma_send(xprt, &fastreg_wr); } int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr) { struct ib_send_wr *bad_wr, *n_wr; int wr_count; int i; int ret; if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags)) return -ENOTCONN; BUG_ON(wr->send_flags != IB_SEND_SIGNALED); wr_count = 1; for (n_wr = wr->next; 
n_wr; n_wr = n_wr->next) wr_count++; /* If the SQ is full, wait until an SQ entry is available */ while (1) { spin_lock_bh(&xprt->sc_lock); if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) { spin_unlock_bh(&xprt->sc_lock); atomic_inc(&rdma_stat_sq_starve); /* See if we can opportunistically reap SQ WR to make room */ sq_cq_reap(xprt); /* Wait until SQ WR available if SQ still full */ wait_event(xprt->sc_send_wait, atomic_read(&xprt->sc_sq_count) < xprt->sc_sq_depth); if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags)) return -ENOTCONN; continue; } /* Take a transport ref for each WR posted */ for (i = 0; i < wr_count; i++) svc_xprt_get(&xprt->sc_xprt); /* Bump used SQ WR count and post */ atomic_add(wr_count, &xprt->sc_sq_count); ret = ib_post_send(xprt->sc_qp, wr, &bad_wr); if (ret) { set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); atomic_sub(wr_count, &xprt->sc_sq_count); for (i = 0; i < wr_count; i ++) svc_xprt_put(&xprt->sc_xprt); dprintk("svcrdma: failed to post SQ WR rc=%d, " "sc_sq_count=%d, sc_sq_depth=%d\n", ret, atomic_read(&xprt->sc_sq_count), xprt->sc_sq_depth); } spin_unlock_bh(&xprt->sc_lock); if (ret) wake_up(&xprt->sc_send_wait); break; } return ret; } void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp, enum rpcrdma_errcode err) { struct ib_send_wr err_wr; struct page *p; struct svc_rdma_op_ctxt *ctxt; u32 *va; int length; int ret; p = svc_rdma_get_page(); va = page_address(p); /* XDR encode error */ length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va); ctxt = svc_rdma_get_context(xprt); ctxt->direction = DMA_FROM_DEVICE; ctxt->count = 1; ctxt->pages[0] = p; /* Prepare SGE for local address */ ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device, p, 0, length, DMA_FROM_DEVICE); if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) { put_page(p); svc_rdma_put_context(ctxt, 1); return; } atomic_inc(&xprt->sc_dma_used); ctxt->sge[0].lkey = xprt->sc_dma_lkey; ctxt->sge[0].length = 
length; /* Prepare SEND WR */ memset(&err_wr, 0, sizeof err_wr); ctxt->wr_op = IB_WR_SEND; err_wr.wr_id = (unsigned long)ctxt; err_wr.sg_list = ctxt->sge; err_wr.num_sge = 1; err_wr.opcode = IB_WR_SEND; err_wr.send_flags = IB_SEND_SIGNALED; /* Post It */ ret = svc_rdma_send(xprt, &err_wr); if (ret) { dprintk("svcrdma: Error %d posting send for protocol error\n", ret); svc_rdma_unmap_dma(ctxt); svc_rdma_put_context(ctxt, 1); } }
gpl-2.0
Kali-/android_kernel_sony_msm8974pro
drivers/watchdog/mpc8xxx_wdt.c
4937
8281
/* * mpc8xxx_wdt.c - MPC8xx/MPC83xx/MPC86xx watchdog userspace interface * * Authors: Dave Updegraff <dave@cray.org> * Kumar Gala <galak@kernel.crashing.org> * Attribution: from 83xx_wst: Florian Schirmer <jolt@tuxbox.org> * ..and from sc520_wdt * Copyright (c) 2008 MontaVista Software, Inc. * Anton Vorontsov <avorontsov@ru.mvista.com> * * Note: it appears that you can only actually ENABLE or DISABLE the thing * once after POR. Once enabled, you cannot disable, and vice versa. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/fs.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/miscdevice.h> #include <linux/of_platform.h> #include <linux/module.h> #include <linux/watchdog.h> #include <linux/io.h> #include <linux/uaccess.h> #include <sysdev/fsl_soc.h> struct mpc8xxx_wdt { __be32 res0; __be32 swcrr; /* System watchdog control register */ #define SWCRR_SWTC 0xFFFF0000 /* Software Watchdog Time Count. */ #define SWCRR_SWEN 0x00000004 /* Watchdog Enable bit. */ #define SWCRR_SWRI 0x00000002 /* Software Watchdog Reset/Interrupt Select bit.*/ #define SWCRR_SWPR 0x00000001 /* Software Watchdog Counter Prescale bit. */ __be32 swcnr; /* System watchdog count register */ u8 res1[2]; __be16 swsrr; /* System watchdog service register */ u8 res2[0xF0]; }; struct mpc8xxx_wdt_type { int prescaler; bool hw_enabled; }; static struct mpc8xxx_wdt __iomem *wd_base; static int mpc8xxx_wdt_init_late(void); static u16 timeout = 0xffff; module_param(timeout, ushort, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout in ticks. (0<timeout<65536, default=65535)"); static bool reset = 1; module_param(reset, bool, 0); MODULE_PARM_DESC(reset, "Watchdog Interrupt/Reset Mode. 
0 = interrupt, 1 = reset"); static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); /* * We always prescale, but if someone really doesn't want to they can set this * to 0 */ static int prescale = 1; static unsigned int timeout_sec; static unsigned long wdt_is_open; static DEFINE_SPINLOCK(wdt_spinlock); static void mpc8xxx_wdt_keepalive(void) { /* Ping the WDT */ spin_lock(&wdt_spinlock); out_be16(&wd_base->swsrr, 0x556c); out_be16(&wd_base->swsrr, 0xaa39); spin_unlock(&wdt_spinlock); } static void mpc8xxx_wdt_timer_ping(unsigned long arg); static DEFINE_TIMER(wdt_timer, mpc8xxx_wdt_timer_ping, 0, 0); static void mpc8xxx_wdt_timer_ping(unsigned long arg) { mpc8xxx_wdt_keepalive(); /* We're pinging it twice faster than needed, just to be sure. */ mod_timer(&wdt_timer, jiffies + HZ * timeout_sec / 2); } static void mpc8xxx_wdt_pr_warn(const char *msg) { pr_crit("%s, expect the %s soon!\n", msg, reset ? 
"reset" : "machine check exception"); } static ssize_t mpc8xxx_wdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { if (count) mpc8xxx_wdt_keepalive(); return count; } static int mpc8xxx_wdt_open(struct inode *inode, struct file *file) { u32 tmp = SWCRR_SWEN; if (test_and_set_bit(0, &wdt_is_open)) return -EBUSY; /* Once we start the watchdog we can't stop it */ if (nowayout) __module_get(THIS_MODULE); /* Good, fire up the show */ if (prescale) tmp |= SWCRR_SWPR; if (reset) tmp |= SWCRR_SWRI; tmp |= timeout << 16; out_be32(&wd_base->swcrr, tmp); del_timer_sync(&wdt_timer); return nonseekable_open(inode, file); } static int mpc8xxx_wdt_release(struct inode *inode, struct file *file) { if (!nowayout) mpc8xxx_wdt_timer_ping(0); else mpc8xxx_wdt_pr_warn("watchdog closed"); clear_bit(0, &wdt_is_open); return 0; } static long mpc8xxx_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int __user *p = argp; static const struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING, .firmware_version = 1, .identity = "MPC8xxx", }; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(argp, &ident, sizeof(ident)) ? 
-EFAULT : 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, p); case WDIOC_KEEPALIVE: mpc8xxx_wdt_keepalive(); return 0; case WDIOC_GETTIMEOUT: return put_user(timeout_sec, p); default: return -ENOTTY; } } static const struct file_operations mpc8xxx_wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = mpc8xxx_wdt_write, .unlocked_ioctl = mpc8xxx_wdt_ioctl, .open = mpc8xxx_wdt_open, .release = mpc8xxx_wdt_release, }; static struct miscdevice mpc8xxx_wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &mpc8xxx_wdt_fops, }; static const struct of_device_id mpc8xxx_wdt_match[]; static int __devinit mpc8xxx_wdt_probe(struct platform_device *ofdev) { int ret; const struct of_device_id *match; struct device_node *np = ofdev->dev.of_node; struct mpc8xxx_wdt_type *wdt_type; u32 freq = fsl_get_sys_freq(); bool enabled; match = of_match_device(mpc8xxx_wdt_match, &ofdev->dev); if (!match) return -EINVAL; wdt_type = match->data; if (!freq || freq == -1) return -EINVAL; wd_base = of_iomap(np, 0); if (!wd_base) return -ENOMEM; enabled = in_be32(&wd_base->swcrr) & SWCRR_SWEN; if (!enabled && wdt_type->hw_enabled) { pr_info("could not be enabled in software\n"); ret = -ENOSYS; goto err_unmap; } /* Calculate the timeout in seconds */ if (prescale) timeout_sec = (timeout * wdt_type->prescaler) / freq; else timeout_sec = timeout / freq; #ifdef MODULE ret = mpc8xxx_wdt_init_late(); if (ret) goto err_unmap; #endif pr_info("WDT driver for MPC8xxx initialized. mode:%s timeout=%d (%d seconds)\n", reset ? "reset" : "interrupt", timeout, timeout_sec); /* * If the watchdog was previously enabled or we're running on * MPC8xxx, we should ping the wdt from the kernel until the * userspace handles it. 
*/ if (enabled) mpc8xxx_wdt_timer_ping(0); return 0; err_unmap: iounmap(wd_base); wd_base = NULL; return ret; } static int __devexit mpc8xxx_wdt_remove(struct platform_device *ofdev) { mpc8xxx_wdt_pr_warn("watchdog removed"); del_timer_sync(&wdt_timer); misc_deregister(&mpc8xxx_wdt_miscdev); iounmap(wd_base); return 0; } static const struct of_device_id mpc8xxx_wdt_match[] = { { .compatible = "mpc83xx_wdt", .data = &(struct mpc8xxx_wdt_type) { .prescaler = 0x10000, }, }, { .compatible = "fsl,mpc8610-wdt", .data = &(struct mpc8xxx_wdt_type) { .prescaler = 0x10000, .hw_enabled = true, }, }, { .compatible = "fsl,mpc823-wdt", .data = &(struct mpc8xxx_wdt_type) { .prescaler = 0x800, }, }, {}, }; MODULE_DEVICE_TABLE(of, mpc8xxx_wdt_match); static struct platform_driver mpc8xxx_wdt_driver = { .probe = mpc8xxx_wdt_probe, .remove = __devexit_p(mpc8xxx_wdt_remove), .driver = { .name = "mpc8xxx_wdt", .owner = THIS_MODULE, .of_match_table = mpc8xxx_wdt_match, }, }; /* * We do wdt initialization in two steps: arch_initcall probes the wdt * very early to start pinging the watchdog (misc devices are not yet * available), and later module_init() just registers the misc device. */ static int mpc8xxx_wdt_init_late(void) { int ret; if (!wd_base) return -ENODEV; ret = misc_register(&mpc8xxx_wdt_miscdev); if (ret) { pr_err("cannot register miscdev on minor=%d (err=%d)\n", WATCHDOG_MINOR, ret); return ret; } return 0; } #ifndef MODULE module_init(mpc8xxx_wdt_init_late); #endif static int __init mpc8xxx_wdt_init(void) { return platform_driver_register(&mpc8xxx_wdt_driver); } arch_initcall(mpc8xxx_wdt_init); static void __exit mpc8xxx_wdt_exit(void) { platform_driver_unregister(&mpc8xxx_wdt_driver); } module_exit(mpc8xxx_wdt_exit); MODULE_AUTHOR("Dave Updegraff, Kumar Gala"); MODULE_DESCRIPTION("Driver for watchdog timer in MPC8xx/MPC83xx/MPC86xx " "uProcessors"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
necioerrante/kernel
security/selinux/ss/conditional.c
5961
14229
/* Authors: Karl MacMillan <kmacmillan@tresys.com> * Frank Mayer <mayerf@tresys.com> * * Copyright (C) 2003 - 2004 Tresys Technology, LLC * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 2. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/spinlock.h> #include <linux/slab.h> #include "security.h" #include "conditional.h" /* * cond_evaluate_expr evaluates a conditional expr * in reverse polish notation. It returns true (1), false (0), * or undefined (-1). Undefined occurs when the expression * exceeds the stack depth of COND_EXPR_MAXDEPTH. */ static int cond_evaluate_expr(struct policydb *p, struct cond_expr *expr) { struct cond_expr *cur; int s[COND_EXPR_MAXDEPTH]; int sp = -1; for (cur = expr; cur; cur = cur->next) { switch (cur->expr_type) { case COND_BOOL: if (sp == (COND_EXPR_MAXDEPTH - 1)) return -1; sp++; s[sp] = p->bool_val_to_struct[cur->bool - 1]->state; break; case COND_NOT: if (sp < 0) return -1; s[sp] = !s[sp]; break; case COND_OR: if (sp < 1) return -1; sp--; s[sp] |= s[sp + 1]; break; case COND_AND: if (sp < 1) return -1; sp--; s[sp] &= s[sp + 1]; break; case COND_XOR: if (sp < 1) return -1; sp--; s[sp] ^= s[sp + 1]; break; case COND_EQ: if (sp < 1) return -1; sp--; s[sp] = (s[sp] == s[sp + 1]); break; case COND_NEQ: if (sp < 1) return -1; sp--; s[sp] = (s[sp] != s[sp + 1]); break; default: return -1; } } return s[0]; } /* * evaluate_cond_node evaluates the conditional stored in * a struct cond_node and if the result is different than the * current state of the node it sets the rules in the true/false * list appropriately. If the result of the expression is undefined * all of the rules are disabled for safety. 
*/ int evaluate_cond_node(struct policydb *p, struct cond_node *node) { int new_state; struct cond_av_list *cur; new_state = cond_evaluate_expr(p, node->expr); if (new_state != node->cur_state) { node->cur_state = new_state; if (new_state == -1) printk(KERN_ERR "SELinux: expression result was undefined - disabling all rules.\n"); /* turn the rules on or off */ for (cur = node->true_list; cur; cur = cur->next) { if (new_state <= 0) cur->node->key.specified &= ~AVTAB_ENABLED; else cur->node->key.specified |= AVTAB_ENABLED; } for (cur = node->false_list; cur; cur = cur->next) { /* -1 or 1 */ if (new_state) cur->node->key.specified &= ~AVTAB_ENABLED; else cur->node->key.specified |= AVTAB_ENABLED; } } return 0; } int cond_policydb_init(struct policydb *p) { int rc; p->bool_val_to_struct = NULL; p->cond_list = NULL; rc = avtab_init(&p->te_cond_avtab); if (rc) return rc; return 0; } static void cond_av_list_destroy(struct cond_av_list *list) { struct cond_av_list *cur, *next; for (cur = list; cur; cur = next) { next = cur->next; /* the avtab_ptr_t node is destroy by the avtab */ kfree(cur); } } static void cond_node_destroy(struct cond_node *node) { struct cond_expr *cur_expr, *next_expr; for (cur_expr = node->expr; cur_expr; cur_expr = next_expr) { next_expr = cur_expr->next; kfree(cur_expr); } cond_av_list_destroy(node->true_list); cond_av_list_destroy(node->false_list); kfree(node); } static void cond_list_destroy(struct cond_node *list) { struct cond_node *next, *cur; if (list == NULL) return; for (cur = list; cur; cur = next) { next = cur->next; cond_node_destroy(cur); } } void cond_policydb_destroy(struct policydb *p) { kfree(p->bool_val_to_struct); avtab_destroy(&p->te_cond_avtab); cond_list_destroy(p->cond_list); } int cond_init_bool_indexes(struct policydb *p) { kfree(p->bool_val_to_struct); p->bool_val_to_struct = kmalloc(p->p_bools.nprim * sizeof(struct cond_bool_datum *), GFP_KERNEL); if (!p->bool_val_to_struct) return -ENOMEM; return 0; } int 
cond_destroy_bool(void *key, void *datum, void *p) { kfree(key); kfree(datum); return 0; } int cond_index_bool(void *key, void *datum, void *datap) { struct policydb *p; struct cond_bool_datum *booldatum; struct flex_array *fa; booldatum = datum; p = datap; if (!booldatum->value || booldatum->value > p->p_bools.nprim) return -EINVAL; fa = p->sym_val_to_name[SYM_BOOLS]; if (flex_array_put_ptr(fa, booldatum->value - 1, key, GFP_KERNEL | __GFP_ZERO)) BUG(); p->bool_val_to_struct[booldatum->value - 1] = booldatum; return 0; } static int bool_isvalid(struct cond_bool_datum *b) { if (!(b->state == 0 || b->state == 1)) return 0; return 1; } int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp) { char *key = NULL; struct cond_bool_datum *booldatum; __le32 buf[3]; u32 len; int rc; booldatum = kzalloc(sizeof(struct cond_bool_datum), GFP_KERNEL); if (!booldatum) return -ENOMEM; rc = next_entry(buf, fp, sizeof buf); if (rc) goto err; booldatum->value = le32_to_cpu(buf[0]); booldatum->state = le32_to_cpu(buf[1]); rc = -EINVAL; if (!bool_isvalid(booldatum)) goto err; len = le32_to_cpu(buf[2]); rc = -ENOMEM; key = kmalloc(len + 1, GFP_KERNEL); if (!key) goto err; rc = next_entry(key, fp, len); if (rc) goto err; key[len] = '\0'; rc = hashtab_insert(h, key, booldatum); if (rc) goto err; return 0; err: cond_destroy_bool(key, booldatum, NULL); return rc; } struct cond_insertf_data { struct policydb *p; struct cond_av_list *other; struct cond_av_list *head; struct cond_av_list *tail; }; static int cond_insertf(struct avtab *a, struct avtab_key *k, struct avtab_datum *d, void *ptr) { struct cond_insertf_data *data = ptr; struct policydb *p = data->p; struct cond_av_list *other = data->other, *list, *cur; struct avtab_node *node_ptr; u8 found; int rc = -EINVAL; /* * For type rules we have to make certain there aren't any * conflicting rules by searching the te_avtab and the * cond_te_avtab. 
*/ if (k->specified & AVTAB_TYPE) { if (avtab_search(&p->te_avtab, k)) { printk(KERN_ERR "SELinux: type rule already exists outside of a conditional.\n"); goto err; } /* * If we are reading the false list other will be a pointer to * the true list. We can have duplicate entries if there is only * 1 other entry and it is in our true list. * * If we are reading the true list (other == NULL) there shouldn't * be any other entries. */ if (other) { node_ptr = avtab_search_node(&p->te_cond_avtab, k); if (node_ptr) { if (avtab_search_node_next(node_ptr, k->specified)) { printk(KERN_ERR "SELinux: too many conflicting type rules.\n"); goto err; } found = 0; for (cur = other; cur; cur = cur->next) { if (cur->node == node_ptr) { found = 1; break; } } if (!found) { printk(KERN_ERR "SELinux: conflicting type rules.\n"); goto err; } } } else { if (avtab_search(&p->te_cond_avtab, k)) { printk(KERN_ERR "SELinux: conflicting type rules when adding type rule for true.\n"); goto err; } } } node_ptr = avtab_insert_nonunique(&p->te_cond_avtab, k, d); if (!node_ptr) { printk(KERN_ERR "SELinux: could not insert rule.\n"); rc = -ENOMEM; goto err; } list = kzalloc(sizeof(struct cond_av_list), GFP_KERNEL); if (!list) { rc = -ENOMEM; goto err; } list->node = node_ptr; if (!data->head) data->head = list; else data->tail->next = list; data->tail = list; return 0; err: cond_av_list_destroy(data->head); data->head = NULL; return rc; } static int cond_read_av_list(struct policydb *p, void *fp, struct cond_av_list **ret_list, struct cond_av_list *other) { int i, rc; __le32 buf[1]; u32 len; struct cond_insertf_data data; *ret_list = NULL; len = 0; rc = next_entry(buf, fp, sizeof(u32)); if (rc) return rc; len = le32_to_cpu(buf[0]); if (len == 0) return 0; data.p = p; data.other = other; data.head = NULL; data.tail = NULL; for (i = 0; i < len; i++) { rc = avtab_read_item(&p->te_cond_avtab, fp, p, cond_insertf, &data); if (rc) return rc; } *ret_list = data.head; return 0; } static int 
expr_isvalid(struct policydb *p, struct cond_expr *expr) { if (expr->expr_type <= 0 || expr->expr_type > COND_LAST) { printk(KERN_ERR "SELinux: conditional expressions uses unknown operator.\n"); return 0; } if (expr->bool > p->p_bools.nprim) { printk(KERN_ERR "SELinux: conditional expressions uses unknown bool.\n"); return 0; } return 1; } static int cond_read_node(struct policydb *p, struct cond_node *node, void *fp) { __le32 buf[2]; u32 len, i; int rc; struct cond_expr *expr = NULL, *last = NULL; rc = next_entry(buf, fp, sizeof(u32)); if (rc) return rc; node->cur_state = le32_to_cpu(buf[0]); len = 0; rc = next_entry(buf, fp, sizeof(u32)); if (rc) return rc; /* expr */ len = le32_to_cpu(buf[0]); for (i = 0; i < len; i++) { rc = next_entry(buf, fp, sizeof(u32) * 2); if (rc) goto err; rc = -ENOMEM; expr = kzalloc(sizeof(struct cond_expr), GFP_KERNEL); if (!expr) goto err; expr->expr_type = le32_to_cpu(buf[0]); expr->bool = le32_to_cpu(buf[1]); if (!expr_isvalid(p, expr)) { rc = -EINVAL; kfree(expr); goto err; } if (i == 0) node->expr = expr; else last->next = expr; last = expr; } rc = cond_read_av_list(p, fp, &node->true_list, NULL); if (rc) goto err; rc = cond_read_av_list(p, fp, &node->false_list, node->true_list); if (rc) goto err; return 0; err: cond_node_destroy(node); return rc; } int cond_read_list(struct policydb *p, void *fp) { struct cond_node *node, *last = NULL; __le32 buf[1]; u32 i, len; int rc; rc = next_entry(buf, fp, sizeof buf); if (rc) return rc; len = le32_to_cpu(buf[0]); rc = avtab_alloc(&(p->te_cond_avtab), p->te_avtab.nel); if (rc) goto err; for (i = 0; i < len; i++) { rc = -ENOMEM; node = kzalloc(sizeof(struct cond_node), GFP_KERNEL); if (!node) goto err; rc = cond_read_node(p, node, fp); if (rc) goto err; if (i == 0) p->cond_list = node; else last->next = node; last = node; } return 0; err: cond_list_destroy(p->cond_list); p->cond_list = NULL; return rc; } int cond_write_bool(void *vkey, void *datum, void *ptr) { char *key = vkey; struct 
cond_bool_datum *booldatum = datum; struct policy_data *pd = ptr; void *fp = pd->fp; __le32 buf[3]; u32 len; int rc; len = strlen(key); buf[0] = cpu_to_le32(booldatum->value); buf[1] = cpu_to_le32(booldatum->state); buf[2] = cpu_to_le32(len); rc = put_entry(buf, sizeof(u32), 3, fp); if (rc) return rc; rc = put_entry(key, 1, len, fp); if (rc) return rc; return 0; } /* * cond_write_cond_av_list doesn't write out the av_list nodes. * Instead it writes out the key/value pairs from the avtab. This * is necessary because there is no way to uniquely identifying rules * in the avtab so it is not possible to associate individual rules * in the avtab with a conditional without saving them as part of * the conditional. This means that the avtab with the conditional * rules will not be saved but will be rebuilt on policy load. */ static int cond_write_av_list(struct policydb *p, struct cond_av_list *list, struct policy_file *fp) { __le32 buf[1]; struct cond_av_list *cur_list; u32 len; int rc; len = 0; for (cur_list = list; cur_list != NULL; cur_list = cur_list->next) len++; buf[0] = cpu_to_le32(len); rc = put_entry(buf, sizeof(u32), 1, fp); if (rc) return rc; if (len == 0) return 0; for (cur_list = list; cur_list != NULL; cur_list = cur_list->next) { rc = avtab_write_item(p, cur_list->node, fp); if (rc) return rc; } return 0; } static int cond_write_node(struct policydb *p, struct cond_node *node, struct policy_file *fp) { struct cond_expr *cur_expr; __le32 buf[2]; int rc; u32 len = 0; buf[0] = cpu_to_le32(node->cur_state); rc = put_entry(buf, sizeof(u32), 1, fp); if (rc) return rc; for (cur_expr = node->expr; cur_expr != NULL; cur_expr = cur_expr->next) len++; buf[0] = cpu_to_le32(len); rc = put_entry(buf, sizeof(u32), 1, fp); if (rc) return rc; for (cur_expr = node->expr; cur_expr != NULL; cur_expr = cur_expr->next) { buf[0] = cpu_to_le32(cur_expr->expr_type); buf[1] = cpu_to_le32(cur_expr->bool); rc = put_entry(buf, sizeof(u32), 2, fp); if (rc) return rc; } rc = 
cond_write_av_list(p, node->true_list, fp); if (rc) return rc; rc = cond_write_av_list(p, node->false_list, fp); if (rc) return rc; return 0; } int cond_write_list(struct policydb *p, struct cond_node *list, void *fp) { struct cond_node *cur; u32 len; __le32 buf[1]; int rc; len = 0; for (cur = list; cur != NULL; cur = cur->next) len++; buf[0] = cpu_to_le32(len); rc = put_entry(buf, sizeof(u32), 1, fp); if (rc) return rc; for (cur = list; cur != NULL; cur = cur->next) { rc = cond_write_node(p, cur, fp); if (rc) return rc; } return 0; } /* Determine whether additional permissions are granted by the conditional * av table, and if so, add them to the result */ void cond_compute_av(struct avtab *ctab, struct avtab_key *key, struct av_decision *avd) { struct avtab_node *node; if (!ctab || !key || !avd) return; for (node = avtab_search_node(ctab, key); node; node = avtab_search_node_next(node, key->specified)) { if ((u16)(AVTAB_ALLOWED|AVTAB_ENABLED) == (node->key.specified & (AVTAB_ALLOWED|AVTAB_ENABLED))) avd->allowed |= node->datum.data; if ((u16)(AVTAB_AUDITDENY|AVTAB_ENABLED) == (node->key.specified & (AVTAB_AUDITDENY|AVTAB_ENABLED))) /* Since a '0' in an auditdeny mask represents a * permission we do NOT want to audit (dontaudit), we use * the '&' operand to ensure that all '0's in the mask * are retained (much unlike the allow and auditallow cases). */ avd->auditdeny &= node->datum.data; if ((u16)(AVTAB_AUDITALLOW|AVTAB_ENABLED) == (node->key.specified & (AVTAB_AUDITALLOW|AVTAB_ENABLED))) avd->auditallow |= node->datum.data; } return; }
gpl-2.0
XxXPachaXxX/PachaRX-VS4-3.0.16
arch/mips/lasat/ds1603.c
13641
3192
/* * Dallas Semiconductors 1603 RTC driver * * Brian Murphy <brian@murphy.dk> * */ #include <linux/kernel.h> #include <asm/lasat/lasat.h> #include <linux/delay.h> #include <asm/lasat/ds1603.h> #include <asm/time.h> #include "ds1603.h" #define READ_TIME_CMD 0x81 #define SET_TIME_CMD 0x80 #define TRIMMER_SET_CMD 0xC0 #define TRIMMER_VALUE_MASK 0x38 #define TRIMMER_SHIFT 3 struct ds_defs *ds1603; /* HW specific register functions */ static void rtc_reg_write(unsigned long val) { *ds1603->reg = val; } static unsigned long rtc_reg_read(void) { unsigned long tmp = *ds1603->reg; return tmp; } static unsigned long rtc_datareg_read(void) { unsigned long tmp = *ds1603->data_reg; return tmp; } static void rtc_nrst_high(void) { rtc_reg_write(rtc_reg_read() | ds1603->rst); } static void rtc_nrst_low(void) { rtc_reg_write(rtc_reg_read() & ~ds1603->rst); } static void rtc_cycle_clock(unsigned long data) { data |= ds1603->clk; rtc_reg_write(data); lasat_ndelay(250); if (ds1603->data_reversed) data &= ~ds1603->data; else data |= ds1603->data; data &= ~ds1603->clk; rtc_reg_write(data); lasat_ndelay(250 + ds1603->huge_delay); } static void rtc_write_databit(unsigned int bit) { unsigned long data = rtc_reg_read(); if (ds1603->data_reversed) bit = !bit; if (bit) data |= ds1603->data; else data &= ~ds1603->data; rtc_reg_write(data); lasat_ndelay(50 + ds1603->huge_delay); rtc_cycle_clock(data); } static unsigned int rtc_read_databit(void) { unsigned int data; data = (rtc_datareg_read() & (1 << ds1603->data_read_shift)) >> ds1603->data_read_shift; rtc_cycle_clock(rtc_reg_read()); return data; } static void rtc_write_byte(unsigned int byte) { int i; for (i = 0; i <= 7; i++) { rtc_write_databit(byte & 1L); byte >>= 1; } } static void rtc_write_word(unsigned long word) { int i; for (i = 0; i <= 31; i++) { rtc_write_databit(word & 1L); word >>= 1; } } static unsigned long rtc_read_word(void) { int i; unsigned long word = 0; unsigned long shift = 0; for (i = 0; i <= 31; i++) { word |= 
rtc_read_databit() << shift; shift++; } return word; } static void rtc_init_op(void) { rtc_nrst_high(); rtc_reg_write(rtc_reg_read() & ~ds1603->clk); lasat_ndelay(50); } static void rtc_end_op(void) { rtc_nrst_low(); lasat_ndelay(1000); } void read_persistent_clock(struct timespec *ts) { unsigned long word; unsigned long flags; spin_lock_irqsave(&rtc_lock, flags); rtc_init_op(); rtc_write_byte(READ_TIME_CMD); word = rtc_read_word(); rtc_end_op(); spin_unlock_irqrestore(&rtc_lock, flags); ts->tv_sec = word; ts->tv_nsec = 0; } int rtc_mips_set_mmss(unsigned long time) { unsigned long flags; spin_lock_irqsave(&rtc_lock, flags); rtc_init_op(); rtc_write_byte(SET_TIME_CMD); rtc_write_word(time); rtc_end_op(); spin_unlock_irqrestore(&rtc_lock, flags); return 0; } void ds1603_set_trimmer(unsigned int trimval) { rtc_init_op(); rtc_write_byte(((trimval << TRIMMER_SHIFT) & TRIMMER_VALUE_MASK) | (TRIMMER_SET_CMD)); rtc_end_op(); } void ds1603_disable(void) { ds1603_set_trimmer(TRIMMER_DISABLE_RTC); } void ds1603_enable(void) { ds1603_set_trimmer(TRIMMER_DEFAULT); }
gpl-2.0
Altaf-Mahdi/bacon
drivers/media/video/cx18/cx18-scb.c
13897
5794
/*
 * cx18 System Control Block initialization
 *
 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
 * 02111-1307  USA
 */

#include "cx18-driver.h"
#include "cx18-io.h"
#include "cx18-scb.h"

/*
 * Populate the System Control Block (SCB) that the five on-chip processors
 * (CPU, APU, HPU, PPU, EPU) use to exchange interrupts and mailboxes.
 *
 * Layout of the writes below: for each destination processor there is one
 * group of eight writes — four "X to dest" IRQ vectors interleaved with the
 * four matching "dest to X" acknowledge vectors.  After the IRQ wiring, each
 * mailbox's bus offset (SCB_OFFSET + offset of the field inside struct
 * cx18_scb) is published so the firmware can locate it.
 */
void cx18_init_scb(struct cx18 *cx)
{
	/* Map the SCB window and clear the whole 64 KiB region first. */
	cx18_setup_page(cx, SCB_OFFSET);
	cx18_memset_io(cx, cx->scb, 0, 0x10000);

	/* IRQ vectors targeting the CPU, plus the CPU's ack vectors. */
	cx18_writel(cx, IRQ_APU_TO_CPU,     &cx->scb->apu2cpu_irq);
	cx18_writel(cx, IRQ_CPU_TO_APU_ACK, &cx->scb->cpu2apu_irq_ack);
	cx18_writel(cx, IRQ_HPU_TO_CPU,     &cx->scb->hpu2cpu_irq);
	cx18_writel(cx, IRQ_CPU_TO_HPU_ACK, &cx->scb->cpu2hpu_irq_ack);
	cx18_writel(cx, IRQ_PPU_TO_CPU,     &cx->scb->ppu2cpu_irq);
	cx18_writel(cx, IRQ_CPU_TO_PPU_ACK, &cx->scb->cpu2ppu_irq_ack);
	cx18_writel(cx, IRQ_EPU_TO_CPU,     &cx->scb->epu2cpu_irq);
	cx18_writel(cx, IRQ_CPU_TO_EPU_ACK, &cx->scb->cpu2epu_irq_ack);

	/* IRQ vectors targeting the APU, plus the APU's ack vectors. */
	cx18_writel(cx, IRQ_CPU_TO_APU,     &cx->scb->cpu2apu_irq);
	cx18_writel(cx, IRQ_APU_TO_CPU_ACK, &cx->scb->apu2cpu_irq_ack);
	cx18_writel(cx, IRQ_HPU_TO_APU,     &cx->scb->hpu2apu_irq);
	cx18_writel(cx, IRQ_APU_TO_HPU_ACK, &cx->scb->apu2hpu_irq_ack);
	cx18_writel(cx, IRQ_PPU_TO_APU,     &cx->scb->ppu2apu_irq);
	cx18_writel(cx, IRQ_APU_TO_PPU_ACK, &cx->scb->apu2ppu_irq_ack);
	cx18_writel(cx, IRQ_EPU_TO_APU,     &cx->scb->epu2apu_irq);
	cx18_writel(cx, IRQ_APU_TO_EPU_ACK, &cx->scb->apu2epu_irq_ack);

	/* IRQ vectors targeting the HPU, plus the HPU's ack vectors. */
	cx18_writel(cx, IRQ_CPU_TO_HPU,     &cx->scb->cpu2hpu_irq);
	cx18_writel(cx, IRQ_HPU_TO_CPU_ACK, &cx->scb->hpu2cpu_irq_ack);
	cx18_writel(cx, IRQ_APU_TO_HPU,     &cx->scb->apu2hpu_irq);
	cx18_writel(cx, IRQ_HPU_TO_APU_ACK, &cx->scb->hpu2apu_irq_ack);
	cx18_writel(cx, IRQ_PPU_TO_HPU,     &cx->scb->ppu2hpu_irq);
	cx18_writel(cx, IRQ_HPU_TO_PPU_ACK, &cx->scb->hpu2ppu_irq_ack);
	cx18_writel(cx, IRQ_EPU_TO_HPU,     &cx->scb->epu2hpu_irq);
	cx18_writel(cx, IRQ_HPU_TO_EPU_ACK, &cx->scb->hpu2epu_irq_ack);

	/* IRQ vectors targeting the PPU, plus the PPU's ack vectors. */
	cx18_writel(cx, IRQ_CPU_TO_PPU,     &cx->scb->cpu2ppu_irq);
	cx18_writel(cx, IRQ_PPU_TO_CPU_ACK, &cx->scb->ppu2cpu_irq_ack);
	cx18_writel(cx, IRQ_APU_TO_PPU,     &cx->scb->apu2ppu_irq);
	cx18_writel(cx, IRQ_PPU_TO_APU_ACK, &cx->scb->ppu2apu_irq_ack);
	cx18_writel(cx, IRQ_HPU_TO_PPU,     &cx->scb->hpu2ppu_irq);
	cx18_writel(cx, IRQ_PPU_TO_HPU_ACK, &cx->scb->ppu2hpu_irq_ack);
	cx18_writel(cx, IRQ_EPU_TO_PPU,     &cx->scb->epu2ppu_irq);
	cx18_writel(cx, IRQ_PPU_TO_EPU_ACK, &cx->scb->ppu2epu_irq_ack);

	/* IRQ vectors targeting the EPU, plus the EPU's ack vectors. */
	cx18_writel(cx, IRQ_CPU_TO_EPU,     &cx->scb->cpu2epu_irq);
	cx18_writel(cx, IRQ_EPU_TO_CPU_ACK, &cx->scb->epu2cpu_irq_ack);
	cx18_writel(cx, IRQ_APU_TO_EPU,     &cx->scb->apu2epu_irq);
	cx18_writel(cx, IRQ_EPU_TO_APU_ACK, &cx->scb->epu2apu_irq_ack);
	cx18_writel(cx, IRQ_HPU_TO_EPU,     &cx->scb->hpu2epu_irq);
	cx18_writel(cx, IRQ_EPU_TO_HPU_ACK, &cx->scb->epu2hpu_irq_ack);
	cx18_writel(cx, IRQ_PPU_TO_EPU,     &cx->scb->ppu2epu_irq);
	cx18_writel(cx, IRQ_EPU_TO_PPU_ACK, &cx->scb->epu2ppu_irq_ack);

	/*
	 * Publish the bus-relative offset of every mailbox so each processor
	 * can find its peers' mailboxes inside the SCB page.
	 */
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, apu2cpu_mb),
			&cx->scb->apu2cpu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, hpu2cpu_mb),
			&cx->scb->hpu2cpu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, ppu2cpu_mb),
			&cx->scb->ppu2cpu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, epu2cpu_mb),
			&cx->scb->epu2cpu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, cpu2apu_mb),
			&cx->scb->cpu2apu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, hpu2apu_mb),
			&cx->scb->hpu2apu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, ppu2apu_mb),
			&cx->scb->ppu2apu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, epu2apu_mb),
			&cx->scb->epu2apu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, cpu2hpu_mb),
			&cx->scb->cpu2hpu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, apu2hpu_mb),
			&cx->scb->apu2hpu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, ppu2hpu_mb),
			&cx->scb->ppu2hpu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, epu2hpu_mb),
			&cx->scb->epu2hpu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, cpu2ppu_mb),
			&cx->scb->cpu2ppu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, apu2ppu_mb),
			&cx->scb->apu2ppu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, hpu2ppu_mb),
			&cx->scb->hpu2ppu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, epu2ppu_mb),
			&cx->scb->epu2ppu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, cpu2epu_mb),
			&cx->scb->cpu2epu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, apu2epu_mb),
			&cx->scb->apu2epu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, hpu2epu_mb),
			&cx->scb->hpu2epu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, ppu2epu_mb),
			&cx->scb->ppu2epu_mb_offset);

	/* Location of the shared state area, then flag the EPU as up. */
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, cpu_state),
			&cx->scb->ipc_offset);
	cx18_writel(cx, 1, &cx->scb->epu_state);
}
gpl-2.0
tprrt/linux-stable
drivers/gpu/drm/arm/malidp_drv.c
74
27760
// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
 * Author: Liviu Dudau <Liviu.Dudau@arm.com>
 *
 * ARM Mali DP500/DP550/DP650 KMS/DRM driver
 */

#include <linux/module.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
#include <linux/pm_runtime.h>
#include <linux/debugfs.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "malidp_drv.h"
#include "malidp_mw.h"
#include "malidp_regs.h"
#include "malidp_hw.h"

/* Timeout (ms) waiting for the hardware to latch a new configuration. */
#define MALIDP_CONF_VALID_TIMEOUT	250
#define AFBC_HEADER_SIZE		16
#define AFBC_SUPERBLK_ALIGNMENT		128

/* Program the full gamma coefficient table into the display engine. */
static void malidp_write_gamma_table(struct malidp_hw_device *hwdev,
				     u32 data[MALIDP_COEFFTAB_NUM_COEFFS])
{
	int i;
	/* Update all channels with a single gamma curve. */
	const u32 gamma_write_mask = GENMASK(18, 16);

	/*
	 * Always write an entire table, so the address field in
	 * DE_COEFFTAB_ADDR is 0 and we can use the gamma_write_mask bitmask
	 * directly.
	 */
	malidp_hw_write(hwdev, gamma_write_mask,
			hwdev->hw->map.coeffs_base + MALIDP_COEF_TABLE_ADDR);
	for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i)
		malidp_hw_write(hwdev, data[i],
				hwdev->hw->map.coeffs_base + MALIDP_COEF_TABLE_DATA);
}

/*
 * Apply a CRTC's gamma LUT on commit: disable the hardware gamma function
 * when no LUT is set, otherwise (re)write the table only when the LUT
 * object actually changed since the previous state.
 */
static void malidp_atomic_commit_update_gamma(struct drm_crtc *crtc,
					      struct drm_crtc_state *old_state)
{
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;

	if (!crtc->state->color_mgmt_changed)
		return;

	if (!crtc->state->gamma_lut) {
		malidp_hw_clearbits(hwdev,
				    MALIDP_DISP_FUNC_GAMMA,
				    MALIDP_DE_DISPLAY_FUNC);
	} else {
		struct malidp_crtc_state *mc =
			to_malidp_crtc_state(crtc->state);

		if (!old_state->gamma_lut || (crtc->state->gamma_lut->base.id !=
					      old_state->gamma_lut->base.id))
			malidp_write_gamma_table(hwdev, mc->gamma_coeffs);

		malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_GAMMA,
				  MALIDP_DE_DISPLAY_FUNC);
	}
}

/*
 * Apply a CRTC's color-transformation matrix (CTM) on commit; same
 * changed-object check as the gamma path above.
 */
static void malidp_atomic_commit_update_coloradj(struct drm_crtc *crtc,
						 struct drm_crtc_state *old_state)
{
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;
	int i;

	if (!crtc->state->color_mgmt_changed)
		return;

	if (!crtc->state->ctm) {
		malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_CADJ,
				    MALIDP_DE_DISPLAY_FUNC);
	} else {
		struct malidp_crtc_state *mc =
			to_malidp_crtc_state(crtc->state);

		if (!old_state->ctm || (crtc->state->ctm->base.id !=
					old_state->ctm->base.id))
			for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; ++i)
				malidp_hw_write(hwdev,
						mc->coloradj_coeffs[i],
						hwdev->hw->map.coeffs_base +
						MALIDP_COLOR_ADJ_COEF + 4 * i);

		malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_CADJ,
				  MALIDP_DE_DISPLAY_FUNC);
	}
}

/*
 * Program the scaling engine (SE) for this commit: either disable scaling,
 * or load coefficients and set control, input/output sizes and phase
 * registers.  The SE control register offset depends on whether the
 * register map has the CLEARIRQ feature (0x10 vs 0xC from se_base).
 */
static void malidp_atomic_commit_se_config(struct drm_crtc *crtc,
					   struct drm_crtc_state *old_state)
{
	struct malidp_crtc_state *cs = to_malidp_crtc_state(crtc->state);
	struct malidp_crtc_state *old_cs = to_malidp_crtc_state(old_state);
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;
	struct malidp_se_config *s = &cs->scaler_config;
	struct malidp_se_config *old_s = &old_cs->scaler_config;
	u32 se_control = hwdev->hw->map.se_base +
			 ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
			 0x10 : 0xC);
	u32 layer_control = se_control + MALIDP_SE_LAYER_CONTROL;
	u32 scr = se_control + MALIDP_SE_SCALING_CONTROL;
	u32 val;

	/* Set SE_CONTROL */
	if (!s->scale_enable) {
		val = malidp_hw_read(hwdev, se_control);
		val &= ~MALIDP_SE_SCALING_EN;
		malidp_hw_write(hwdev, val, se_control);
		return;
	}

	hwdev->hw->se_set_scaling_coeffs(hwdev, s, old_s);
	val = malidp_hw_read(hwdev, se_control);
	val |= MALIDP_SE_SCALING_EN | MALIDP_SE_ALPHA_EN;
	val &= ~MALIDP_SE_ENH(MALIDP_SE_ENH_MASK);
	val |= s->enhancer_enable ? MALIDP_SE_ENH(3) : 0;
	val |= MALIDP_SE_RGBO_IF_EN;
	malidp_hw_write(hwdev, val, se_control);

	/* Set IN_SIZE & OUT_SIZE. */
	val = MALIDP_SE_SET_V_SIZE(s->input_h) |
	      MALIDP_SE_SET_H_SIZE(s->input_w);
	malidp_hw_write(hwdev, val, layer_control + MALIDP_SE_L0_IN_SIZE);
	val = MALIDP_SE_SET_V_SIZE(s->output_h) |
	      MALIDP_SE_SET_H_SIZE(s->output_w);
	malidp_hw_write(hwdev, val, layer_control + MALIDP_SE_L0_OUT_SIZE);

	/* Set phase regs. */
	malidp_hw_write(hwdev, s->h_init_phase, scr + MALIDP_SE_H_INIT_PH);
	malidp_hw_write(hwdev, s->h_delta_phase, scr + MALIDP_SE_H_DELTA_PH);
	malidp_hw_write(hwdev, s->v_init_phase, scr + MALIDP_SE_V_INIT_PH);
	malidp_hw_write(hwdev, s->v_delta_phase, scr + MALIDP_SE_V_DELTA_PH);
}

/*
 * set the "config valid" bit and wait until the hardware acts on it
 */
static int malidp_set_and_wait_config_valid(struct drm_device *drm)
{
	struct malidp_drm *malidp = drm->dev_private;
	struct malidp_hw_device *hwdev = malidp->dev;
	int ret;

	hwdev->hw->set_config_valid(hwdev, 1);
	/* don't wait for config_valid flag if we are in config mode */
	if (hwdev->hw->in_config_mode(hwdev)) {
		atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_DONE);
		return 0;
	}

	/* The IRQ handler moves config_valid to DONE and wakes this queue. */
	ret = wait_event_interruptible_timeout(malidp->wq,
			atomic_read(&malidp->config_valid) == MALIDP_CONFIG_VALID_DONE,
			msecs_to_jiffies(MALIDP_CONF_VALID_TIMEOUT));

	return (ret > 0) ? 0 : -ETIMEDOUT;
}

/*
 * Finish the hardware part of an atomic commit: latch the new config when
 * the CRTC is active (retrying up to 5 extra times on timeout), or deliver
 * any pending vblank event directly when the CRTC is inactive.
 */
static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
{
	struct drm_device *drm = state->dev;
	struct malidp_drm *malidp = drm->dev_private;
	int loop = 5;

	malidp->event = malidp->crtc.state->event;
	malidp->crtc.state->event = NULL;

	if (malidp->crtc.state->active) {
		/*
		 * if we have an event to deliver to userspace, make sure
		 * the vblank is enabled as we are sending it from the IRQ
		 * handler.
		 */
		if (malidp->event)
			drm_crtc_vblank_get(&malidp->crtc);

		/* only set config_valid if the CRTC is enabled */
		if (malidp_set_and_wait_config_valid(drm) < 0) {
			/*
			 * make a loop around the second CVAL setting and
			 * try 5 times before giving up.
			 */
			while (loop--) {
				if (!malidp_set_and_wait_config_valid(drm))
					break;
			}
			DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
		}

	} else if (malidp->event) {
		/* CRTC inactive means vblank IRQ is disabled, send event directly */
		spin_lock_irq(&drm->event_lock);
		drm_crtc_send_vblank_event(&malidp->crtc, malidp->event);
		malidp->event = NULL;
		spin_unlock_irq(&drm->event_lock);
	}
	drm_atomic_helper_commit_hw_done(state);
}

/*
 * Custom atomic commit tail: powers the device up, marks registers as being
 * updated (so the IRQ handler ignores CVAL), then runs the standard
 * disable/plane/writeback/enable sequence before handing off to hw_done.
 */
static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *drm = state->dev;
	struct malidp_drm *malidp = drm->dev_private;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;
	bool fence_cookie = dma_fence_begin_signalling();

	pm_runtime_get_sync(drm->dev);

	/*
	 * set config_valid to a special value to let IRQ handlers
	 * know that we are updating registers
	 */
	atomic_set(&malidp->config_valid, MALIDP_CONFIG_START);
	malidp->dev->hw->set_config_valid(malidp->dev, 0);

	drm_atomic_helper_commit_modeset_disables(drm, state);

	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
		malidp_atomic_commit_update_gamma(crtc, old_crtc_state);
		malidp_atomic_commit_update_coloradj(crtc, old_crtc_state);
		malidp_atomic_commit_se_config(crtc, old_crtc_state);
	}

	drm_atomic_helper_commit_planes(drm, state, DRM_PLANE_COMMIT_ACTIVE_ONLY);

	malidp_mw_atomic_commit(drm, state);

	drm_atomic_helper_commit_modeset_enables(drm, state);

	malidp_atomic_commit_hw_done(state);

	dma_fence_end_signalling(fence_cookie);

	pm_runtime_put(drm->dev);

	drm_atomic_helper_cleanup_planes(drm, state);
}

static const struct drm_mode_config_helper_funcs malidp_mode_config_helpers = {
	.atomic_commit_tail = malidp_atomic_commit_tail,
};

/*
 * Validate the static capabilities of an AFBC framebuffer request:
 * format/modifier support, zero plane offset and a supported superblock
 * size (only 16x16 here, which requires 16-pixel-aligned dimensions).
 */
static bool malidp_verify_afbc_framebuffer_caps(struct drm_device *dev,
						const struct drm_mode_fb_cmd2 *mode_cmd)
{
	if (malidp_format_mod_supported(dev, mode_cmd->pixel_format,
					mode_cmd->modifier[0]) == false)
		return false;

	if (mode_cmd->offsets[0] != 0) {
		DRM_DEBUG_KMS("AFBC buffers' plane offset should be 0\n");
		return false;
	}

	switch (mode_cmd->modifier[0] & AFBC_SIZE_MASK) {
	case AFBC_SIZE_16X16:
		if ((mode_cmd->width % 16) || (mode_cmd->height % 16)) {
			DRM_DEBUG_KMS("AFBC buffers must be aligned to 16 pixels\n");
			return false;
		}
		break;
	default:
		DRM_DEBUG_KMS("Unsupported AFBC block size\n");
		return false;
	}

	return true;
}

/*
 * Check that the GEM object backing an AFBC framebuffer is large enough:
 * computes the expected AFBC size (headers + aligned superblock payload)
 * and cross-checks the pitch against width * bpp.
 */
static bool malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
						struct drm_file *file,
						const struct drm_mode_fb_cmd2 *mode_cmd)
{
	int n_superblocks = 0;
	const struct drm_format_info *info;
	struct drm_gem_object *objs = NULL;
	u32 afbc_superblock_size = 0, afbc_superblock_height = 0;
	u32 afbc_superblock_width = 0, afbc_size = 0;
	int bpp = 0;

	switch (mode_cmd->modifier[0] & AFBC_SIZE_MASK) {
	case AFBC_SIZE_16X16:
		afbc_superblock_height = 16;
		afbc_superblock_width = 16;
		break;
	default:
		DRM_DEBUG_KMS("AFBC superblock size is not supported\n");
		return false;
	}

	info = drm_get_format_info(dev, mode_cmd);

	n_superblocks = (mode_cmd->width / afbc_superblock_width) *
		(mode_cmd->height / afbc_superblock_height);

	bpp = malidp_format_get_bpp(info->format);

	afbc_superblock_size = (bpp * afbc_superblock_width * afbc_superblock_height)
				/ BITS_PER_BYTE;

	afbc_size = ALIGN(n_superblocks * AFBC_HEADER_SIZE, AFBC_SUPERBLK_ALIGNMENT);
	afbc_size += n_superblocks * ALIGN(afbc_superblock_size, AFBC_SUPERBLK_ALIGNMENT);

	if ((mode_cmd->width * bpp) != (mode_cmd->pitches[0] * BITS_PER_BYTE)) {
		DRM_DEBUG_KMS("Invalid value of (pitch * BITS_PER_BYTE) (=%u) "
			      "should be same as width (=%u) * bpp (=%u)\n",
			      (mode_cmd->pitches[0] * BITS_PER_BYTE),
			      mode_cmd->width, bpp);
		return false;
	}

	objs = drm_gem_object_lookup(file, mode_cmd->handles[0]);
	if (!objs) {
		DRM_DEBUG_KMS("Failed to lookup GEM object\n");
		return false;
	}

	if (objs->size < afbc_size) {
		DRM_DEBUG_KMS("buffer size (%zu) too small for AFBC buffer size = %u\n",
			      objs->size, afbc_size);
		drm_gem_object_put(objs);
		return false;
	}

	drm_gem_object_put(objs);

	return true;
}

/* Combined AFBC validation: capabilities first, then buffer size. */
static bool malidp_verify_afbc_framebuffer(struct drm_device *dev,
					   struct drm_file *file,
					   const struct drm_mode_fb_cmd2 *mode_cmd)
{
	if (malidp_verify_afbc_framebuffer_caps(dev, mode_cmd))
		return malidp_verify_afbc_framebuffer_size(dev, file, mode_cmd);

	return false;
}

/* Framebuffer creation hook; AFBC (modifier != 0) buffers get extra checks. */
static struct drm_framebuffer *
malidp_fb_create(struct drm_device *dev, struct drm_file *file,
		 const struct drm_mode_fb_cmd2 *mode_cmd)
{
	if (mode_cmd->modifier[0]) {
		if (!malidp_verify_afbc_framebuffer(dev, file, mode_cmd))
			return ERR_PTR(-EINVAL);
	}

	return drm_gem_fb_create(dev, file, mode_cmd);
}

static const struct drm_mode_config_funcs malidp_mode_config_funcs = {
	.fb_create = malidp_fb_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

/*
 * Initialise KMS mode config limits from the hardware's line size limits
 * and create the CRTC and writeback connector.
 */
static int malidp_init(struct drm_device *drm)
{
	int ret;
	struct malidp_drm *malidp = drm->dev_private;
	struct malidp_hw_device *hwdev = malidp->dev;

	drm_mode_config_init(drm);

	drm->mode_config.min_width = hwdev->min_line_size;
	drm->mode_config.min_height = hwdev->min_line_size;
	drm->mode_config.max_width = hwdev->max_line_size;
	drm->mode_config.max_height = hwdev->max_line_size;
	drm->mode_config.funcs = &malidp_mode_config_funcs;
	drm->mode_config.helper_private = &malidp_mode_config_helpers;

	ret = malidp_crtc_init(drm);
	if (ret)
		goto crtc_fail;

	ret = malidp_mw_connector_init(drm);
	if (ret)
		goto crtc_fail;

	return 0;

crtc_fail:
	drm_mode_config_cleanup(drm);
	return ret;
}

/* Tear down what malidp_init() created. */
static void malidp_fini(struct drm_device *drm)
{
	drm_mode_config_cleanup(drm);
}

/* Fetch the DE and SE interrupts from the device tree and hook them up. */
static int malidp_irq_init(struct platform_device *pdev)
{
	int irq_de, irq_se, ret = 0;
	struct drm_device *drm = dev_get_drvdata(&pdev->dev);
	struct malidp_drm *malidp = drm->dev_private;
	struct malidp_hw_device *hwdev = malidp->dev;

	/* fetch the interrupts from DT */
	irq_de = platform_get_irq_byname(pdev, "DE");
	if (irq_de < 0) {
		DRM_ERROR("no 'DE' IRQ specified!\n");
		return irq_de;
	}
	irq_se = platform_get_irq_byname(pdev, "SE");
	if (irq_se < 0) {
		DRM_ERROR("no 'SE' IRQ specified!\n");
		return irq_se;
	}

	ret = malidp_de_irq_init(drm, irq_de);
	if (ret)
		return ret;

	ret = malidp_se_irq_init(drm, irq_se);
	if (ret) {
		/* unwind the DE IRQ if the SE IRQ failed to initialise */
		malidp_de_irq_fini(hwdev);
		return ret;
	}

	return 0;
}

DEFINE_DRM_GEM_CMA_FOPS(fops);

/*
 * Dumb-buffer allocation with the pitch aligned for the worst case
 * (rotated buffers need stricter alignment).
 */
static int malidp_dumb_create(struct drm_file *file_priv,
			      struct drm_device *drm,
			      struct drm_mode_create_dumb *args)
{
	struct malidp_drm *malidp = drm->dev_private;
	/* allocate for the worst case scenario, i.e. rotated buffers */
	u8 alignment = malidp_hw_get_pitch_align(malidp->dev, 1);

	args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), alignment);

	return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
}

#ifdef CONFIG_DEBUG_FS

/* Reset one error-statistics record to its pristine state. */
static void malidp_error_stats_init(struct malidp_error_stats *error_stats)
{
	error_stats->num_errors = 0;
	error_stats->last_error_status = 0;
	error_stats->last_error_vblank = -1;
}

/* Record a hardware error (called from IRQ context, hence irqsave). */
void malidp_error(struct malidp_drm *malidp,
		  struct malidp_error_stats *error_stats, u32 status,
		  u64 vblank)
{
	unsigned long irqflags;

	spin_lock_irqsave(&malidp->errors_lock, irqflags);
	error_stats->last_error_status = status;
	error_stats->last_error_vblank = vblank;
	error_stats->num_errors++;
	spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
}

/* Pretty-print one stats snapshot (taken by value, so no locking here). */
static void malidp_error_stats_dump(const char *prefix,
				    struct malidp_error_stats error_stats,
				    struct seq_file *m)
{
	seq_printf(m, "[%s] num_errors : %d\n", prefix,
		   error_stats.num_errors);
	seq_printf(m, "[%s] last_error_status  : 0x%08x\n", prefix,
		   error_stats.last_error_status);
	seq_printf(m, "[%s] last_error_vblank : %lld\n", prefix,
		   error_stats.last_error_vblank);
}

/* seq_file show callback: snapshot both stats under the lock, then dump. */
static int malidp_show_stats(struct seq_file *m, void *arg)
{
	struct drm_device *drm = m->private;
	struct malidp_drm *malidp = drm->dev_private;
	unsigned long irqflags;
	struct malidp_error_stats de_errors, se_errors;

	spin_lock_irqsave(&malidp->errors_lock, irqflags);
	de_errors = malidp->de_errors;
	se_errors = malidp->se_errors;
	spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
	malidp_error_stats_dump("DE", de_errors, m);
	malidp_error_stats_dump("SE", se_errors, m);
	return 0;
}

static int malidp_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, malidp_show_stats, inode->i_private);
}

/* Any write to the debugfs file resets both error counters. */
static ssize_t malidp_debugfs_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *drm = m->private;
	struct malidp_drm *malidp = drm->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&malidp->errors_lock, irqflags);
	malidp_error_stats_init(&malidp->de_errors);
	malidp_error_stats_init(&malidp->se_errors);
	spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
	return len;
}

static const struct file_operations malidp_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = malidp_debugfs_open,
	.read = seq_read,
	.write = malidp_debugfs_write,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Create the "debug" debugfs entry and initialise the stats/lock. */
static void malidp_debugfs_init(struct drm_minor *minor)
{
	struct malidp_drm *malidp = minor->dev->dev_private;

	malidp_error_stats_init(&malidp->de_errors);
	malidp_error_stats_init(&malidp->se_errors);
	spin_lock_init(&malidp->errors_lock);
	debugfs_create_file("debug", S_IRUGO | S_IWUSR, minor->debugfs_root,
			    minor->dev, &malidp_debugfs_fops);
}

#endif //CONFIG_DEBUG_FS

static const struct drm_driver malidp_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(malidp_dumb_create),
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = malidp_debugfs_init,
#endif
	.fops = &fops,
	.name = "mali-dp",
	.desc = "ARM Mali Display Processor driver",
	.date = "20160106",
	.major = 1,
	.minor = 0,
};

static const struct of_device_id malidp_drm_of_match[] = {
	{
		.compatible = "arm,mali-dp500",
		.data = &malidp_device[MALIDP_500]
	},
	{
		.compatible = "arm,mali-dp550",
		.data = &malidp_device[MALIDP_550]
	},
	{
		.compatible = "arm,mali-dp650",
		.data = &malidp_device[MALIDP_650]
	},
	{},
};
MODULE_DEVICE_TABLE(of, malidp_drm_of_match);

/*
 * Cross-check the device-tree compatible string against the product id the
 * hardware actually reports, handling DP500's different CORE_ID location.
 */
static bool malidp_is_compatible_hw_id(struct malidp_hw_device *hwdev,
				       const struct of_device_id *dev_id)
{
	u32 core_id;
	const char *compatstr_dp500 = "arm,mali-dp500";
	bool is_dp500;
	bool dt_is_dp500;

	/*
	 * The DP500 CORE_ID register is in a different location, so check it
	 * first. If the product id field matches, then this is DP500, otherwise
	 * check the DP550/650 CORE_ID register.
	 */
	core_id = malidp_hw_read(hwdev, MALIDP500_DC_BASE + MALIDP_DE_CORE_ID);
	/* Offset 0x18 will never read 0x500 on products other than DP500. */
	is_dp500 = (MALIDP_PRODUCT_ID(core_id) == 0x500);
	dt_is_dp500 = strnstr(dev_id->compatible, compatstr_dp500,
			      sizeof(dev_id->compatible)) != NULL;
	if (is_dp500 != dt_is_dp500) {
		DRM_ERROR("Device-tree expects %s, but hardware %s DP500.\n",
			  dev_id->compatible, is_dp500 ? "is" : "is not");
		return false;
	} else if (!dt_is_dp500) {
		u16 product_id;
		char buf[32];

		core_id = malidp_hw_read(hwdev, MALIDP550_DC_BASE +
					 MALIDP_DE_CORE_ID);
		product_id = MALIDP_PRODUCT_ID(core_id);
		snprintf(buf, sizeof(buf), "arm,mali-dp%X", product_id);
		if (!strnstr(dev_id->compatible, buf,
			     sizeof(dev_id->compatible))) {
			DRM_ERROR("Device-tree expects %s, but hardware is DP%03X.\n",
				  dev_id->compatible, product_id);
			return false;
		}
	}
	return true;
}

/*
 * Verify that the MMIO resource from the device tree is big enough for the
 * register space of the product named in the compatible string.
 */
static bool malidp_has_sufficient_address_space(const struct resource *res,
						const struct of_device_id *dev_id)
{
	resource_size_t res_size = resource_size(res);
	const char *compatstr_dp500 = "arm,mali-dp500";

	if (!strnstr(dev_id->compatible, compatstr_dp500,
		     sizeof(dev_id->compatible)))
		return res_size >= MALIDP550_ADDR_SPACE_SIZE;
	else if (res_size < MALIDP500_ADDR_SPACE_SIZE)
		return false;
	return true;
}

/* sysfs "core_id" attribute: expose the raw CORE_ID register value. */
static ssize_t core_id_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct malidp_drm *malidp = drm->dev_private;

	return snprintf(buf, PAGE_SIZE, "%08x\n", malidp->core_id);
}

static DEVICE_ATTR_RO(core_id);

static struct attribute *mali_dp_attrs[] = {
	&dev_attr_core_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mali_dp);

/* Number of lines (R, G, B) on the display output port. */
#define MAX_OUTPUT_CHANNELS	3

/*
 * Runtime-PM suspend: tear down IRQs and gate the clocks.  Only legal when
 * the hardware is already in config mode (scanout stopped).
 */
static int malidp_runtime_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct malidp_drm *malidp = drm->dev_private;
	struct malidp_hw_device *hwdev = malidp->dev;

	/* we can only suspend if the hardware is in config mode */
	WARN_ON(!hwdev->hw->in_config_mode(hwdev));

	malidp_se_irq_fini(hwdev);
	malidp_de_irq_fini(hwdev);
	hwdev->pm_suspended = true;
	clk_disable_unprepare(hwdev->mclk);
	clk_disable_unprepare(hwdev->aclk);
	clk_disable_unprepare(hwdev->pclk);

	return 0;
}

/* Runtime-PM resume: ungate clocks and re-arm the DE/SE interrupts. */
static int malidp_runtime_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct malidp_drm *malidp = drm->dev_private;
	struct malidp_hw_device *hwdev = malidp->dev;

	clk_prepare_enable(hwdev->pclk);
	clk_prepare_enable(hwdev->aclk);
	clk_prepare_enable(hwdev->mclk);
	hwdev->pm_suspended = false;
	malidp_de_irq_hw_init(hwdev);
	malidp_se_irq_hw_init(hwdev);

	return 0;
}

/*
 * Component-master bind: allocate driver state, map registers, grab clocks,
 * identify the hardware, set up KMS, bind sub-components, wire up IRQs and
 * vblank, then register the DRM device.  The error labels unwind each stage
 * in strict reverse order — do not reorder them.
 */
static int malidp_bind(struct device *dev)
{
	struct resource *res;
	struct drm_device *drm;
	struct malidp_drm *malidp;
	struct malidp_hw_device *hwdev;
	struct platform_device *pdev = to_platform_device(dev);
	struct of_device_id const *dev_id;
	struct drm_encoder *encoder;
	/* number of lines for the R, G and B output */
	u8 output_width[MAX_OUTPUT_CHANNELS];
	int ret = 0, i;
	u32 version, out_depth = 0;

	malidp = devm_kzalloc(dev, sizeof(*malidp), GFP_KERNEL);
	if (!malidp)
		return -ENOMEM;

	hwdev = devm_kzalloc(dev, sizeof(*hwdev), GFP_KERNEL);
	if (!hwdev)
		return -ENOMEM;

	hwdev->hw = (struct malidp_hw *)of_device_get_match_data(dev);
	malidp->dev = hwdev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hwdev->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hwdev->regs))
		return PTR_ERR(hwdev->regs);

	hwdev->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(hwdev->pclk))
		return PTR_ERR(hwdev->pclk);

	hwdev->aclk = devm_clk_get(dev, "aclk");
	if (IS_ERR(hwdev->aclk))
		return PTR_ERR(hwdev->aclk);

	hwdev->mclk = devm_clk_get(dev, "mclk");
	if (IS_ERR(hwdev->mclk))
		return PTR_ERR(hwdev->mclk);

	hwdev->pxlclk = devm_clk_get(dev, "pxlclk");
	if (IS_ERR(hwdev->pxlclk))
		return PTR_ERR(hwdev->pxlclk);

	/* Get the optional framebuffer memory resource */
	ret = of_reserved_mem_device_init(dev);
	if (ret && ret != -ENODEV)
		return ret;

	drm = drm_dev_alloc(&malidp_driver, dev);
	if (IS_ERR(drm)) {
		ret = PTR_ERR(drm);
		goto alloc_fail;
	}

	drm->dev_private = malidp;
	dev_set_drvdata(dev, drm);

	/* Enable power management */
	pm_runtime_enable(dev);

	/* Resume device to enable the clocks */
	if (pm_runtime_enabled(dev))
		pm_runtime_get_sync(dev);
	else
		malidp_runtime_pm_resume(dev);

	dev_id = of_match_device(malidp_drm_of_match, dev);
	if (!dev_id) {
		ret = -EINVAL;
		goto query_hw_fail;
	}

	if (!malidp_has_sufficient_address_space(res, dev_id)) {
		DRM_ERROR("Insufficient address space in device-tree.\n");
		ret = -EINVAL;
		goto query_hw_fail;
	}

	if (!malidp_is_compatible_hw_id(hwdev, dev_id)) {
		ret = -EINVAL;
		goto query_hw_fail;
	}

	ret = hwdev->hw->query_hw(hwdev);
	if (ret) {
		DRM_ERROR("Invalid HW configuration\n");
		goto query_hw_fail;
	}

	version = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_DE_CORE_ID);
	DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16,
		 (version >> 12) & 0xf, (version >> 8) & 0xf);

	malidp->core_id = version;

	ret = of_property_read_u32(dev->of_node,
				   "arm,malidp-arqos-value",
				   &hwdev->arqos_value);
	if (ret)
		hwdev->arqos_value = 0x0;

	/* set the number of lines used for output of RGB data */
	ret = of_property_read_u8_array(dev->of_node,
					"arm,malidp-output-port-lines",
					output_width, MAX_OUTPUT_CHANNELS);
	if (ret)
		goto query_hw_fail;

	/* pack per-channel widths into one register value, R in the high byte */
	for (i = 0; i < MAX_OUTPUT_CHANNELS; i++)
		out_depth = (out_depth << 8) | (output_width[i] & 0xf);
	malidp_hw_write(hwdev, out_depth, hwdev->hw->map.out_depth_base);
	hwdev->output_color_depth = out_depth;

	atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_INIT);
	init_waitqueue_head(&malidp->wq);

	ret = malidp_init(drm);
	if (ret < 0)
		goto query_hw_fail;

	/* Set the CRTC's port so that the encoder component can find it */
	malidp->crtc.port = of_graph_get_port_by_id(dev->of_node, 0);

	ret = component_bind_all(dev, drm);
	if (ret) {
		DRM_ERROR("Failed to bind all components\n");
		goto bind_fail;
	}

	/* We expect to have a maximum of two encoders one for the actual
	 * display and a virtual one for the writeback connector
	 */
	WARN_ON(drm->mode_config.num_encoder > 2);
	list_for_each_entry(encoder, &drm->mode_config.encoder_list, head) {
		encoder->possible_clones =
				(1 << drm->mode_config.num_encoder) -  1;
	}

	ret = malidp_irq_init(pdev);
	if (ret < 0)
		goto irq_init_fail;

	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (ret < 0) {
		DRM_ERROR("failed to initialise vblank\n");
		goto vblank_fail;
	}
	pm_runtime_put(dev);

	drm_mode_config_reset(drm);

	drm_kms_helper_poll_init(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto register_fail;

	drm_fbdev_generic_setup(drm, 32);

	return 0;

register_fail:
	drm_kms_helper_poll_fini(drm);
	pm_runtime_get_sync(dev);
vblank_fail:
	malidp_se_irq_fini(hwdev);
	malidp_de_irq_fini(hwdev);
irq_init_fail:
	drm_atomic_helper_shutdown(drm);
	component_unbind_all(dev, drm);
bind_fail:
	of_node_put(malidp->crtc.port);
	malidp->crtc.port = NULL;
	malidp_fini(drm);
query_hw_fail:
	pm_runtime_put(dev);
	if (pm_runtime_enabled(dev))
		pm_runtime_disable(dev);
	else
		malidp_runtime_pm_suspend(dev);
	drm->dev_private = NULL;
	dev_set_drvdata(dev, NULL);
	drm_dev_put(drm);
alloc_fail:
	of_reserved_mem_device_release(dev);

	return ret;
}

/* Component-master unbind: mirror of the tail of malidp_bind(). */
static void malidp_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct malidp_drm *malidp = drm->dev_private;
	struct malidp_hw_device *hwdev = malidp->dev;

	drm_dev_unregister(drm);
	drm_kms_helper_poll_fini(drm);
	pm_runtime_get_sync(dev);
	drm_atomic_helper_shutdown(drm);
	malidp_se_irq_fini(hwdev);
	malidp_de_irq_fini(hwdev);
	component_unbind_all(dev, drm);
	of_node_put(malidp->crtc.port);
	malidp->crtc.port = NULL;
	malidp_fini(drm);
	pm_runtime_put(dev);
	if (pm_runtime_enabled(dev))
		pm_runtime_disable(dev);
	else
		malidp_runtime_pm_suspend(dev);
	drm->dev_private = NULL;
	dev_set_drvdata(dev, NULL);
	drm_dev_put(drm);
	of_reserved_mem_device_release(dev);
}

static const struct component_master_ops malidp_master_ops = {
	.bind = malidp_bind,
	.unbind = malidp_unbind,
};

/* Match a component device against the OF node recorded at probe time. */
static int malidp_compare_dev(struct device *dev, void *data)
{
	struct device_node *np = data;

	return dev->of_node == np;
}

/* Platform probe: find the single output port and register the master. */
static int malidp_platform_probe(struct platform_device *pdev)
{
	struct device_node *port;
	struct component_match *match = NULL;

	if (!pdev->dev.of_node)
		return -ENODEV;

	/* there is only one output port inside each device, find it */
	port = of_graph_get_remote_node(pdev->dev.of_node, 0, 0);
	if (!port)
		return -ENODEV;

	drm_of_component_match_add(&pdev->dev, &match, malidp_compare_dev,
				   port);
	of_node_put(port);
	return component_master_add_with_match(&pdev->dev, &malidp_master_ops,
					       match);
}

static int malidp_platform_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &malidp_master_ops);
	return 0;
}

/* System sleep: save/restore the whole KMS state via the mode-config helpers. */
static int __maybe_unused malidp_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_suspend(drm);
}

static int __maybe_unused malidp_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	drm_mode_config_helper_resume(drm);

	return 0;
}

/* Late suspend: force a runtime suspend if runtime PM hasn't done it yet. */
static int __maybe_unused malidp_pm_suspend_late(struct device *dev)
{
	if (!pm_runtime_status_suspended(dev)) {
		malidp_runtime_pm_suspend(dev);
		pm_runtime_set_suspended(dev);
	}
	return 0;
}

static int __maybe_unused malidp_pm_resume_early(struct device *dev)
{
	malidp_runtime_pm_resume(dev);
	pm_runtime_set_active(dev);
	return 0;
}

static const struct dev_pm_ops malidp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(malidp_pm_suspend, malidp_pm_resume) \
	SET_LATE_SYSTEM_SLEEP_PM_OPS(malidp_pm_suspend_late, malidp_pm_resume_early) \
	SET_RUNTIME_PM_OPS(malidp_runtime_pm_suspend, malidp_runtime_pm_resume, NULL)
};

static struct platform_driver malidp_platform_driver = {
	.probe		= malidp_platform_probe,
	.remove		= malidp_platform_remove,
	.driver	= {
		.name = "mali-dp",
		.pm = &malidp_pm_ops,
		.of_match_table	= malidp_drm_of_match,
		.dev_groups = mali_dp_groups,
	},
};

module_platform_driver(malidp_platform_driver);

MODULE_AUTHOR("Liviu Dudau <Liviu.Dudau@arm.com>");
MODULE_DESCRIPTION("ARM Mali DP DRM driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
TeamWin/android_kernel_lge_msm8974
arch/arm/mach-msm/qdsp5/audio_qcelp_in.c
330
39326
/* arch/arm/mach-msm/qdsp5/audio_qcelp_in.c
 *
 * qcelp audio input device
 *
 * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * This code is based in part on arch/arm/mach-msm/qdsp5v2/audio_qcelp_in.c,
 * Copyright (C) 2008 Google, Inc.
 * Copyright (C) 2008 HTC Corporation
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/msm_audio_qcp.h>
#include <linux/memory_alloc.h>
#include <linux/msm_ion.h>

#include <asm/atomic.h>
#include <asm/ioctls.h>

#include <mach/msm_memtypes.h>
#include <mach/msm_adsp.h>
#include <mach/msm_rpcrouter.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>

#include "audmgr.h"

#include <mach/qdsp5/qdsp5audpreproc.h>
#include <mach/qdsp5/qdsp5audpreproccmdi.h>
#include <mach/qdsp5/qdsp5audpreprocmsg.h>
#include <mach/qdsp5/qdsp5audreccmdi.h>
#include <mach/qdsp5/qdsp5audrecmsg.h>
#include <mach/debug_mm.h>

#define FRAME_HEADER_SIZE	8 /* 8 bytes frame header */
#define NT_FRAME_HEADER_SIZE	24 /* 24 bytes frame header */
/* FRAME_NUM must be a power of two */
#define FRAME_NUM	8
#define QCELP_FRAME_SIZE	36 /* 36 bytes data */
/*Tunnel mode : 36 bytes data + 8 byte header*/
#define FRAME_SIZE	(QCELP_FRAME_SIZE + FRAME_HEADER_SIZE)
 /* 36 bytes data  + 24 meta field*/
#define NT_FRAME_SIZE	(QCELP_FRAME_SIZE + NT_FRAME_HEADER_SIZE)
#define DMASZ		(FRAME_SIZE * FRAME_NUM)
#define NT_DMASZ	(NT_FRAME_SIZE * FRAME_NUM)
#define OUT_FRAME_NUM	2
#define OUT_BUFFER_SIZE (4 * 1024 + NT_FRAME_HEADER_SIZE)
#define BUFFER_SIZE	(OUT_BUFFER_SIZE * OUT_FRAME_NUM)

/* Offset from beginning of buffer*/
#define AUDPREPROC_QCELP_EOS_FLG_OFFSET 0x0A
#define AUDPREPROC_QCELP_EOS_FLG_MASK 0x01
#define AUDPREPROC_QCELP_EOS_NONE 0x0 /* No EOS detected */
#define AUDPREPROC_QCELP_EOS_SET 0x1 /* EOS set in meta field */

/* One DMA frame buffer shared with the DSP. */
struct buffer {
	void *data;
	uint32_t size;
	uint32_t read;
	uint32_t addr;
	uint32_t used;
	uint32_t mfield_sz; /*only useful for data read from driver*/
};

/* Per-open device state for the QCELP encoder input driver. */
struct audio_qcelp_in {
	struct buffer in[FRAME_NUM];

	spinlock_t dsp_lock;

	atomic_t in_bytes;
	atomic_t in_samples;

	struct mutex lock;
	struct mutex read_lock;
	wait_queue_head_t wait;
	wait_queue_head_t wait_enable;
	/*write section*/
	struct buffer out[OUT_FRAME_NUM];

	uint8_t out_head;
	uint8_t out_tail;
	uint8_t out_needed;	/* number of buffers the dsp is waiting for */
	uint32_t out_count;

	struct mutex write_lock;
	wait_queue_head_t write_wait;
	int32_t out_phys; /* physical address of write buffer */
	char *out_data;
	int mfield; /* meta field embedded in data */
	int wflush; /*write flush */
	int rflush; /*read flush*/
	int out_frame_cnt;

	struct msm_adsp_module *audrec;

	/* configuration to use on next enable */
	uint32_t samp_rate;
	uint32_t channel_mode;
	uint32_t buffer_size; /* Frame size (36 bytes) */
	uint32_t enc_type; /* 11 for QCELP */
	uint32_t mode; /* T or NT Mode*/

	struct msm_audio_qcelp_enc_config cfg;

	uint32_t dsp_cnt;
	uint32_t in_head; /* next buffer dsp will write */
	uint32_t in_tail; /* next buffer read() will read */
	uint32_t in_count; /* number of buffers available to read() */
	uint32_t eos_ack;
	uint32_t flush_ack;

	const char *module_name;
	unsigned queue_ids;
	uint16_t enc_id; /* Session Id */

	unsigned short samp_rate_index;
	uint32_t audrec_obj_idx ;

	struct audmgr audmgr;

	/* data allocated for various buffers */
	char *data;
	dma_addr_t phys;

	void *map_v_read;
	void *map_v_write;

	int opened;
	int enabled;
	int running;
	int stopped; /* set when stopped, cleared on flush */
	struct ion_client *client;
	struct ion_handle *input_buff_handle;
	struct ion_handle *output_buff_handle;

	struct audrec_session_info session_info; /*audrec session info*/
};

/* Tunnel-mode frame header as produced by the DSP. */
struct audio_frame {
	uint16_t frame_count_lsw;
	uint16_t frame_count_msw;
	uint16_t frame_length;
	uint16_t erased_pcm;
	unsigned char raw_bitstream[];
} __packed;

/* Non-tunnel-mode frame header: adds timestamps and flags to the above. */
struct audio_frame_nt {
	uint16_t metadata_len;
	uint16_t frame_count_lsw;
	uint16_t frame_count_msw;
	uint16_t frame_length;
	uint16_t erased_pcm;
	uint16_t reserved;
	uint16_t time_stamp_dword_lsw;
	uint16_t time_stamp_dword_msw;
	uint16_t time_stamp_lsw;
	uint16_t time_stamp_msw;
	uint16_t nflag_lsw;
	uint16_t nflag_msw;
	unsigned char raw_bitstream[]; /* samples */
} __packed;

/* Metadata prepended to encoded frames handed to userspace. */
struct qcelp_encoded_meta_out {
	uint16_t metadata_len;
	uint16_t time_stamp_dword_lsw;
	uint16_t time_stamp_dword_msw;
	uint16_t time_stamp_lsw;
	uint16_t time_stamp_msw;
	uint16_t nflag_lsw;
	uint16_t nflag_msw;
};

/* Audrec Queue command sent macro's */
#define audio_send_queue_pre(audio, cmd, len) \
	msm_adsp_write(audio->audpre, QDSP_uPAudPreProcCmdQueue, cmd, len)

#define audio_send_queue_recbs(audio, cmd, len) \
	msm_adsp_write(audio->audrec, ((audio->queue_ids & 0xFFFF0000) >> 16),\
			cmd, len)
#define audio_send_queue_rec(audio, cmd, len) \
	msm_adsp_write(audio->audrec, (audio->queue_ids & 0x0000FFFF),\
			cmd, len)

static int audqcelp_in_dsp_enable(struct audio_qcelp_in *audio, int enable);
static int audqcelp_in_encparam_config(struct audio_qcelp_in *audio);
static int audqcelp_in_encmem_config(struct audio_qcelp_in *audio);
static int audqcelp_in_dsp_read_buffer(struct audio_qcelp_in *audio,
				uint32_t read_cnt);
static void audqcelp_in_flush(struct audio_qcelp_in *audio);

static void audqcelp_in_get_dsp_frames(struct audio_qcelp_in *audio);
static int audpcm_config(struct audio_qcelp_in *audio);
static void audqcelp_out_flush(struct audio_qcelp_in *audio);
static int audqcelp_in_routing_mode_config(struct audio_qcelp_in *audio);
static void audrec_pcm_send_data(struct audio_qcelp_in *audio, unsigned needed);
static void audqcelp_nt_in_get_dsp_frames(struct audio_qcelp_in *audio);
static void audqcelp_in_flush(struct audio_qcelp_in *audio);

/*
 * Map an RPC sample-rate index to its rate in Hz; unknown indices fall
 * back to 11025.
 */
static unsigned convert_samp_index(unsigned index)
{
	switch (index) {
	case RPC_AUD_DEF_SAMPLE_RATE_48000:	return 48000;
	case RPC_AUD_DEF_SAMPLE_RATE_44100:	return 44100;
	case RPC_AUD_DEF_SAMPLE_RATE_32000:	return 32000;
	case RPC_AUD_DEF_SAMPLE_RATE_24000:	return 24000;
	case RPC_AUD_DEF_SAMPLE_RATE_22050:	return 22050;
	case RPC_AUD_DEF_SAMPLE_RATE_16000:	return 16000;
	case RPC_AUD_DEF_SAMPLE_RATE_12000:	return 12000;
	case RPC_AUD_DEF_SAMPLE_RATE_11025:	return 11025;
	case RPC_AUD_DEF_SAMPLE_RATE_8000:	return 8000;
	default:				return 11025;
	}
}

/* ------------------- dsp --------------------- */
/* Event callback for audpreproc DSP messages; logs, does not act. */
static void audpre_dsp_event(void *data, unsigned id, void *event_data)
{

	uint16_t *msg = event_data;

	if (!msg)
		return;
	switch (id) {
	case AUDPREPROC_MSG_CMD_CFG_DONE_MSG:
		MM_DBG("type %d, status_flag %d\n",\
			msg[0], msg[1]);
		break;
	case AUDPREPROC_MSG_ERROR_MSG_ID:
		MM_INFO("err_index %d\n", msg[0]);
		break;
	case ADSP_MESSAGE_ID:
		MM_DBG("Received ADSP event: module enable(audpreproctask)\n");
		break;
	default:
		MM_ERR("unknown event %d\n", id);
	}
}

/* must be called with audio->lock held */
static int audqcelp_in_enable(struct audio_qcelp_in *audio)
{
	struct audmgr_config cfg;
	int rc;

	if (audio->enabled)
		return 0;

	cfg.tx_rate = audio->samp_rate;
	cfg.rx_rate = RPC_AUD_DEF_SAMPLE_RATE_NONE;
	cfg.def_method = RPC_AUD_DEF_METHOD_RECORD;
	cfg.codec = RPC_AUD_DEF_CODEC_13K;
	cfg.snd_method = RPC_SND_METHOD_MIDI;

	if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL) {
		rc = audmgr_enable(&audio->audmgr, &cfg);
		if (rc < 0)
			return rc;

		if (audpreproc_enable(audio->enc_id,
				&audpre_dsp_event, audio)) {
			MM_ERR("msm_adsp_enable(audpreproc) failed\n");
			audmgr_disable(&audio->audmgr);
			return -ENODEV;
		}

		/* update aurec session info in audpreproc layer */
		audio->session_info.session_id = audio->enc_id;
		audio->session_info.sampling_freq =
convert_samp_index(audio->samp_rate);
		audpreproc_update_audrec_info(&audio->session_info);
	}

	if (msm_adsp_enable(audio->audrec)) {
		/* unwind tunnel-mode setup done above */
		if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL) {
			audpreproc_disable(audio->enc_id, audio);
			audmgr_disable(&audio->audmgr);
		}
		MM_ERR("msm_adsp_enable(audrec) failed\n");
		return -ENODEV;
	}

	audio->enabled = 1;
	audqcelp_in_dsp_enable(audio, 1);

	return 0;
}

/*
 * Tear down the encoder session; reverse of audqcelp_in_enable().
 * must be called with audio->lock held
 */
static int audqcelp_in_disable(struct audio_qcelp_in *audio)
{
	if (audio->enabled) {
		audio->enabled = 0;
		audqcelp_in_dsp_enable(audio, 0);
		/* give the DSP up to 1s to acknowledge the stop */
		wait_event_interruptible_timeout(audio->wait_enable,
				audio->running == 0, 1*HZ);
		audio->stopped = 1;
		wake_up(&audio->wait);
		msm_adsp_disable(audio->audrec);
		if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL) {
			audpreproc_disable(audio->enc_id, audio);
			audmgr_disable(&audio->audmgr);
			/* reset the sampling frequency information at
			 * audpreproc layer
			 */
			audio->session_info.sampling_freq = 0;
			audpreproc_update_audrec_info(&audio->session_info);
		}
	}
	return 0;
}

/*
 * Tunnel mode: reap one encoded frame the DSP just produced,
 * advance the in-ring head and kick the next DSP read.
 */
static void audqcelp_in_get_dsp_frames(struct audio_qcelp_in *audio)
{
	struct audio_frame *frame;
	uint32_t index;
	unsigned long flags;

	index = audio->in_head;

	/* the frame header sits immediately before the payload pointer */
	frame = (void *) (((char *)audio->in[index].data) - sizeof(*frame));
	spin_lock_irqsave(&audio->dsp_lock, flags);
	audio->in[index].size = frame->frame_length;

	/* statistics of read */
	atomic_add(audio->in[index].size, &audio->in_bytes);
	atomic_add(1, &audio->in_samples);

	audio->in_head = (audio->in_head + 1) & (FRAME_NUM - 1);

	/* If overflow, move the tail index foward. */
	if (audio->in_head == audio->in_tail) {
		MM_ERR("Error! not able to keep up the read\n");
		audio->in_tail = (audio->in_tail + 1) & (FRAME_NUM - 1);
		MM_ERR("in_count = %d\n", audio->in_count);
	} else
		audio->in_count++;

	audqcelp_in_dsp_read_buffer(audio, audio->dsp_cnt++);
	spin_unlock_irqrestore(&audio->dsp_lock, flags);

	wake_up(&audio->wait);
}

/*
 * Non-tunnel variant of frame reaping; the DSP is re-armed from the
 * read() path instead, so no read-buffer command is issued here.
 */
static void audqcelp_nt_in_get_dsp_frames(struct audio_qcelp_in *audio)
{
	struct audio_frame_nt *nt_frame;
	uint32_t index;
	unsigned long flags;

	index = audio->in_head;
	nt_frame = (void *) (((char *)audio->in[index].data) -
				sizeof(struct audio_frame_nt));
	spin_lock_irqsave(&audio->dsp_lock, flags);
	audio->in[index].size = nt_frame->frame_length;

	/* statistics of read */
	atomic_add(audio->in[index].size, &audio->in_bytes);
	atomic_add(1, &audio->in_samples);

	audio->in_head = (audio->in_head + 1) & (FRAME_NUM - 1);

	/* If overflow, move the tail index foward. */
	if (audio->in_head == audio->in_tail)
		MM_DBG("Error! not able to keep up the read\n");
	else
		audio->in_count++;

	spin_unlock_irqrestore(&audio->dsp_lock, flags);
	wake_up(&audio->wait);
}

/*
 * Hand one PCM input buffer (out[idx], 'len' bytes) to the encoder.
 * Lengths are converted to halfwords; a bare meta-field buffer
 * (len == NT_FRAME_HEADER_SIZE) is passed without payload.
 */
static int audrec_pcm_buffer_ptr_refresh(struct audio_qcelp_in *audio,
				       unsigned idx, unsigned len)
{
	struct audrec_cmd_pcm_buffer_ptr_refresh_arm_enc cmd;

	if (len == NT_FRAME_HEADER_SIZE)
		len = len / 2;
	else
		len = (len + NT_FRAME_HEADER_SIZE) / 2;
	MM_DBG("len = %d\n", len);
	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_id = AUDREC_CMD_PCM_BUFFER_PTR_REFRESH_ARM_TO_ENC;
	cmd.num_buffers = 1;
	if (cmd.num_buffers == 1) {
		cmd.buf_address_length[0] = (audio->out[idx].addr &
							0xffff0000) >> 16;
		cmd.buf_address_length[1] = (audio->out[idx].addr &
							0x0000ffff);
		cmd.buf_address_length[2] = (len & 0xffff0000) >> 16;
		cmd.buf_address_length[3] = (len & 0x0000ffff);
	}
	audio->out_frame_cnt++;
	return audio_send_queue_rec(audio, &cmd, sizeof(cmd));
}

/* Configure the DSP's PCM input path (non-tunnel mode). */
static int audpcm_config(struct audio_qcelp_in *audio)
{
	struct audrec_cmd_pcm_cfg_arm_to_enc cmd;
	MM_DBG("\n");
	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_id = AUDREC_CMD_PCM_CFG_ARM_TO_ENC;
	cmd.config_update_flag =
AUDREC_PCM_CONFIG_UPDATE_FLAG_ENABLE;
	cmd.enable_flag = AUDREC_ENABLE_FLAG_VALUE;
	cmd.sampling_freq = convert_samp_index(audio->samp_rate);
	if (!audio->channel_mode)
		cmd.channels = 1;
	else
		cmd.channels = 2;
	cmd.frequency_of_intimation = 1;
	cmd.max_number_of_buffers = OUT_FRAME_NUM;
	return audio_send_queue_rec(audio, &cmd, sizeof(cmd));
}

/* Select tunnel (0, default) or non-tunnel (1) routing on the DSP. */
static int audqcelp_in_routing_mode_config(struct audio_qcelp_in *audio)
{
	struct audrec_cmd_routing_mode cmd;

	MM_DBG("\n");
	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_id = AUDREC_CMD_ROUTING_MODE;
	if (audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL)
		cmd.routing_mode = 1;
	return audio_send_queue_rec(audio, &cmd, sizeof(cmd));
}

/*
 * audrec DSP event dispatcher.  Drives the bring-up state machine:
 * CFG_DONE -> (routing) -> MEM_CFG -> (pcm cfg) -> PARAM_CFG -> running,
 * and reaps packet-ready / flush-done notifications at steady state.
 */
static void audrec_dsp_event(void *data, unsigned id, size_t len,
			    void (*getevent)(void *ptr, size_t len))
{
	struct audio_qcelp_in *audio = NULL;

	if (data)
		audio = data;
	else {
		MM_ERR("invalid data for event %x\n", id);
		return;
	}

	switch (id) {
	case AUDREC_MSG_CMD_CFG_DONE_MSG: {
		struct audrec_msg_cmd_cfg_done_msg cmd_cfg_done_msg;
		getevent(&cmd_cfg_done_msg, AUDREC_MSG_CMD_CFG_DONE_MSG_LEN);
		if (cmd_cfg_done_msg.audrec_enc_type &
				AUDREC_MSG_CFG_DONE_ENC_ENA) {
			audio->audrec_obj_idx = cmd_cfg_done_msg.audrec_obj_idx;
			MM_DBG("CFG ENABLED\n");
			if (audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL) {
				MM_DBG("routing command\n");
				audqcelp_in_routing_mode_config(audio);
			} else {
				audqcelp_in_encmem_config(audio);
			}
		} else {
			MM_DBG("CFG SLEEP\n");
			audio->running = 0;
			wake_up(&audio->wait_enable);
		}
		break;
	}
	case AUDREC_MSG_CMD_ROUTING_MODE_DONE_MSG: {
		struct audrec_msg_cmd_routing_mode_done_msg routing_msg;
		getevent(&routing_msg, AUDREC_MSG_CMD_ROUTING_MODE_DONE_MSG);
		MM_DBG("AUDREC_MSG_CMD_ROUTING_MODE_DONE_MSG");
		if (routing_msg.configuration == 0) {
			MM_ERR("routing configuration failed\n");
			audio->running = 0;
			wake_up(&audio->wait_enable);
		} else
			audqcelp_in_encmem_config(audio);
		break;
	}
	case AUDREC_MSG_CMD_AREC_MEM_CFG_DONE_MSG: {
		MM_DBG("AREC_MEM_CFG_DONE_MSG\n");
		if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL)
			audqcelp_in_encparam_config(audio);
		else
			audpcm_config(audio);
		break;
	}
	case AUDREC_CMD_PCM_CFG_ARM_TO_ENC_DONE_MSG: {
		MM_DBG("AUDREC_CMD_PCM_CFG_ARM_TO_ENC_DONE_MSG");
		audqcelp_in_encparam_config(audio);
		break;
	}
	case AUDREC_MSG_CMD_AREC_PARAM_CFG_DONE_MSG: {
		MM_DBG("AUDREC_MSG_CMD_AREC_PARAM_CFG_DONE_MSG\n");
		audio->running = 1;
		wake_up(&audio->wait_enable);
		if (audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL)
			audrec_pcm_send_data(audio, 1);
		break;
	}
	case AUDREC_CMD_PCM_BUFFER_PTR_UPDATE_ARM_TO_ENC_MSG: {
		MM_DBG("ptr_update recieved from DSP\n");
		audrec_pcm_send_data(audio, 1);
		break;
	}
	case AUDREC_MSG_NO_EXT_PKT_AVAILABLE_MSG: {
		struct audrec_msg_no_ext_pkt_avail_msg err_msg;
		getevent(&err_msg, AUDREC_MSG_NO_EXT_PKT_AVAILABLE_MSG_LEN);
		MM_DBG("NO_EXT_PKT_AVAILABLE_MSG %x\n",
			err_msg.audrec_err_id);
		break;
	}
	case AUDREC_MSG_PACKET_READY_MSG: {
		struct audrec_msg_packet_ready_msg pkt_ready_msg;

		getevent(&pkt_ready_msg, AUDREC_MSG_PACKET_READY_MSG_LEN);
		MM_DBG("UP_PACKET_READY_MSG: write cnt msw %d \
write cnt lsw %d read cnt msw %d read cnt lsw %d \n",
		pkt_ready_msg.pkt_counter_msw,
		pkt_ready_msg.pkt_counter_lsw,
		pkt_ready_msg.pkt_read_cnt_msw,
		pkt_ready_msg.pkt_read_cnt_lsw);

		audqcelp_in_get_dsp_frames(audio);
		break;
	}
	case AUDREC_UP_NT_PACKET_READY_MSG: {
		struct audrec_up_nt_packet_ready_msg pkt_ready_msg;

		getevent(&pkt_ready_msg, AUDREC_UP_NT_PACKET_READY_MSG_LEN);
		MM_DBG("UP_NT_PACKET_READY_MSG: write cnt lsw %d \
write cnt msw %d read cnt lsw %d read cnt msw %d \n",
		pkt_ready_msg.audrec_packetwrite_cnt_lsw,
		pkt_ready_msg.audrec_packetwrite_cnt_msw,
		pkt_ready_msg.audrec_upprev_readcount_lsw,
		pkt_ready_msg.audrec_upprev_readcount_msw);

		audqcelp_nt_in_get_dsp_frames(audio);
		break;
	}
	case AUDREC_CMD_FLUSH_DONE_MSG: {
		audio->wflush = 0;
		audio->rflush = 0;
		audio->flush_ack = 1;
		wake_up(&audio->write_wait);
		MM_DBG("flush ack recieved\n");
		break;
	}
	case ADSP_MESSAGE_ID:
		MM_DBG("Received ADSP event: module \
enable/disable(audrectask)\n");
		break;
	default:
		MM_ERR("unknown event %d\n", id);
	}
}

static struct msm_adsp_ops audrec_qcelp_adsp_ops = {
	.event = audrec_dsp_event,
};

/* Enable or disable the encoder object on the DSP. */
static int audqcelp_in_dsp_enable(struct audio_qcelp_in *audio, int enable)
{
	struct audrec_cmd_enc_cfg cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_id = AUDREC_CMD_ENC_CFG;
	cmd.audrec_enc_type = (audio->enc_type & 0xFF) |
	(enable ? AUDREC_CMD_ENC_ENA : AUDREC_CMD_ENC_DIS);
	/* Don't care */
	cmd.audrec_obj_idx = audio->audrec_obj_idx;

	return audio_send_queue_rec(audio, &cmd, sizeof(cmd));
}

/*
 * Describe the shared frame memory to the DSP and carve audio->data
 * into FRAME_NUM frame slots, each with a mode-dependent header.
 */
static int audqcelp_in_encmem_config(struct audio_qcelp_in *audio)
{
	struct audrec_cmd_arecmem_cfg cmd;
	uint16_t *data = (void *) audio->data;
	int n;
	int header_len = 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_id = AUDREC_CMD_ARECMEM_CFG;
	cmd.audrec_obj_idx = audio->audrec_obj_idx;
	/* Rate at which packet complete message comes */
	cmd.audrec_up_pkt_intm_cnt = 1;
	cmd.audrec_extpkt_buffer_msw = audio->phys >> 16;
	cmd.audrec_extpkt_buffer_lsw = audio->phys;
	/* Max Buffer no available for frames */
	cmd.audrec_extpkt_buffer_num = FRAME_NUM;

	/* prepare buffer pointers:
	 * T:36 bytes qcelp packet + 4 halfword header
	 * NT:36 bytes qcelp packet + 12 halfword header
	 */
	if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL)
		header_len = FRAME_HEADER_SIZE/2;
	else
		header_len = NT_FRAME_HEADER_SIZE/2;

	for (n = 0; n < FRAME_NUM; n++) {
		audio->in[n].data = data + header_len;
		data += (QCELP_FRAME_SIZE/2) + header_len;
		MM_DBG("0x%8x\n", (int)(audio->in[n].data - header_len*2));
	}

	return audio_send_queue_rec(audio, &cmd, sizeof(cmd));
}

/* Push the QCELP rate parameters (min/max bit rate) to the DSP. */
static int audqcelp_in_encparam_config(struct audio_qcelp_in *audio)
{
	struct audrec_cmd_arecparam_qcelp_cfg cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.common.cmd_id = AUDREC_CMD_ARECPARAM_CFG;
	cmd.common.audrec_obj_idx = audio->audrec_obj_idx;
	cmd.enc_min_rate = audio->cfg.min_bit_rate;
	cmd.enc_max_rate = audio->cfg.max_bit_rate;
	cmd.rate_modulation_cmd = 0;	/* Default set to 0 */
	cmd.reduced_rate_level = 0;	/* Default set to 0 */

	return audio_send_queue_rec(audio, &cmd, sizeof(cmd));
}

/* Ask the DSP to flush its pending buffers; FLUSH_DONE_MSG acks it. */
static int audqcelp_flush_command(struct audio_qcelp_in *audio)
{
	struct audrec_cmd_flush cmd;
	MM_DBG("\n");
	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_id = AUDREC_CMD_FLUSH;
	return audio_send_queue_rec(audio, &cmd, sizeof(cmd));
}

/* Re-arm the DSP to deliver the frame numbered read_cnt (tunnel path). */
static int audqcelp_in_dsp_read_buffer(struct audio_qcelp_in *audio,
		   uint32_t read_cnt)
{
	audrec_cmd_packet_ext_ptr cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_id = AUDREC_CMD_PACKET_EXT_PTR;
	cmd.type = audio->audrec_obj_idx;
	cmd.curr_rec_count_msw = read_cnt >> 16;
	cmd.curr_rec_count_lsw = read_cnt;

	return audio_send_queue_recbs(audio, &cmd, sizeof(cmd));
}

/* ------------------- device --------------------- */

/* Wake and drain both the read and write sides during a flush. */
static void audqcelp_ioport_reset(struct audio_qcelp_in *audio)
{
	/* Make sure read/write thread are free from
	 * sleep and knowing that system is not able
	 * to process io request at the moment
	 */
	wake_up(&audio->wait);
	mutex_lock(&audio->read_lock);
	audqcelp_in_flush(audio);
	mutex_unlock(&audio->read_lock);
	wake_up(&audio->write_wait);
	mutex_lock(&audio->write_lock);
	audqcelp_out_flush(audio);
	mutex_unlock(&audio->write_lock);
}

/* Reset the read-side ring and statistics. */
static void audqcelp_in_flush(struct audio_qcelp_in *audio)
{
	int i;
	unsigned long flags;

	audio->eos_ack = 0;
	spin_lock_irqsave(&audio->dsp_lock, flags);
	audio->dsp_cnt = 0;
	audio->in_head = 0;
	audio->in_tail = 0;
	audio->in_count = 0;
	for (i = FRAME_NUM-1; i >= 0; i--) {
		audio->in[i].size = 0;
		audio->in[i].read = 0;
	}
	spin_unlock_irqrestore(&audio->dsp_lock, flags);
	MM_DBG("in_bytes %d\n", atomic_read(&audio->in_bytes));
	MM_DBG("in_samples %d\n", atomic_read(&audio->in_samples));
	atomic_set(&audio->in_bytes, 0);
	atomic_set(&audio->in_samples, 0);
}

/* Reset the write-side (PCM input) ring. */
static void audqcelp_out_flush(struct audio_qcelp_in *audio)
{
	int i;
	unsigned long flags;

	audio->out_head = 0;
	audio->out_count = 0;
	spin_lock_irqsave(&audio->dsp_lock, flags);
	audio->out_tail = 0;
	for (i = OUT_FRAME_NUM-1; i >= 0; i--) {
		audio->out[i].size = 0;
		audio->out[i].read = 0;
		audio->out[i].used = 0;
	}
spin_unlock_irqrestore(&audio->dsp_lock, flags); } /* ------------------- device --------------------- */ static long audqcelp_in_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct audio_qcelp_in *audio = file->private_data; int rc = 0; MM_DBG("\n"); if (cmd == AUDIO_GET_STATS) { struct msm_audio_stats stats; stats.byte_count = atomic_read(&audio->in_bytes); stats.sample_count = atomic_read(&audio->in_samples); if (copy_to_user((void *) arg, &stats, sizeof(stats))) return -EFAULT; return rc; } mutex_lock(&audio->lock); switch (cmd) { case AUDIO_START: { rc = audqcelp_in_enable(audio); if (!rc) { rc = wait_event_interruptible_timeout(audio->wait_enable, audio->running != 0, 1*HZ); MM_DBG("state %d rc = %d\n", audio->running, rc); if (audio->running == 0) rc = -ENODEV; else rc = 0; } audio->stopped = 0; break; } case AUDIO_STOP: { rc = audqcelp_in_disable(audio); break; } case AUDIO_FLUSH: { MM_DBG("AUDIO_FLUSH\n"); audio->rflush = 1; audio->wflush = 1; audqcelp_ioport_reset(audio); if (audio->running) { audqcelp_flush_command(audio); rc = wait_event_interruptible(audio->write_wait, !audio->wflush); if (rc < 0) { MM_ERR("AUDIO_FLUSH interrupted\n"); rc = -EINTR; } } else { audio->rflush = 0; audio->wflush = 0; } break; } case AUDIO_GET_CONFIG: { struct msm_audio_config cfg; memset(&cfg, 0, sizeof(cfg)); cfg.buffer_size = OUT_BUFFER_SIZE; cfg.buffer_count = OUT_FRAME_NUM; cfg.sample_rate = convert_samp_index(audio->samp_rate); cfg.channel_count = 1; cfg.type = 0; cfg.unused[0] = 0; cfg.unused[1] = 0; cfg.unused[2] = 0; if (copy_to_user((void *) arg, &cfg, sizeof(cfg))) rc = -EFAULT; else rc = 0; break; } case AUDIO_GET_STREAM_CONFIG: { struct msm_audio_stream_config cfg; memset(&cfg, 0, sizeof(cfg)); cfg.buffer_size = audio->buffer_size; cfg.buffer_count = FRAME_NUM; if (copy_to_user((void *)arg, &cfg, sizeof(cfg))) rc = -EFAULT; else rc = 0; break; } case AUDIO_SET_STREAM_CONFIG: { struct msm_audio_stream_config cfg; if (copy_from_user(&cfg, (void 
*) arg, sizeof(cfg))) { rc = -EFAULT; break; } /* Allow only single frame */ if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL) { if (cfg.buffer_size != (FRAME_SIZE - 8)) { rc = -EINVAL; break; } } else { if (cfg.buffer_size != (QCELP_FRAME_SIZE + 14)) { rc = -EINVAL; break; } } audio->buffer_size = cfg.buffer_size; break; } case AUDIO_GET_QCELP_ENC_CONFIG: { if (copy_to_user((void *) arg, &audio->cfg, sizeof(audio->cfg))) rc = -EFAULT; break; } case AUDIO_SET_QCELP_ENC_CONFIG: { struct msm_audio_qcelp_enc_config cfg; if (copy_from_user(&cfg, (void *) arg, sizeof(cfg))) { rc = -EFAULT; break; } MM_DBG("0X%8x, 0x%8x, 0x%8x\n", cfg.min_bit_rate, cfg.max_bit_rate, cfg.cdma_rate); if (cfg.min_bit_rate > CDMA_RATE_FULL || \ cfg.min_bit_rate < CDMA_RATE_EIGHTH) { MM_ERR("invalid min bitrate\n"); rc = -EFAULT; break; } if (cfg.max_bit_rate > CDMA_RATE_FULL || \ cfg.max_bit_rate < CDMA_RATE_EIGHTH) { MM_ERR("invalid max bitrate\n"); rc = -EFAULT; break; } /* Recording Does not support Erase and Blank */ if (cfg.cdma_rate > CDMA_RATE_FULL || cfg.cdma_rate < CDMA_RATE_EIGHTH) { MM_ERR("invalid qcelp cdma rate\n"); rc = -EFAULT; break; } memcpy(&audio->cfg, &cfg, sizeof(cfg)); break; } default: rc = -EINVAL; } mutex_unlock(&audio->lock); return rc; } static ssize_t audqcelp_in_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct audio_qcelp_in *audio = file->private_data; unsigned long flags; const char __user *start = buf; void *data; uint32_t index; uint32_t size; int rc = 0; struct qcelp_encoded_meta_out meta_field; struct audio_frame_nt *nt_frame; MM_DBG("count = %d\n", count); mutex_lock(&audio->read_lock); while (count > 0) { rc = wait_event_interruptible( audio->wait, (audio->in_count > 0) || audio->stopped || audio->rflush); if (rc < 0) break; if (audio->rflush) { rc = -EBUSY; break; } if (audio->stopped && !audio->in_count) { MM_DBG("Driver in stop state, No more buffer to read"); rc = 0;/* End of File */ break; } index = audio->in_tail; data = 
(uint8_t *) audio->in[index].data;
		size = audio->in[index].size;
		if (audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL) {
			/* NT frames carry a header just before the payload;
			 * surface its timestamp fields as read() metadata.
			 */
			nt_frame = (struct audio_frame_nt *)(data -
					sizeof(struct audio_frame_nt));
			memcpy((char *)&meta_field.time_stamp_dword_lsw,
				(char *)&nt_frame->time_stamp_dword_lsw,
				(sizeof(struct qcelp_encoded_meta_out) -
				sizeof(uint16_t)));
			meta_field.metadata_len =
					sizeof(struct qcelp_encoded_meta_out);
			if (copy_to_user((char *)start, (char *)&meta_field,
					sizeof(struct qcelp_encoded_meta_out))) {
				rc = -EFAULT;
				break;
			}
			if (nt_frame->nflag_lsw & 0x0001) {
				MM_ERR("recieved EOS in read call\n");
				audio->eos_ack = 1;
			}
			buf += sizeof(struct qcelp_encoded_meta_out);
			count -= sizeof(struct qcelp_encoded_meta_out);
		}
		if (count >= size) {
			/* order the reads on the buffer */
			dma_coherent_post_ops();
			if (copy_to_user(buf, data, size)) {
				rc = -EFAULT;
				break;
			}
			spin_lock_irqsave(&audio->dsp_lock, flags);
			if (index != audio->in_tail) {
				/* overrun -- data is
				 * invalid and we need to retry
				 */
				spin_unlock_irqrestore(&audio->dsp_lock, flags);
				continue;
			}
			audio->in[index].size = 0;
			audio->in_tail = (audio->in_tail + 1) &
					(FRAME_NUM - 1);
			audio->in_count--;
			spin_unlock_irqrestore(&audio->dsp_lock, flags);
			count -= size;
			buf += size;
			if ((audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL)) {
				if (!audio->eos_ack) {
					MM_DBG("sending read ptr command \
%d %d\n", audio->dsp_cnt, audio->in_tail);
					audqcelp_in_dsp_read_buffer(audio,
							audio->dsp_cnt++);
				}
			}
		} else {
			MM_ERR("short read\n");
			break;
		}
		break;
	}
	mutex_unlock(&audio->read_lock);

	if (buf > start)
		return buf - start;

	return rc;
}

/*
 * Feed the next queued PCM buffer to the DSP, recycling the one the
 * DSP just finished.  used == 0xffffffff marks a buffer as in-flight.
 * Called with needed=1 from the DSP callback, needed=0 from write().
 */
static void audrec_pcm_send_data(struct audio_qcelp_in *audio, unsigned needed)
{
	struct buffer *frame;
	unsigned long flags;
	MM_DBG("\n");
	spin_lock_irqsave(&audio->dsp_lock, flags);
	if (!audio->running)
		goto done;

	if (needed && !audio->wflush) {
		/* We were called from the callback because the DSP
		 * requested more data.  Note that the DSP does want
		 * more data, and if a buffer was in-flight, mark it
		 * as available (since the DSP must now be done with
		 * it).
		 */
		audio->out_needed = 1;
		frame = audio->out + audio->out_tail;
		if (frame->used == 0xffffffff) {
			MM_DBG("frame %d free\n", audio->out_tail);
			frame->used = 0;
			audio->out_tail ^= 1;
			wake_up(&audio->write_wait);
		}
	}

	if (audio->out_needed) {
		/* If the DSP currently wants data and we have a
		 * buffer available, we will send it and reset
		 * the needed flag.  We'll mark the buffer as in-flight
		 * so that it won't be recycled until the next buffer
		 * is requested
		 */
		frame = audio->out + audio->out_tail;
		if (frame->used) {
			BUG_ON(frame->used == 0xffffffff);
			audrec_pcm_buffer_ptr_refresh(audio,
						 audio->out_tail,
						    frame->used);
			frame->used = 0xffffffff;
			audio->out_needed = 0;
		}
	}
done:
	spin_unlock_irqrestore(&audio->dsp_lock, flags);
}

/*
 * fsync: in NT mode, wait until a flush completes (wflush raised by
 * AUDIO_FLUSH and cleared by the DSP's FLUSH_DONE ack).
 */
static int audqcelp_in_fsync(struct file *file, loff_t a, loff_t b,
	int datasync)

{
	struct audio_qcelp_in *audio = file->private_data;
	int rc = 0;

	MM_DBG("\n"); /* Macro prints the file name and function */
	if (!audio->running || (audio->mode == MSM_AUD_ENC_MODE_TUNNEL)) {
		rc = -EINVAL;
		goto done_nolock;
	}

	mutex_lock(&audio->write_lock);

	rc = wait_event_interruptible(audio->write_wait,
			audio->wflush);
	MM_DBG("waked on by some event audio->wflush = %d\n", audio->wflush);

	if (rc < 0)
		goto done;
	else if (audio->wflush) {
		rc = -EBUSY;
		goto done;
	}
done:
	mutex_unlock(&audio->write_lock);
done_nolock:
	return rc;
}

/*
 * Send an EOS-only buffer (just the meta field) to the DSP once both
 * write buffers have drained.  Called from write() on EOS detection.
 */
int audrec_qcelp_process_eos(struct audio_qcelp_in *audio,
		const char __user *buf_start, unsigned short mfield_size)
{
	struct buffer *frame;
	int rc = 0;

	frame = audio->out + audio->out_head;

	rc = wait_event_interruptible(audio->write_wait,
		(audio->out_needed &&
		audio->out[0].used == 0 &&
		audio->out[1].used == 0)
		|| (audio->stopped)
		|| (audio->wflush));

	if (rc < 0)
		goto done;
	if (audio->stopped || audio->wflush) {
		rc = -EBUSY;
		goto done;
	}
	if (copy_from_user(frame->data, buf_start, mfield_size)) {
		rc = -EFAULT;
goto done;
	}

	frame->mfield_sz = mfield_size;
	audio->out_head ^= 1;
	frame->used = mfield_size;
	MM_DBG("copying meta_out frame->used = %d\n", frame->used);
	audrec_pcm_send_data(audio, 0);
done:
	return rc;
}

/*
 * Non-tunnel write path: accept PCM (optionally prefixed by a meta
 * field) into the next free out[] buffer and queue it to the DSP.
 * An EOS flag in the meta field triggers audrec_qcelp_process_eos().
 */
static ssize_t audqcelp_in_write(struct file *file,
				const char __user *buf,
				size_t count, loff_t *pos)
{
	struct audio_qcelp_in *audio = file->private_data;
	const char __user *start = buf;
	struct buffer *frame;
	char *cpy_ptr;
	int rc = 0, eos_condition = AUDPREPROC_QCELP_EOS_NONE;
	unsigned short mfield_size = 0;
	int write_count = 0;
	MM_DBG("cnt=%d\n", count);

	if (count & 1)
		return -EINVAL;

	if (audio->mode != MSM_AUD_ENC_MODE_NONTUNNEL)
		return -EINVAL;

	mutex_lock(&audio->write_lock);
	frame = audio->out + audio->out_head;
	/* if supplied count is more than driver buffer size
	 * then only copy driver buffer size
	 */
	if (count > frame->size)
		count = frame->size;

	write_count = count;
	cpy_ptr = frame->data;
	rc = wait_event_interruptible(audio->write_wait,
				      (frame->used == 0)
					|| (audio->stopped)
					|| (audio->wflush));
	if (rc < 0)
		goto error;

	if (audio->stopped || audio->wflush) {
		rc = -EBUSY;
		goto error;
	}
	if (audio->mfield) {
		if (buf == start) {
			/* Processing beginning of user buffer */
			if (__get_user(mfield_size,
				(unsigned short __user *) buf)) {
				rc = -EFAULT;
				goto error;
			} else if (mfield_size > count) {
				rc = -EINVAL;
				goto error;
			}
			MM_DBG("mf offset_val %x\n", mfield_size);
			if (copy_from_user(cpy_ptr, buf, mfield_size)) {
				rc = -EFAULT;
				goto error;
			}
			/* Check if EOS flag is set and buffer
			 * contains only the meta field
			 */
			if (cpy_ptr[AUDPREPROC_QCELP_EOS_FLG_OFFSET] &
					AUDPREPROC_QCELP_EOS_FLG_MASK) {
				eos_condition = AUDPREPROC_QCELP_EOS_SET;
				MM_DBG("EOS SET\n");
				if (mfield_size == count) {
					buf += mfield_size;
					eos_condition = 0;
					goto exit;
				} else
				cpy_ptr[AUDPREPROC_QCELP_EOS_FLG_OFFSET] &=
					~AUDPREPROC_QCELP_EOS_FLG_MASK;
			}
			cpy_ptr += mfield_size;
			count -= mfield_size;
			buf += mfield_size;
		} else {
			mfield_size = 0;
			MM_DBG("continuous buffer\n");
		}
		frame->mfield_sz = mfield_size;
	}
	MM_DBG("copying the stream count = %d\n", count);
	if (copy_from_user(cpy_ptr, buf, count)) {
		rc = -EFAULT;
		goto error;
	}
exit:
	frame->used = count;
	audio->out_head ^= 1;
	if (!audio->flush_ack)
		audrec_pcm_send_data(audio, 0);
	else {
		audrec_pcm_send_data(audio, 1);
		audio->flush_ack = 0;
	}
	if (eos_condition == AUDPREPROC_QCELP_EOS_SET)
		rc = audrec_qcelp_process_eos(audio, start, mfield_size);
	mutex_unlock(&audio->write_lock);
	return write_count;
error:
	mutex_unlock(&audio->write_lock);
	return rc;
}

/* Release: stop the session and free ADSP/ION resources. */
static int audqcelp_in_release(struct inode *inode, struct file *file)
{
	struct audio_qcelp_in *audio = file->private_data;

	mutex_lock(&audio->lock);
	audqcelp_in_disable(audio);
	audqcelp_in_flush(audio);
	msm_adsp_put(audio->audrec);

	audpreproc_aenc_free(audio->enc_id);
	audio->audrec = NULL;
	audio->opened = 0;
	if ((audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL) &&
	    (audio->out_data)) {
		ion_unmap_kernel(audio->client, audio->input_buff_handle);
		ion_free(audio->client, audio->input_buff_handle);
		audio->out_data = NULL;
	}
	if (audio->data) {
		ion_unmap_kernel(audio->client, audio->output_buff_handle);
		ion_free(audio->client, audio->output_buff_handle);
		audio->data = NULL;
	}
	ion_client_destroy(audio->client);
	mutex_unlock(&audio->lock);
	return 0;
}

/* Single driver instance backing the misc device. */
static struct audio_qcelp_in the_audio_qcelp_in;

/*
 * Open: pick tunnel vs non-tunnel from the open mode (read-only =
 * tunnel, read-write = non-tunnel), set default encoder config,
 * claim an encoder session and allocate the ION DMA buffers.
 */
static int audqcelp_in_open(struct inode *inode, struct file *file)
{
	struct audio_qcelp_in *audio = &the_audio_qcelp_in;
	int rc;
	int encid;
	int dma_size = 0;
	int len = 0;
	unsigned long ionflag = 0;
	ion_phys_addr_t addr = 0;
	struct ion_handle *handle = NULL;
	struct ion_client *client = NULL;

	mutex_lock(&audio->lock);
	if (audio->opened) {
		rc = -EBUSY;
		goto done;
	}
	if ((file->f_mode & FMODE_WRITE) &&
		(file->f_mode & FMODE_READ)) {
		audio->mode = MSM_AUD_ENC_MODE_NONTUNNEL;
		dma_size = NT_DMASZ;
		MM_DBG("Opened for non tunnel mode encoding\n");
	} else if (!(file->f_mode & FMODE_WRITE) &&
					(file->f_mode & FMODE_READ)) {
		audio->mode = MSM_AUD_ENC_MODE_TUNNEL;
		dma_size = DMASZ;
		MM_DBG("Opened for tunnel mode encoding\n");
	} else {
		MM_ERR("Invalid mode\n");
		rc = -EACCES;
		goto done;
	}

	/* Settings will be re-config at AUDIO_SET_CONFIG,
	 * but at least we need to have initial config
	 */
	audio->samp_rate = RPC_AUD_DEF_SAMPLE_RATE_8000,
	audio->samp_rate_index = AUDREC_CMD_SAMP_RATE_INDX_8000;
	audio->channel_mode = AUDREC_CMD_STEREO_MODE_MONO;
	if (audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL)
		audio->buffer_size = (QCELP_FRAME_SIZE + 14);
	else
		audio->buffer_size = QCELP_FRAME_SIZE;
	audio->enc_type = AUDREC_CMD_TYPE_0_INDEX_QCELP | audio->mode;
	audio->cfg.cdma_rate = CDMA_RATE_FULL;
	audio->cfg.min_bit_rate = CDMA_RATE_FULL;
	audio->cfg.max_bit_rate = CDMA_RATE_FULL;

	if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL) {
		rc = audmgr_open(&audio->audmgr);
		if (rc)
			goto done;
	}
	encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name,
			&audio->queue_ids);
	if (encid < 0) {
		MM_ERR("No free encoder available\n");
		rc = -ENODEV;
		goto done;
	}
	audio->enc_id = encid;

	rc = msm_adsp_get(audio->module_name, &audio->audrec,
			   &audrec_qcelp_adsp_ops, audio);

	if (rc) {
		audpreproc_aenc_free(audio->enc_id);
		goto done;
	}

	audio->dsp_cnt = 0;
	audio->stopped = 0;
	audio->wflush = 0;
	audio->rflush = 0;
	audio->flush_ack = 0;

	audqcelp_in_flush(audio);
	audqcelp_out_flush(audio);

	client = msm_ion_client_create(UINT_MAX, "Audio_QCELP_in_client");
	if (IS_ERR_OR_NULL(client)) {
		MM_ERR("Unable to create ION client\n");
		rc = -ENOMEM;
		goto client_create_error;
	}
	audio->client = client;

	MM_DBG("allocating mem sz = %d\n", dma_size);
	handle = ion_alloc(client, dma_size, SZ_4K,
		ION_HEAP(ION_AUDIO_HEAP_ID), 0);
	if (IS_ERR_OR_NULL(handle)) {
		MM_ERR("Unable to create allocate O/P buffers\n");
		rc = -ENOMEM;
		goto output_buff_alloc_error;
	}

	audio->output_buff_handle = handle;

	rc = ion_phys(client , handle, &addr, &len);
	if (rc) {
		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
		rc = -ENOMEM;
		goto output_buff_get_phys_error;
	} else {
		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
(unsigned int) addr, (unsigned int) len);
	}
	audio->phys = (int32_t)addr;

	rc = ion_handle_get_flags(client, handle, &ionflag);
	if (rc) {
		MM_ERR("could not get flags for the handle\n");
		rc = -ENOMEM;
		goto output_buff_get_flags_error;
	}

	audio->map_v_read = ion_map_kernel(client, handle);
	if (IS_ERR(audio->map_v_read)) {
		MM_ERR("could not map read buffers,freeing instance 0x%08x\n",
				(int)audio);
		rc = -ENOMEM;
		goto output_buff_map_error;
	}
	audio->data = audio->map_v_read;
	MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n",
		audio->phys, (int)audio->data);

	audio->out_data = NULL;
	if (audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL) {
		/* second ION buffer: userspace PCM input (write side) */
		MM_DBG("allocating BUFFER_SIZE %d\n", BUFFER_SIZE);
		handle = ion_alloc(client, BUFFER_SIZE,
				SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID), 0);
		if (IS_ERR_OR_NULL(handle)) {
			MM_ERR("Unable to create allocate I/P buffers\n");
			rc = -ENOMEM;
			goto input_buff_alloc_error;
		}

		audio->input_buff_handle = handle;

		rc = ion_phys(client , handle, &addr, &len);
		if (rc) {
			MM_ERR("I/P buffers:Invalid phy: %x sz: %x\n",
				(unsigned int) addr, (unsigned int) len);
			rc = -ENOMEM;
			goto input_buff_get_phys_error;
		} else {
			MM_INFO("Got valid phy: %x sz: %x\n",
				(unsigned int) addr,
				(unsigned int) len);
		}
		audio->out_phys = (int32_t)addr;

		rc = ion_handle_get_flags(client,
			handle, &ionflag);
		if (rc) {
			MM_ERR("could not get flags for the handle\n");
			rc = -ENOMEM;
			goto input_buff_get_flags_error;
		}

		audio->map_v_write = ion_map_kernel(client, handle);
		if (IS_ERR(audio->map_v_write)) {
			MM_ERR("could not map write buffers\n");
			rc = -ENOMEM;
			goto input_buff_map_error;
		}
		audio->out_data = audio->map_v_write;
		MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
				(unsigned int)addr,
				(unsigned int)audio->out_data);

		/* Initialize buffer */
		audio->out[0].data = audio->out_data + 0;
		audio->out[0].addr = audio->out_phys + 0;
		audio->out[0].size = OUT_BUFFER_SIZE;

		audio->out[1].data = audio->out_data + OUT_BUFFER_SIZE;
		audio->out[1].addr = audio->out_phys + OUT_BUFFER_SIZE;
		audio->out[1].size = OUT_BUFFER_SIZE;

		MM_DBG("audio->out[0].data = %d  audio->out[1].data = %d",
				(unsigned int)audio->out[0].data,
				(unsigned int)audio->out[1].data);
		audio->mfield = NT_FRAME_HEADER_SIZE;
		audio->out_frame_cnt++;
	}
	file->private_data = audio;
	audio->opened = 1;

done:
	mutex_unlock(&audio->lock);
	return rc;
/* goto-based unwind: each label releases what was acquired before it */
input_buff_map_error:
input_buff_get_flags_error:
input_buff_get_phys_error:
	ion_free(client, audio->input_buff_handle);
input_buff_alloc_error:
	ion_unmap_kernel(client, audio->output_buff_handle);
output_buff_map_error:
output_buff_get_phys_error:
output_buff_get_flags_error:
	ion_free(client, audio->output_buff_handle);
output_buff_alloc_error:
	ion_client_destroy(client);
client_create_error:
	msm_adsp_put(audio->audrec);
	audpreproc_aenc_free(audio->enc_id);
	mutex_unlock(&audio->lock);
	return rc;
}

static const struct file_operations audio_qcelp_in_fops = {
	.owner		= THIS_MODULE,
	.open		= audqcelp_in_open,
	.release	= audqcelp_in_release,
	.read		= audqcelp_in_read,
	.write		= audqcelp_in_write,
	.fsync		= audqcelp_in_fsync,
	.unlocked_ioctl	= audqcelp_in_ioctl,
};

static struct miscdevice audqcelp_in_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "msm_qcelp_in",
	.fops	= &audio_qcelp_in_fops,
};

/* Module init: set up locks/waitqueues and register /dev/msm_qcelp_in. */
static int __init audqcelp_in_init(void)
{
	mutex_init(&the_audio_qcelp_in.lock);
	mutex_init(&the_audio_qcelp_in.read_lock);
	spin_lock_init(&the_audio_qcelp_in.dsp_lock);
	init_waitqueue_head(&the_audio_qcelp_in.wait);
	init_waitqueue_head(&the_audio_qcelp_in.wait_enable);
	mutex_init(&the_audio_qcelp_in.write_lock);
	init_waitqueue_head(&the_audio_qcelp_in.write_wait);
	return misc_register(&audqcelp_in_misc);
}

device_initcall(audqcelp_in_init);
gpl-2.0
Cold-D/linux
arch/cris/arch-v32/drivers/sync_serial.c
330
45426
/* * Simple synchronous serial port driver for ETRAX FS and Artpec-3. * * Copyright (c) 2005 Axis Communications AB * * Author: Mikael Starvik * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/major.h> #include <linux/sched.h> #include <linux/mutex.h> #include <linux/interrupt.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/timer.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <asm/io.h> #include <dma.h> #include <pinmux.h> #include <hwregs/reg_rdwr.h> #include <hwregs/sser_defs.h> #include <hwregs/dma_defs.h> #include <hwregs/dma.h> #include <hwregs/intr_vect_defs.h> #include <hwregs/intr_vect.h> #include <hwregs/reg_map.h> #include <asm/sync_serial.h> /* The receiver is a bit tricky because of the continuous stream of data.*/ /* */ /* Three DMA descriptors are linked together. Each DMA descriptor is */ /* responsible for port->bufchunk of a common buffer. */ /* */ /* +---------------------------------------------+ */ /* | +----------+ +----------+ +----------+ | */ /* +-> | Descr[0] |-->| Descr[1] |-->| Descr[2] |-+ */ /* +----------+ +----------+ +----------+ */ /* | | | */ /* v v v */ /* +-------------------------------------+ */ /* | BUFFER | */ /* +-------------------------------------+ */ /* |<- data_avail ->| */ /* readp writep */ /* */ /* If the application keeps up the pace readp will be right after writep.*/ /* If the application can't keep the pace we have to throw away data. */ /* The idea is that readp should be ready with the data pointed out by */ /* Descr[i] when the DMA has filled in Descr[i+1]. 
*/ /* Otherwise we will discard */ /* the rest of the data pointed out by Descr1 and set readp to the start */ /* of Descr2 */ #define SYNC_SERIAL_MAJOR 125 /* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */ /* words can be handled */ #define IN_BUFFER_SIZE 12288 #define IN_DESCR_SIZE 256 #define NBR_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE) #define OUT_BUFFER_SIZE 1024*8 #define NBR_OUT_DESCR 8 #define DEFAULT_FRAME_RATE 0 #define DEFAULT_WORD_RATE 7 /* NOTE: Enabling some debug will likely cause overrun or underrun, * especially if manual mode is use. */ #define DEBUG(x) #define DEBUGREAD(x) #define DEBUGWRITE(x) #define DEBUGPOLL(x) #define DEBUGRXINT(x) #define DEBUGTXINT(x) #define DEBUGTRDMA(x) #define DEBUGOUTBUF(x) typedef struct sync_port { reg_scope_instances regi_sser; reg_scope_instances regi_dmain; reg_scope_instances regi_dmaout; char started; /* 1 if port has been started */ char port_nbr; /* Port 0 or 1 */ char busy; /* 1 if port is busy */ char enabled; /* 1 if port is enabled */ char use_dma; /* 1 if port uses dma */ char tr_running; char init_irqs; int output; int input; /* Next byte to be read by application */ volatile unsigned char *volatile readp; /* Next byte to be written by etrax */ volatile unsigned char *volatile writep; unsigned int in_buffer_size; unsigned int inbufchunk; unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32))); unsigned char in_buffer[IN_BUFFER_SIZE]__attribute__ ((aligned(32))); unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32))); struct dma_descr_data* next_rx_desc; struct dma_descr_data* prev_rx_desc; /* Pointer to the first available descriptor in the ring, * unless active_tr_descr == catch_tr_descr and a dma * transfer is active */ struct dma_descr_data *active_tr_descr; /* Pointer to the first allocated descriptor in the ring */ struct dma_descr_data *catch_tr_descr; /* Pointer to the descriptor with the current end-of-list */ struct dma_descr_data *prev_tr_descr; 
int full; /* Pointer to the first byte being read by DMA * or current position in out_buffer if not using DMA. */ unsigned char *out_rd_ptr; /* Number of bytes currently locked for being read by DMA */ int out_buf_count; dma_descr_data in_descr[NBR_IN_DESCR] __attribute__ ((__aligned__(16))); dma_descr_context in_context __attribute__ ((__aligned__(32))); dma_descr_data out_descr[NBR_OUT_DESCR] __attribute__ ((__aligned__(16))); dma_descr_context out_context __attribute__ ((__aligned__(32))); wait_queue_head_t out_wait_q; wait_queue_head_t in_wait_q; spinlock_t lock; } sync_port; static DEFINE_MUTEX(sync_serial_mutex); static int etrax_sync_serial_init(void); static void initialize_port(int portnbr); static inline int sync_data_avail(struct sync_port *port); static int sync_serial_open(struct inode *, struct file*); static int sync_serial_release(struct inode*, struct file*); static unsigned int sync_serial_poll(struct file *filp, poll_table *wait); static int sync_serial_ioctl(struct file *, unsigned int cmd, unsigned long arg); static ssize_t sync_serial_write(struct file * file, const char * buf, size_t count, loff_t *ppos); static ssize_t sync_serial_read(struct file *file, char *buf, size_t count, loff_t *ppos); #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \ defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \ (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \ defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)) #define SYNC_SER_DMA #endif static void send_word(sync_port* port); static void start_dma_out(struct sync_port *port, const char *data, int count); static void start_dma_in(sync_port* port); #ifdef SYNC_SER_DMA static irqreturn_t tr_interrupt(int irq, void *dev_id); static irqreturn_t rx_interrupt(int irq, void *dev_id); #endif #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \ !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \ (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \ !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)) #define 
SYNC_SER_MANUAL #endif #ifdef SYNC_SER_MANUAL static irqreturn_t manual_interrupt(int irq, void *dev_id); #endif #ifdef CONFIG_ETRAXFS /* ETRAX FS */ #define OUT_DMA_NBR 4 #define IN_DMA_NBR 5 #define PINMUX_SSER pinmux_sser0 #define SYNCSER_INST regi_sser0 #define SYNCSER_INTR_VECT SSER0_INTR_VECT #define OUT_DMA_INST regi_dma4 #define IN_DMA_INST regi_dma5 #define DMA_OUT_INTR_VECT DMA4_INTR_VECT #define DMA_IN_INTR_VECT DMA5_INTR_VECT #define REQ_DMA_SYNCSER dma_sser0 #else /* Artpec-3 */ #define OUT_DMA_NBR 6 #define IN_DMA_NBR 7 #define PINMUX_SSER pinmux_sser #define SYNCSER_INST regi_sser #define SYNCSER_INTR_VECT SSER_INTR_VECT #define OUT_DMA_INST regi_dma6 #define IN_DMA_INST regi_dma7 #define DMA_OUT_INTR_VECT DMA6_INTR_VECT #define DMA_IN_INTR_VECT DMA7_INTR_VECT #define REQ_DMA_SYNCSER dma_sser #endif /* The ports */ static struct sync_port ports[]= { { .regi_sser = SYNCSER_INST, .regi_dmaout = OUT_DMA_INST, .regi_dmain = IN_DMA_INST, #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA) .use_dma = 1, #else .use_dma = 0, #endif } #ifdef CONFIG_ETRAXFS , { .regi_sser = regi_sser1, .regi_dmaout = regi_dma6, .regi_dmain = regi_dma7, #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA) .use_dma = 1, #else .use_dma = 0, #endif } #endif }; #define NBR_PORTS ARRAY_SIZE(ports) static const struct file_operations sync_serial_fops = { .owner = THIS_MODULE, .write = sync_serial_write, .read = sync_serial_read, .poll = sync_serial_poll, .unlocked_ioctl = sync_serial_ioctl, .open = sync_serial_open, .release = sync_serial_release, .llseek = noop_llseek, }; static int __init etrax_sync_serial_init(void) { ports[0].enabled = 0; #ifdef CONFIG_ETRAXFS ports[1].enabled = 0; #endif if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial", &sync_serial_fops) < 0) { printk(KERN_WARNING "Unable to get major for synchronous serial port\n"); return -EBUSY; } /* Initialize Ports */ #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) if (crisv32_pinmux_alloc_fixed(PINMUX_SSER)) { 
printk(KERN_WARNING "Unable to alloc pins for synchronous serial port 0\n"); return -EIO; } ports[0].enabled = 1; initialize_port(0); #endif #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) if (crisv32_pinmux_alloc_fixed(pinmux_sser1)) { printk(KERN_WARNING "Unable to alloc pins for synchronous serial port 0\n"); return -EIO; } ports[1].enabled = 1; initialize_port(1); #endif #ifdef CONFIG_ETRAXFS printk(KERN_INFO "ETRAX FS synchronous serial port driver\n"); #else printk(KERN_INFO "Artpec-3 synchronous serial port driver\n"); #endif return 0; } static void __init initialize_port(int portnbr) { int __attribute__((unused)) i; struct sync_port *port = &ports[portnbr]; reg_sser_rw_cfg cfg = {0}; reg_sser_rw_frm_cfg frm_cfg = {0}; reg_sser_rw_tr_cfg tr_cfg = {0}; reg_sser_rw_rec_cfg rec_cfg = {0}; DEBUG(printk(KERN_DEBUG "Init sync serial port %d\n", portnbr)); port->port_nbr = portnbr; port->init_irqs = 1; port->out_rd_ptr = port->out_buffer; port->out_buf_count = 0; port->output = 1; port->input = 0; port->readp = port->flip; port->writep = port->flip; port->in_buffer_size = IN_BUFFER_SIZE; port->inbufchunk = IN_DESCR_SIZE; port->next_rx_desc = &port->in_descr[0]; port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR-1]; port->prev_rx_desc->eol = 1; init_waitqueue_head(&port->out_wait_q); init_waitqueue_head(&port->in_wait_q); spin_lock_init(&port->lock); cfg.out_clk_src = regk_sser_intern_clk; cfg.out_clk_pol = regk_sser_pos; cfg.clk_od_mode = regk_sser_no; cfg.clk_dir = regk_sser_out; cfg.gate_clk = regk_sser_no; cfg.base_freq = regk_sser_f29_493; cfg.clk_div = 256; REG_WR(sser, port->regi_sser, rw_cfg, cfg); frm_cfg.wordrate = DEFAULT_WORD_RATE; frm_cfg.type = regk_sser_edge; frm_cfg.frame_pin_dir = regk_sser_out; frm_cfg.frame_pin_use = regk_sser_frm; frm_cfg.status_pin_dir = regk_sser_in; frm_cfg.status_pin_use = regk_sser_hold; frm_cfg.out_on = regk_sser_tr; frm_cfg.tr_delay = 1; REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg); tr_cfg.urun_stop = regk_sser_no; 
tr_cfg.sample_size = 7; tr_cfg.sh_dir = regk_sser_msbfirst; tr_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no; #if 0 tr_cfg.rate_ctrl = regk_sser_bulk; tr_cfg.data_pin_use = regk_sser_dout; #else tr_cfg.rate_ctrl = regk_sser_iso; tr_cfg.data_pin_use = regk_sser_dout; #endif tr_cfg.bulk_wspace = 1; REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); rec_cfg.sample_size = 7; rec_cfg.sh_dir = regk_sser_msbfirst; rec_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no; rec_cfg.fifo_thr = regk_sser_inf; REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); #ifdef SYNC_SER_DMA /* Setup the descriptor ring for dma out/transmit. */ for (i = 0; i < NBR_OUT_DESCR; i++) { port->out_descr[i].wait = 0; port->out_descr[i].intr = 1; port->out_descr[i].eol = 0; port->out_descr[i].out_eop = 0; port->out_descr[i].next = (dma_descr_data *)virt_to_phys(&port->out_descr[i+1]); } /* Create a ring from the list. */ port->out_descr[NBR_OUT_DESCR-1].next = (dma_descr_data *)virt_to_phys(&port->out_descr[0]); /* Setup context for traversing the ring. 
*/ port->active_tr_descr = &port->out_descr[0]; port->prev_tr_descr = &port->out_descr[NBR_OUT_DESCR-1]; port->catch_tr_descr = &port->out_descr[0]; #endif } static inline int sync_data_avail(struct sync_port *port) { int avail; unsigned char *start; unsigned char *end; start = (unsigned char*)port->readp; /* cast away volatile */ end = (unsigned char*)port->writep; /* cast away volatile */ /* 0123456789 0123456789 * ----- - ----- * ^rp ^wp ^wp ^rp */ if (end >= start) avail = end - start; else avail = port->in_buffer_size - (start - end); return avail; } static inline int sync_data_avail_to_end(struct sync_port *port) { int avail; unsigned char *start; unsigned char *end; start = (unsigned char*)port->readp; /* cast away volatile */ end = (unsigned char*)port->writep; /* cast away volatile */ /* 0123456789 0123456789 * ----- ----- * ^rp ^wp ^wp ^rp */ if (end >= start) avail = end - start; else avail = port->flip + port->in_buffer_size - start; return avail; } static int sync_serial_open(struct inode *inode, struct file *file) { int dev = iminor(inode); int ret = -EBUSY; sync_port *port; reg_dma_rw_cfg cfg = {.en = regk_dma_yes}; reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes}; mutex_lock(&sync_serial_mutex); DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev)); if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) { DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev)); ret = -ENODEV; goto out; } port = &ports[dev]; /* Allow open this device twice (assuming one reader and one writer) */ if (port->busy == 2) { DEBUG(printk(KERN_DEBUG "Device is busy.. 
\n")); goto out; } if (port->init_irqs) { if (port->use_dma) { if (port == &ports[0]) { #ifdef SYNC_SER_DMA if (request_irq(DMA_OUT_INTR_VECT, tr_interrupt, 0, "synchronous serial 0 dma tr", &ports[0])) { printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ"); goto out; } else if (request_irq(DMA_IN_INTR_VECT, rx_interrupt, 0, "synchronous serial 1 dma rx", &ports[0])) { free_irq(DMA_OUT_INTR_VECT, &port[0]); printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ"); goto out; } else if (crisv32_request_dma(OUT_DMA_NBR, "synchronous serial 0 dma tr", DMA_VERBOSE_ON_ERROR, 0, REQ_DMA_SYNCSER)) { free_irq(DMA_OUT_INTR_VECT, &port[0]); free_irq(DMA_IN_INTR_VECT, &port[0]); printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel"); goto out; } else if (crisv32_request_dma(IN_DMA_NBR, "synchronous serial 0 dma rec", DMA_VERBOSE_ON_ERROR, 0, REQ_DMA_SYNCSER)) { crisv32_free_dma(OUT_DMA_NBR); free_irq(DMA_OUT_INTR_VECT, &port[0]); free_irq(DMA_IN_INTR_VECT, &port[0]); printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel"); goto out; } #endif } #ifdef CONFIG_ETRAXFS else if (port == &ports[1]) { #ifdef SYNC_SER_DMA if (request_irq(DMA6_INTR_VECT, tr_interrupt, 0, "synchronous serial 1 dma tr", &ports[1])) { printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ"); goto out; } else if (request_irq(DMA7_INTR_VECT, rx_interrupt, 0, "synchronous serial 1 dma rx", &ports[1])) { free_irq(DMA6_INTR_VECT, &ports[1]); printk(KERN_CRIT "Can't allocate sync serial port 3 IRQ"); goto out; } else if (crisv32_request_dma( SYNC_SER1_TX_DMA_NBR, "synchronous serial 1 dma tr", DMA_VERBOSE_ON_ERROR, 0, dma_sser1)) { free_irq(DMA6_INTR_VECT, &ports[1]); free_irq(DMA7_INTR_VECT, &ports[1]); printk(KERN_CRIT "Can't allocate sync serial port 3 TX DMA channel"); goto out; } else if (crisv32_request_dma( SYNC_SER1_RX_DMA_NBR, "synchronous serial 3 dma rec", DMA_VERBOSE_ON_ERROR, 0, dma_sser1)) { crisv32_free_dma(SYNC_SER1_TX_DMA_NBR); free_irq(DMA6_INTR_VECT, 
&ports[1]); free_irq(DMA7_INTR_VECT, &ports[1]); printk(KERN_CRIT "Can't allocate sync serial port 3 RX DMA channel"); goto out; } #endif } #endif /* Enable DMAs */ REG_WR(dma, port->regi_dmain, rw_cfg, cfg); REG_WR(dma, port->regi_dmaout, rw_cfg, cfg); /* Enable DMA IRQs */ REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask); REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask); /* Set up wordsize = 1 for DMAs. */ DMA_WR_CMD (port->regi_dmain, regk_dma_set_w_size1); DMA_WR_CMD (port->regi_dmaout, regk_dma_set_w_size1); start_dma_in(port); port->init_irqs = 0; } else { /* !port->use_dma */ #ifdef SYNC_SER_MANUAL if (port == &ports[0]) { if (request_irq(SYNCSER_INTR_VECT, manual_interrupt, 0, "synchronous serial manual irq", &ports[0])) { printk("Can't allocate sync serial manual irq"); goto out; } } #ifdef CONFIG_ETRAXFS else if (port == &ports[1]) { if (request_irq(SSER1_INTR_VECT, manual_interrupt, 0, "synchronous serial manual irq", &ports[1])) { printk(KERN_CRIT "Can't allocate sync serial manual irq"); goto out; } } #endif port->init_irqs = 0; #else panic("sync_serial: Manual mode not supported.\n"); #endif /* SYNC_SER_MANUAL */ } } /* port->init_irqs */ port->busy++; ret = 0; out: mutex_unlock(&sync_serial_mutex); return ret; } static int sync_serial_release(struct inode *inode, struct file *file) { int dev = iminor(inode); sync_port *port; if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) { DEBUG(printk("Invalid minor %d\n", dev)); return -ENODEV; } port = &ports[dev]; if (port->busy) port->busy--; if (!port->busy) /* XXX */ ; return 0; } static unsigned int sync_serial_poll(struct file *file, poll_table *wait) { int dev = iminor(file_inode(file)); unsigned int mask = 0; sync_port *port; DEBUGPOLL( static unsigned int prev_mask = 0; ); port = &ports[dev]; if (!port->started) { reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg); reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg); cfg.en = regk_sser_yes; rec_cfg.rec_en 
= port->input; REG_WR(sser, port->regi_sser, rw_cfg, cfg); REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); port->started = 1; } poll_wait(file, &port->out_wait_q, wait); poll_wait(file, &port->in_wait_q, wait); /* No active transfer, descriptors are available */ if (port->output && !port->tr_running) mask |= POLLOUT | POLLWRNORM; /* Descriptor and buffer space available. */ if (port->output && port->active_tr_descr != port->catch_tr_descr && port->out_buf_count < OUT_BUFFER_SIZE) mask |= POLLOUT | POLLWRNORM; /* At least an inbufchunk of data */ if (port->input && sync_data_avail(port) >= port->inbufchunk) mask |= POLLIN | POLLRDNORM; DEBUGPOLL(if (mask != prev_mask) printk("sync_serial_poll: mask 0x%08X %s %s\n", mask, mask&POLLOUT?"POLLOUT":"", mask&POLLIN?"POLLIN":""); prev_mask = mask; ); return mask; } static int sync_serial_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int return_val = 0; int dma_w_size = regk_dma_set_w_size1; int dev = iminor(file_inode(file)); sync_port *port; reg_sser_rw_tr_cfg tr_cfg; reg_sser_rw_rec_cfg rec_cfg; reg_sser_rw_frm_cfg frm_cfg; reg_sser_rw_cfg gen_cfg; reg_sser_rw_intr_mask intr_mask; if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) { DEBUG(printk("Invalid minor %d\n", dev)); return -1; } port = &ports[dev]; spin_lock_irq(&port->lock); tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg); rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg); frm_cfg = REG_RD(sser, port->regi_sser, rw_frm_cfg); gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg); intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask); switch(cmd) { case SSP_SPEED: if (GET_SPEED(arg) == CODEC) { unsigned int freq; gen_cfg.base_freq = regk_sser_f32; /* Clock divider will internally be * gen_cfg.clk_div + 1. 
*/ freq = GET_FREQ(arg); switch (freq) { case FREQ_32kHz: case FREQ_64kHz: case FREQ_128kHz: case FREQ_256kHz: gen_cfg.clk_div = 125 * (1 << (freq - FREQ_256kHz)) - 1; break; case FREQ_512kHz: gen_cfg.clk_div = 62; break; case FREQ_1MHz: case FREQ_2MHz: case FREQ_4MHz: gen_cfg.clk_div = 8 * (1 << freq) - 1; break; } } else { gen_cfg.base_freq = regk_sser_f29_493; switch (GET_SPEED(arg)) { case SSP150: gen_cfg.clk_div = 29493000 / (150 * 8) - 1; break; case SSP300: gen_cfg.clk_div = 29493000 / (300 * 8) - 1; break; case SSP600: gen_cfg.clk_div = 29493000 / (600 * 8) - 1; break; case SSP1200: gen_cfg.clk_div = 29493000 / (1200 * 8) - 1; break; case SSP2400: gen_cfg.clk_div = 29493000 / (2400 * 8) - 1; break; case SSP4800: gen_cfg.clk_div = 29493000 / (4800 * 8) - 1; break; case SSP9600: gen_cfg.clk_div = 29493000 / (9600 * 8) - 1; break; case SSP19200: gen_cfg.clk_div = 29493000 / (19200 * 8) - 1; break; case SSP28800: gen_cfg.clk_div = 29493000 / (28800 * 8) - 1; break; case SSP57600: gen_cfg.clk_div = 29493000 / (57600 * 8) - 1; break; case SSP115200: gen_cfg.clk_div = 29493000 / (115200 * 8) - 1; break; case SSP230400: gen_cfg.clk_div = 29493000 / (230400 * 8) - 1; break; case SSP460800: gen_cfg.clk_div = 29493000 / (460800 * 8) - 1; break; case SSP921600: gen_cfg.clk_div = 29493000 / (921600 * 8) - 1; break; case SSP3125000: gen_cfg.base_freq = regk_sser_f100; gen_cfg.clk_div = 100000000 / (3125000 * 8) - 1; break; } } frm_cfg.wordrate = GET_WORD_RATE(arg); break; case SSP_MODE: switch(arg) { case MASTER_OUTPUT: port->output = 1; port->input = 0; frm_cfg.out_on = regk_sser_tr; frm_cfg.frame_pin_dir = regk_sser_out; gen_cfg.clk_dir = regk_sser_out; break; case SLAVE_OUTPUT: port->output = 1; port->input = 0; frm_cfg.frame_pin_dir = regk_sser_in; gen_cfg.clk_dir = regk_sser_in; break; case MASTER_INPUT: port->output = 0; port->input = 1; frm_cfg.frame_pin_dir = regk_sser_out; frm_cfg.out_on = regk_sser_intern_tb; gen_cfg.clk_dir = regk_sser_out; break; case 
SLAVE_INPUT: port->output = 0; port->input = 1; frm_cfg.frame_pin_dir = regk_sser_in; gen_cfg.clk_dir = regk_sser_in; break; case MASTER_BIDIR: port->output = 1; port->input = 1; frm_cfg.frame_pin_dir = regk_sser_out; frm_cfg.out_on = regk_sser_intern_tb; gen_cfg.clk_dir = regk_sser_out; break; case SLAVE_BIDIR: port->output = 1; port->input = 1; frm_cfg.frame_pin_dir = regk_sser_in; gen_cfg.clk_dir = regk_sser_in; break; default: spin_unlock_irq(&port->lock); return -EINVAL; } if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT)) intr_mask.rdav = regk_sser_yes; break; case SSP_FRAME_SYNC: if (arg & NORMAL_SYNC) { frm_cfg.rec_delay = 1; frm_cfg.tr_delay = 1; } else if (arg & EARLY_SYNC) frm_cfg.rec_delay = frm_cfg.tr_delay = 0; else if (arg & SECOND_WORD_SYNC) { frm_cfg.rec_delay = 7; frm_cfg.tr_delay = 1; } tr_cfg.bulk_wspace = frm_cfg.tr_delay; frm_cfg.early_wend = regk_sser_yes; if (arg & BIT_SYNC) frm_cfg.type = regk_sser_edge; else if (arg & WORD_SYNC) frm_cfg.type = regk_sser_level; else if (arg & EXTENDED_SYNC) frm_cfg.early_wend = regk_sser_no; if (arg & SYNC_ON) frm_cfg.frame_pin_use = regk_sser_frm; else if (arg & SYNC_OFF) frm_cfg.frame_pin_use = regk_sser_gio0; dma_w_size = regk_dma_set_w_size2; if (arg & WORD_SIZE_8) { rec_cfg.sample_size = tr_cfg.sample_size = 7; dma_w_size = regk_dma_set_w_size1; } else if (arg & WORD_SIZE_12) rec_cfg.sample_size = tr_cfg.sample_size = 11; else if (arg & WORD_SIZE_16) rec_cfg.sample_size = tr_cfg.sample_size = 15; else if (arg & WORD_SIZE_24) rec_cfg.sample_size = tr_cfg.sample_size = 23; else if (arg & WORD_SIZE_32) rec_cfg.sample_size = tr_cfg.sample_size = 31; if (arg & BIT_ORDER_MSB) rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst; else if (arg & BIT_ORDER_LSB) rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_lsbfirst; if (arg & FLOW_CONTROL_ENABLE) { frm_cfg.status_pin_use = regk_sser_frm; rec_cfg.fifo_thr = regk_sser_thr16; } else if (arg & FLOW_CONTROL_DISABLE) { frm_cfg.status_pin_use = 
regk_sser_gio0; rec_cfg.fifo_thr = regk_sser_inf; } if (arg & CLOCK_NOT_GATED) gen_cfg.gate_clk = regk_sser_no; else if (arg & CLOCK_GATED) gen_cfg.gate_clk = regk_sser_yes; break; case SSP_IPOLARITY: /* NOTE!! negedge is considered NORMAL */ if (arg & CLOCK_NORMAL) rec_cfg.clk_pol = regk_sser_neg; else if (arg & CLOCK_INVERT) rec_cfg.clk_pol = regk_sser_pos; if (arg & FRAME_NORMAL) frm_cfg.level = regk_sser_pos_hi; else if (arg & FRAME_INVERT) frm_cfg.level = regk_sser_neg_lo; if (arg & STATUS_NORMAL) gen_cfg.hold_pol = regk_sser_pos; else if (arg & STATUS_INVERT) gen_cfg.hold_pol = regk_sser_neg; break; case SSP_OPOLARITY: if (arg & CLOCK_NORMAL) gen_cfg.out_clk_pol = regk_sser_pos; else if (arg & CLOCK_INVERT) gen_cfg.out_clk_pol = regk_sser_neg; if (arg & FRAME_NORMAL) frm_cfg.level = regk_sser_pos_hi; else if (arg & FRAME_INVERT) frm_cfg.level = regk_sser_neg_lo; if (arg & STATUS_NORMAL) gen_cfg.hold_pol = regk_sser_pos; else if (arg & STATUS_INVERT) gen_cfg.hold_pol = regk_sser_neg; break; case SSP_SPI: rec_cfg.fifo_thr = regk_sser_inf; rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst; rec_cfg.sample_size = tr_cfg.sample_size = 7; frm_cfg.frame_pin_use = regk_sser_frm; frm_cfg.type = regk_sser_level; frm_cfg.tr_delay = 1; frm_cfg.level = regk_sser_neg_lo; if (arg & SPI_SLAVE) { rec_cfg.clk_pol = regk_sser_neg; gen_cfg.clk_dir = regk_sser_in; port->input = 1; port->output = 0; } else { gen_cfg.out_clk_pol = regk_sser_pos; port->input = 0; port->output = 1; gen_cfg.clk_dir = regk_sser_out; } break; case SSP_INBUFCHUNK: break; default: return_val = -1; } if (port->started) { rec_cfg.rec_en = port->input; gen_cfg.en = (port->output | port->input); } REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg); REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask); REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg); if (cmd == SSP_FRAME_SYNC && (arg & (WORD_SIZE_8 | 
WORD_SIZE_12 | WORD_SIZE_16 | WORD_SIZE_24 | WORD_SIZE_32))) { int en = gen_cfg.en; gen_cfg.en = 0; REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg); /* ##### Should DMA be stoped before we change dma size? */ DMA_WR_CMD(port->regi_dmain, dma_w_size); DMA_WR_CMD(port->regi_dmaout, dma_w_size); gen_cfg.en = en; REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg); } spin_unlock_irq(&port->lock); return return_val; } static long sync_serial_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret; mutex_lock(&sync_serial_mutex); ret = sync_serial_ioctl_unlocked(file, cmd, arg); mutex_unlock(&sync_serial_mutex); return ret; } /* NOTE: sync_serial_write does not support concurrency */ static ssize_t sync_serial_write(struct file *file, const char *buf, size_t count, loff_t *ppos) { int dev = iminor(file_inode(file)); DECLARE_WAITQUEUE(wait, current); struct sync_port *port; int trunc_count; unsigned long flags; int bytes_free; int out_buf_count; unsigned char *rd_ptr; /* First allocated byte in the buffer */ unsigned char *wr_ptr; /* First free byte in the buffer */ unsigned char *buf_stop_ptr; /* Last byte + 1 */ if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) { DEBUG(printk("Invalid minor %d\n", dev)); return -ENODEV; } port = &ports[dev]; /* |<- OUT_BUFFER_SIZE ->| * |<- out_buf_count ->| * |<- trunc_count ->| ...->| * ______________________________________________________ * | free | data | free | * |_________|___________________|________________________| * ^ rd_ptr ^ wr_ptr */ DEBUGWRITE(printk(KERN_DEBUG "W d%d c %lu a: %p c: %p\n", port->port_nbr, count, port->active_tr_descr, port->catch_tr_descr)); /* Read variables that may be updated by interrupts */ spin_lock_irqsave(&port->lock, flags); rd_ptr = port->out_rd_ptr; out_buf_count = port->out_buf_count; spin_unlock_irqrestore(&port->lock, flags); /* Check if resources are available */ if (port->tr_running && ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) || out_buf_count >= 
OUT_BUFFER_SIZE)) { DEBUGWRITE(printk(KERN_DEBUG "sser%d full\n", dev)); return -EAGAIN; } buf_stop_ptr = port->out_buffer + OUT_BUFFER_SIZE; /* Determine pointer to the first free byte, before copying. */ wr_ptr = rd_ptr + out_buf_count; if (wr_ptr >= buf_stop_ptr) wr_ptr -= OUT_BUFFER_SIZE; /* If we wrap the ring buffer, let the user space program handle it by * truncating the data. This could be more elegant, small buffer * fragments may occur. */ bytes_free = OUT_BUFFER_SIZE - out_buf_count; if (wr_ptr + bytes_free > buf_stop_ptr) bytes_free = buf_stop_ptr - wr_ptr; trunc_count = (count < bytes_free) ? count : bytes_free; if (copy_from_user(wr_ptr, buf, trunc_count)) return -EFAULT; DEBUGOUTBUF(printk(KERN_DEBUG "%-4d + %-4d = %-4d %p %p %p\n", out_buf_count, trunc_count, port->out_buf_count, port->out_buffer, wr_ptr, buf_stop_ptr)); /* Make sure transmitter/receiver is running */ if (!port->started) { reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg); reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg); cfg.en = regk_sser_yes; rec_cfg.rec_en = port->input; REG_WR(sser, port->regi_sser, rw_cfg, cfg); REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); port->started = 1; } /* Setup wait if blocking */ if (!(file->f_flags & O_NONBLOCK)) { add_wait_queue(&port->out_wait_q, &wait); set_current_state(TASK_INTERRUPTIBLE); } spin_lock_irqsave(&port->lock, flags); port->out_buf_count += trunc_count; if (port->use_dma) { start_dma_out(port, wr_ptr, trunc_count); } else if (!port->tr_running) { reg_sser_rw_intr_mask intr_mask; intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask); /* Start sender by writing data */ send_word(port); /* and enable transmitter ready IRQ */ intr_mask.trdy = 1; REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask); } spin_unlock_irqrestore(&port->lock, flags); /* Exit if non blocking */ if (file->f_flags & O_NONBLOCK) { DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu %08x\n", port->port_nbr, trunc_count, 
REG_RD_INT(dma, port->regi_dmaout, r_intr))); return trunc_count; } schedule(); remove_wait_queue(&port->out_wait_q, &wait); if (signal_pending(current)) return -EINTR; DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu\n", port->port_nbr, trunc_count)); return trunc_count; } static ssize_t sync_serial_read(struct file * file, char * buf, size_t count, loff_t *ppos) { int dev = iminor(file_inode(file)); int avail; sync_port *port; unsigned char* start; unsigned char* end; unsigned long flags; if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) { DEBUG(printk("Invalid minor %d\n", dev)); return -ENODEV; } port = &ports[dev]; DEBUGREAD(printk("R%d c %d ri %lu wi %lu /%lu\n", dev, count, port->readp - port->flip, port->writep - port->flip, port->in_buffer_size)); if (!port->started) { reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg); reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg); reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg); cfg.en = regk_sser_yes; tr_cfg.tr_en = regk_sser_yes; rec_cfg.rec_en = regk_sser_yes; REG_WR(sser, port->regi_sser, rw_cfg, cfg); REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); port->started = 1; } /* Calculate number of available bytes */ /* Save pointers to avoid that they are modified by interrupt */ spin_lock_irqsave(&port->lock, flags); start = (unsigned char*)port->readp; /* cast away volatile */ end = (unsigned char*)port->writep; /* cast away volatile */ spin_unlock_irqrestore(&port->lock, flags); while ((start == end) && !port->full) /* No data */ { DEBUGREAD(printk(KERN_DEBUG "&")); if (file->f_flags & O_NONBLOCK) return -EAGAIN; wait_event_interruptible(port->in_wait_q, !(start == end && !port->full)); if (signal_pending(current)) return -EINTR; spin_lock_irqsave(&port->lock, flags); start = (unsigned char*)port->readp; /* cast away volatile */ end = (unsigned char*)port->writep; /* cast away volatile */ 
spin_unlock_irqrestore(&port->lock, flags); } /* Lazy read, never return wrapped data. */ if (port->full) avail = port->in_buffer_size; else if (end > start) avail = end - start; else avail = port->flip + port->in_buffer_size - start; count = count > avail ? avail : count; if (copy_to_user(buf, start, count)) return -EFAULT; /* Disable interrupts while updating readp */ spin_lock_irqsave(&port->lock, flags); port->readp += count; if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */ port->readp = port->flip; port->full = 0; spin_unlock_irqrestore(&port->lock, flags); DEBUGREAD(printk("r %d\n", count)); return count; } static void send_word(sync_port* port) { reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg); reg_sser_rw_tr_data tr_data = {0}; switch(tr_cfg.sample_size) { case 8: port->out_buf_count--; tr_data.data = *port->out_rd_ptr++; REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) port->out_rd_ptr = port->out_buffer; break; case 12: { int data = (*port->out_rd_ptr++) << 8; data |= *port->out_rd_ptr++; port->out_buf_count -= 2; tr_data.data = data; REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) port->out_rd_ptr = port->out_buffer; } break; case 16: port->out_buf_count -= 2; tr_data.data = *(unsigned short *)port->out_rd_ptr; REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); port->out_rd_ptr += 2; if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) port->out_rd_ptr = port->out_buffer; break; case 24: port->out_buf_count -= 3; tr_data.data = *(unsigned short *)port->out_rd_ptr; REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); port->out_rd_ptr += 2; tr_data.data = *port->out_rd_ptr++; REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) port->out_rd_ptr = port->out_buffer; break; case 32: port->out_buf_count -= 4; tr_data.data = 
*(unsigned short *)port->out_rd_ptr; REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); port->out_rd_ptr += 2; tr_data.data = *(unsigned short *)port->out_rd_ptr; REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); port->out_rd_ptr += 2; if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) port->out_rd_ptr = port->out_buffer; break; } } static void start_dma_out(struct sync_port *port, const char *data, int count) { port->active_tr_descr->buf = (char *) virt_to_phys((char *) data); port->active_tr_descr->after = port->active_tr_descr->buf + count; port->active_tr_descr->intr = 1; port->active_tr_descr->eol = 1; port->prev_tr_descr->eol = 0; DEBUGTRDMA(printk(KERN_DEBUG "Inserting eolr:%p eol@:%p\n", port->prev_tr_descr, port->active_tr_descr)); port->prev_tr_descr = port->active_tr_descr; port->active_tr_descr = phys_to_virt((int) port->active_tr_descr->next); if (!port->tr_running) { reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg); port->out_context.next = 0; port->out_context.saved_data = (dma_descr_data *)virt_to_phys(port->prev_tr_descr); port->out_context.saved_data_buf = port->prev_tr_descr->buf; DMA_START_CONTEXT(port->regi_dmaout, virt_to_phys((char *)&port->out_context)); tr_cfg.tr_en = regk_sser_yes; REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); DEBUGTRDMA(printk(KERN_DEBUG "dma s\n");); } else { DMA_CONTINUE_DATA(port->regi_dmaout); DEBUGTRDMA(printk(KERN_DEBUG "dma c\n");); } port->tr_running = 1; } static void start_dma_in(sync_port *port) { int i; char *buf; port->writep = port->flip; if (port->writep > port->flip + port->in_buffer_size) { panic("Offset too large in sync serial driver\n"); return; } buf = (char*)virt_to_phys(port->in_buffer); for (i = 0; i < NBR_IN_DESCR; i++) { port->in_descr[i].buf = buf; port->in_descr[i].after = buf + port->inbufchunk; port->in_descr[i].intr = 1; port->in_descr[i].next = (dma_descr_data*)virt_to_phys(&port->in_descr[i+1]); port->in_descr[i].buf = buf; buf += port->inbufchunk; } /* 
Link the last descriptor to the first */ port->in_descr[i-1].next = (dma_descr_data*)virt_to_phys(&port->in_descr[0]); port->in_descr[i-1].eol = regk_sser_yes; port->next_rx_desc = &port->in_descr[0]; port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1]; port->in_context.saved_data = (dma_descr_data*)virt_to_phys(&port->in_descr[0]); port->in_context.saved_data_buf = port->in_descr[0].buf; DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context)); } #ifdef SYNC_SER_DMA static irqreturn_t tr_interrupt(int irq, void *dev_id) { reg_dma_r_masked_intr masked; reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes}; reg_dma_rw_stat stat; int i; int found = 0; int stop_sser = 0; for (i = 0; i < NBR_PORTS; i++) { sync_port *port = &ports[i]; if (!port->enabled || !port->use_dma) continue; /* IRQ active for the port? */ masked = REG_RD(dma, port->regi_dmaout, r_masked_intr); if (!masked.data) continue; found = 1; /* Check if we should stop the DMA transfer */ stat = REG_RD(dma, port->regi_dmaout, rw_stat); if (stat.list_state == regk_dma_data_at_eol) stop_sser = 1; /* Clear IRQ */ REG_WR(dma, port->regi_dmaout, rw_ack_intr, ack_intr); if (!stop_sser) { /* The DMA has completed a descriptor, EOL was not * encountered, so step relevant descriptor and * datapointers forward. */ int sent; sent = port->catch_tr_descr->after - port->catch_tr_descr->buf; DEBUGTXINT(printk(KERN_DEBUG "%-4d - %-4d = %-4d\t" "in descr %p (ac: %p)\n", port->out_buf_count, sent, port->out_buf_count - sent, port->catch_tr_descr, port->active_tr_descr);); port->out_buf_count -= sent; port->catch_tr_descr = phys_to_virt((int) port->catch_tr_descr->next); port->out_rd_ptr = phys_to_virt((int) port->catch_tr_descr->buf); } else { int i, sent; /* EOL handler. * Note that if an EOL was encountered during the irq * locked section of sync_ser_write the DMA will be * restarted and the eol flag will be cleared. * The remaining descriptors will be traversed by * the descriptor interrupts as usual. 
*/ i = 0; while (!port->catch_tr_descr->eol) { sent = port->catch_tr_descr->after - port->catch_tr_descr->buf; DEBUGOUTBUF(printk(KERN_DEBUG "traversing descr %p -%d (%d)\n", port->catch_tr_descr, sent, port->out_buf_count)); port->out_buf_count -= sent; port->catch_tr_descr = phys_to_virt( (int)port->catch_tr_descr->next); i++; if (i >= NBR_OUT_DESCR) { /* TODO: Reset and recover */ panic("sync_serial: missing eol"); } } sent = port->catch_tr_descr->after - port->catch_tr_descr->buf; DEBUGOUTBUF(printk(KERN_DEBUG "eol at descr %p -%d (%d)\n", port->catch_tr_descr, sent, port->out_buf_count)); port->out_buf_count -= sent; /* Update read pointer to first free byte, we * may already be writing data there. */ port->out_rd_ptr = phys_to_virt((int) port->catch_tr_descr->after); if (port->out_rd_ptr > port->out_buffer + OUT_BUFFER_SIZE) port->out_rd_ptr = port->out_buffer; reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg); DEBUGTXINT(printk(KERN_DEBUG "tr_int DMA stop %d, set catch @ %p\n", port->out_buf_count, port->active_tr_descr)); if (port->out_buf_count != 0) printk(KERN_CRIT "sync_ser: buffer not " "empty after eol.\n"); port->catch_tr_descr = port->active_tr_descr; port->tr_running = 0; tr_cfg.tr_en = regk_sser_no; REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); } /* wake up the waiting process */ wake_up_interruptible(&port->out_wait_q); } return IRQ_RETVAL(found); } /* tr_interrupt */ static irqreturn_t rx_interrupt(int irq, void *dev_id) { reg_dma_r_masked_intr masked; reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes}; int i; int found = 0; for (i = 0; i < NBR_PORTS; i++) { sync_port *port = &ports[i]; if (!port->enabled || !port->use_dma ) continue; masked = REG_RD(dma, port->regi_dmain, r_masked_intr); if (masked.data) /* Descriptor interrupt */ { found = 1; while (REG_RD(dma, port->regi_dmain, rw_data) != virt_to_phys(port->next_rx_desc)) { DEBUGRXINT(printk(KERN_DEBUG "!")); if (port->writep + port->inbufchunk > port->flip + 
port->in_buffer_size) { int first_size = port->flip + port->in_buffer_size - port->writep; memcpy((char*)port->writep, phys_to_virt((unsigned)port->next_rx_desc->buf), first_size); memcpy(port->flip, phys_to_virt((unsigned)port->next_rx_desc->buf+first_size), port->inbufchunk - first_size); port->writep = port->flip + port->inbufchunk - first_size; } else { memcpy((char*)port->writep, phys_to_virt((unsigned)port->next_rx_desc->buf), port->inbufchunk); port->writep += port->inbufchunk; if (port->writep >= port->flip + port->in_buffer_size) port->writep = port->flip; } if (port->writep == port->readp) { port->full = 1; } port->next_rx_desc->eol = 1; port->prev_rx_desc->eol = 0; /* Cache bug workaround */ flush_dma_descr(port->prev_rx_desc, 0); port->prev_rx_desc = port->next_rx_desc; port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next); /* Cache bug workaround */ flush_dma_descr(port->prev_rx_desc, 1); /* wake up the waiting process */ wake_up_interruptible(&port->in_wait_q); DMA_CONTINUE(port->regi_dmain); REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr); } } } return IRQ_RETVAL(found); } /* rx_interrupt */ #endif /* SYNC_SER_DMA */ #ifdef SYNC_SER_MANUAL static irqreturn_t manual_interrupt(int irq, void *dev_id) { int i; int found = 0; reg_sser_r_masked_intr masked; for (i = 0; i < NBR_PORTS; i++) { sync_port *port = &ports[i]; if (!port->enabled || port->use_dma) { continue; } masked = REG_RD(sser, port->regi_sser, r_masked_intr); if (masked.rdav) /* Data received? 
*/ { reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg); reg_sser_r_rec_data data = REG_RD(sser, port->regi_sser, r_rec_data); found = 1; /* Read data */ switch(rec_cfg.sample_size) { case 8: *port->writep++ = data.data & 0xff; break; case 12: *port->writep = (data.data & 0x0ff0) >> 4; *(port->writep + 1) = data.data & 0x0f; port->writep+=2; break; case 16: *(unsigned short*)port->writep = data.data; port->writep+=2; break; case 24: *(unsigned int*)port->writep = data.data; port->writep+=3; break; case 32: *(unsigned int*)port->writep = data.data; port->writep+=4; break; } if (port->writep >= port->flip + port->in_buffer_size) /* Wrap? */ port->writep = port->flip; if (port->writep == port->readp) { /* receive buffer overrun, discard oldest data */ port->readp++; if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */ port->readp = port->flip; } if (sync_data_avail(port) >= port->inbufchunk) wake_up_interruptible(&port->in_wait_q); /* Wake up application */ } if (masked.trdy) /* Transmitter ready? */ { found = 1; if (port->out_buf_count > 0) /* More data to send */ send_word(port); else /* transmission finished */ { reg_sser_rw_intr_mask intr_mask; intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask); intr_mask.trdy = 0; REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask); wake_up_interruptible(&port->out_wait_q); /* Wake up application */ } } } return IRQ_RETVAL(found); } #endif module_init(etrax_sync_serial_init);
gpl-2.0
BOOTMGR/lge_victo_msm7x30-CM
drivers/usb/serial/option.c
330
60852
/* USB Driver for GSM modems Copyright (C) 2005 Matthias Urlichs <smurf@smurf.noris.de> This driver is free software; you can redistribute it and/or modify it under the terms of Version 2 of the GNU General Public License as published by the Free Software Foundation. Portions copied from the Keyspan driver by Hugh Blemings <hugh@blemings.org> History: see the git log. Work sponsored by: Sigos GmbH, Germany <info@sigos.de> This driver exists because the "normal" serial driver doesn't work too well with GSM modems. Issues: - data loss -- one single Receive URB is not nearly enough - nonstandard flow (Option devices) control - controlling the baud rate doesn't make sense This driver is named "option" because the most common device it's used for is a PC-Card (with an internal OHCI-USB interface, behind which the GSM interface sits), made by Option Inc. Some of the "one port" devices actually exhibit multiple USB instances on the USB bus. This is not a bug, these ports are used for different device features. 
*/ #define DRIVER_VERSION "v0.7.2" #define DRIVER_AUTHOR "Matthias Urlichs <smurf@smurf.noris.de>" #define DRIVER_DESC "USB Driver for GSM modems" #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/bitops.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include "usb-wwan.h" /* Function prototypes */ static int option_probe(struct usb_serial *serial, const struct usb_device_id *id); static int option_send_setup(struct usb_serial_port *port); static void option_instat_callback(struct urb *urb); /* Vendor and product IDs */ #define OPTION_VENDOR_ID 0x0AF0 #define OPTION_PRODUCT_COLT 0x5000 #define OPTION_PRODUCT_RICOLA 0x6000 #define OPTION_PRODUCT_RICOLA_LIGHT 0x6100 #define OPTION_PRODUCT_RICOLA_QUAD 0x6200 #define OPTION_PRODUCT_RICOLA_QUAD_LIGHT 0x6300 #define OPTION_PRODUCT_RICOLA_NDIS 0x6050 #define OPTION_PRODUCT_RICOLA_NDIS_LIGHT 0x6150 #define OPTION_PRODUCT_RICOLA_NDIS_QUAD 0x6250 #define OPTION_PRODUCT_RICOLA_NDIS_QUAD_LIGHT 0x6350 #define OPTION_PRODUCT_COBRA 0x6500 #define OPTION_PRODUCT_COBRA_BUS 0x6501 #define OPTION_PRODUCT_VIPER 0x6600 #define OPTION_PRODUCT_VIPER_BUS 0x6601 #define OPTION_PRODUCT_GT_MAX_READY 0x6701 #define OPTION_PRODUCT_FUJI_MODEM_LIGHT 0x6721 #define OPTION_PRODUCT_FUJI_MODEM_GT 0x6741 #define OPTION_PRODUCT_FUJI_MODEM_EX 0x6761 #define OPTION_PRODUCT_KOI_MODEM 0x6800 #define OPTION_PRODUCT_SCORPION_MODEM 0x6901 #define OPTION_PRODUCT_ETNA_MODEM 0x7001 #define OPTION_PRODUCT_ETNA_MODEM_LITE 0x7021 #define OPTION_PRODUCT_ETNA_MODEM_GT 0x7041 #define OPTION_PRODUCT_ETNA_MODEM_EX 0x7061 #define OPTION_PRODUCT_ETNA_KOI_MODEM 0x7100 #define OPTION_PRODUCT_GTM380_MODEM 0x7201 #define HUAWEI_VENDOR_ID 0x12D1 #define HUAWEI_PRODUCT_E600 0x1001 #define HUAWEI_PRODUCT_E220 0x1003 #define HUAWEI_PRODUCT_E220BIS 0x1004 #define HUAWEI_PRODUCT_E1401 0x1401 #define HUAWEI_PRODUCT_E1402 0x1402 
#define HUAWEI_PRODUCT_E1403 0x1403 #define HUAWEI_PRODUCT_E1404 0x1404 #define HUAWEI_PRODUCT_E1405 0x1405 #define HUAWEI_PRODUCT_E1406 0x1406 #define HUAWEI_PRODUCT_E1407 0x1407 #define HUAWEI_PRODUCT_E1408 0x1408 #define HUAWEI_PRODUCT_E1409 0x1409 #define HUAWEI_PRODUCT_E140A 0x140A #define HUAWEI_PRODUCT_E140B 0x140B #define HUAWEI_PRODUCT_E140C 0x140C #define HUAWEI_PRODUCT_E140D 0x140D #define HUAWEI_PRODUCT_E140E 0x140E #define HUAWEI_PRODUCT_E140F 0x140F #define HUAWEI_PRODUCT_E1410 0x1410 #define HUAWEI_PRODUCT_E1411 0x1411 #define HUAWEI_PRODUCT_E1412 0x1412 #define HUAWEI_PRODUCT_E1413 0x1413 #define HUAWEI_PRODUCT_E1414 0x1414 #define HUAWEI_PRODUCT_E1415 0x1415 #define HUAWEI_PRODUCT_E1416 0x1416 #define HUAWEI_PRODUCT_E1417 0x1417 #define HUAWEI_PRODUCT_E1418 0x1418 #define HUAWEI_PRODUCT_E1419 0x1419 #define HUAWEI_PRODUCT_E141A 0x141A #define HUAWEI_PRODUCT_E141B 0x141B #define HUAWEI_PRODUCT_E141C 0x141C #define HUAWEI_PRODUCT_E141D 0x141D #define HUAWEI_PRODUCT_E141E 0x141E #define HUAWEI_PRODUCT_E141F 0x141F #define HUAWEI_PRODUCT_E1420 0x1420 #define HUAWEI_PRODUCT_E1421 0x1421 #define HUAWEI_PRODUCT_E1422 0x1422 #define HUAWEI_PRODUCT_E1423 0x1423 #define HUAWEI_PRODUCT_E1424 0x1424 #define HUAWEI_PRODUCT_E1425 0x1425 #define HUAWEI_PRODUCT_E1426 0x1426 #define HUAWEI_PRODUCT_E1427 0x1427 #define HUAWEI_PRODUCT_E1428 0x1428 #define HUAWEI_PRODUCT_E1429 0x1429 #define HUAWEI_PRODUCT_E142A 0x142A #define HUAWEI_PRODUCT_E142B 0x142B #define HUAWEI_PRODUCT_E142C 0x142C #define HUAWEI_PRODUCT_E142D 0x142D #define HUAWEI_PRODUCT_E142E 0x142E #define HUAWEI_PRODUCT_E142F 0x142F #define HUAWEI_PRODUCT_E1430 0x1430 #define HUAWEI_PRODUCT_E1431 0x1431 #define HUAWEI_PRODUCT_E1432 0x1432 #define HUAWEI_PRODUCT_E1433 0x1433 #define HUAWEI_PRODUCT_E1434 0x1434 #define HUAWEI_PRODUCT_E1435 0x1435 #define HUAWEI_PRODUCT_E1436 0x1436 #define HUAWEI_PRODUCT_E1437 0x1437 #define HUAWEI_PRODUCT_E1438 0x1438 #define HUAWEI_PRODUCT_E1439 0x1439 #define 
HUAWEI_PRODUCT_E143A 0x143A #define HUAWEI_PRODUCT_E143B 0x143B #define HUAWEI_PRODUCT_E143C 0x143C #define HUAWEI_PRODUCT_E143D 0x143D #define HUAWEI_PRODUCT_E143E 0x143E #define HUAWEI_PRODUCT_E143F 0x143F #define HUAWEI_PRODUCT_K4505 0x1464 #define HUAWEI_PRODUCT_K3765 0x1465 #define HUAWEI_PRODUCT_E14AC 0x14AC #define HUAWEI_PRODUCT_ETS1220 0x1803 #define QUANTA_VENDOR_ID 0x0408 #define QUANTA_PRODUCT_Q101 0xEA02 #define QUANTA_PRODUCT_Q111 0xEA03 #define QUANTA_PRODUCT_GLX 0xEA04 #define QUANTA_PRODUCT_GKE 0xEA05 #define QUANTA_PRODUCT_GLE 0xEA06 #define NOVATELWIRELESS_VENDOR_ID 0x1410 /* YISO PRODUCTS */ #define YISO_VENDOR_ID 0x0EAB #define YISO_PRODUCT_U893 0xC893 /* MERLIN EVDO PRODUCTS */ #define NOVATELWIRELESS_PRODUCT_V640 0x1100 #define NOVATELWIRELESS_PRODUCT_V620 0x1110 #define NOVATELWIRELESS_PRODUCT_V740 0x1120 #define NOVATELWIRELESS_PRODUCT_V720 0x1130 /* MERLIN HSDPA/HSPA PRODUCTS */ #define NOVATELWIRELESS_PRODUCT_U730 0x1400 #define NOVATELWIRELESS_PRODUCT_U740 0x1410 #define NOVATELWIRELESS_PRODUCT_U870 0x1420 #define NOVATELWIRELESS_PRODUCT_XU870 0x1430 #define NOVATELWIRELESS_PRODUCT_X950D 0x1450 /* EXPEDITE PRODUCTS */ #define NOVATELWIRELESS_PRODUCT_EV620 0x2100 #define NOVATELWIRELESS_PRODUCT_ES720 0x2110 #define NOVATELWIRELESS_PRODUCT_E725 0x2120 #define NOVATELWIRELESS_PRODUCT_ES620 0x2130 #define NOVATELWIRELESS_PRODUCT_EU730 0x2400 #define NOVATELWIRELESS_PRODUCT_EU740 0x2410 #define NOVATELWIRELESS_PRODUCT_EU870D 0x2420 /* OVATION PRODUCTS */ #define NOVATELWIRELESS_PRODUCT_MC727 0x4100 #define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 #define NOVATELWIRELESS_PRODUCT_U727 0x5010 #define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100 #define NOVATELWIRELESS_PRODUCT_MC760 0x6000 #define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002 /* FUTURE NOVATEL PRODUCTS */ #define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0X6001 #define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0X7000 #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0X7001 #define 
NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED 0X8000 #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0X8001 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0X9000 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0X9001 #define NOVATELWIRELESS_PRODUCT_GLOBAL 0XA001 /* AMOI PRODUCTS */ #define AMOI_VENDOR_ID 0x1614 #define AMOI_PRODUCT_H01 0x0800 #define AMOI_PRODUCT_H01A 0x7002 #define AMOI_PRODUCT_H02 0x0802 #define AMOI_PRODUCT_SKYPEPHONE_S2 0x0407 #define DELL_VENDOR_ID 0x413C /* Dell modems */ #define DELL_PRODUCT_5700_MINICARD 0x8114 #define DELL_PRODUCT_5500_MINICARD 0x8115 #define DELL_PRODUCT_5505_MINICARD 0x8116 #define DELL_PRODUCT_5700_EXPRESSCARD 0x8117 #define DELL_PRODUCT_5510_EXPRESSCARD 0x8118 #define DELL_PRODUCT_5700_MINICARD_SPRINT 0x8128 #define DELL_PRODUCT_5700_MINICARD_TELUS 0x8129 #define DELL_PRODUCT_5720_MINICARD_VZW 0x8133 #define DELL_PRODUCT_5720_MINICARD_SPRINT 0x8134 #define DELL_PRODUCT_5720_MINICARD_TELUS 0x8135 #define DELL_PRODUCT_5520_MINICARD_CINGULAR 0x8136 #define DELL_PRODUCT_5520_MINICARD_GENERIC_L 0x8137 #define DELL_PRODUCT_5520_MINICARD_GENERIC_I 0x8138 #define DELL_PRODUCT_5730_MINICARD_SPRINT 0x8180 #define DELL_PRODUCT_5730_MINICARD_TELUS 0x8181 #define DELL_PRODUCT_5730_MINICARD_VZW 0x8182 #define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_PRODUCT_KPC650 0x17da #define KYOCERA_PRODUCT_KPC680 0x180a #define ANYDATA_VENDOR_ID 0x16d5 #define ANYDATA_PRODUCT_ADU_620UW 0x6202 #define ANYDATA_PRODUCT_ADU_E100A 0x6501 #define ANYDATA_PRODUCT_ADU_500A 0x6502 #define AXESSTEL_VENDOR_ID 0x1726 #define AXESSTEL_PRODUCT_MV110H 0x1000 #define BANDRICH_VENDOR_ID 0x1A8D #define BANDRICH_PRODUCT_C100_1 0x1002 #define BANDRICH_PRODUCT_C100_2 0x1003 #define BANDRICH_PRODUCT_1004 0x1004 #define BANDRICH_PRODUCT_1005 0x1005 #define BANDRICH_PRODUCT_1006 0x1006 #define BANDRICH_PRODUCT_1007 0x1007 #define BANDRICH_PRODUCT_1008 0x1008 #define BANDRICH_PRODUCT_1009 0x1009 #define BANDRICH_PRODUCT_100A 0x100a #define 
BANDRICH_PRODUCT_100B 0x100b #define BANDRICH_PRODUCT_100C 0x100c #define BANDRICH_PRODUCT_100D 0x100d #define BANDRICH_PRODUCT_100E 0x100e #define BANDRICH_PRODUCT_100F 0x100f #define BANDRICH_PRODUCT_1010 0x1010 #define BANDRICH_PRODUCT_1011 0x1011 #define BANDRICH_PRODUCT_1012 0x1012 #define AMOI_VENDOR_ID 0x1614 #define AMOI_PRODUCT_9508 0x0800 #define QUALCOMM_VENDOR_ID 0x05C6 #define CMOTECH_VENDOR_ID 0x16d8 #define CMOTECH_PRODUCT_6008 0x6008 #define CMOTECH_PRODUCT_6280 0x6280 #define TELIT_VENDOR_ID 0x1bc7 #define TELIT_PRODUCT_UC864E 0x1003 #define TELIT_PRODUCT_UC864G 0x1004 /* ZTE PRODUCTS */ #define ZTE_VENDOR_ID 0x19d2 #define ZTE_PRODUCT_MF622 0x0001 #define ZTE_PRODUCT_MF628 0x0015 #define ZTE_PRODUCT_MF626 0x0031 #define ZTE_PRODUCT_CDMA_TECH 0xfffe #define ZTE_PRODUCT_AC8710 0xfff1 #define ZTE_PRODUCT_AC2726 0xfff5 #define ZTE_PRODUCT_AC8710T 0xffff /* ZTE PRODUCTS -- alternate vendor ID */ #define ZTE_VENDOR_ID2 0x1d6b #define ZTE_PRODUCT_MF_330 0x0002 #define BENQ_VENDOR_ID 0x04a5 #define BENQ_PRODUCT_H10 0x4068 #define DLINK_VENDOR_ID 0x1186 #define DLINK_PRODUCT_DWM_652 0x3e04 #define DLINK_PRODUCT_DWM_652_U5 0xce16 #define DLINK_PRODUCT_DWM_652_U5A 0xce1e #define QISDA_VENDOR_ID 0x1da5 #define QISDA_PRODUCT_H21_4512 0x4512 #define QISDA_PRODUCT_H21_4523 0x4523 #define QISDA_PRODUCT_H20_4515 0x4515 #define QISDA_PRODUCT_H20_4518 0x4518 #define QISDA_PRODUCT_H20_4519 0x4519 /* TLAYTECH PRODUCTS */ #define TLAYTECH_VENDOR_ID 0x20B9 #define TLAYTECH_PRODUCT_TEU800 0x1682 /* TOSHIBA PRODUCTS */ #define TOSHIBA_VENDOR_ID 0x0930 #define TOSHIBA_PRODUCT_HSDPA_MINICARD 0x1302 #define TOSHIBA_PRODUCT_G450 0x0d45 #define ALINK_VENDOR_ID 0x1e0e #define ALINK_PRODUCT_3GU 0x9200 /* ALCATEL PRODUCTS */ #define ALCATEL_VENDOR_ID 0x1bbb #define ALCATEL_PRODUCT_X060S 0x0000 #define PIRELLI_VENDOR_ID 0x1266 #define PIRELLI_PRODUCT_C100_1 0x1002 #define PIRELLI_PRODUCT_C100_2 0x1003 #define PIRELLI_PRODUCT_1004 0x1004 #define PIRELLI_PRODUCT_1005 0x1005 #define 
PIRELLI_PRODUCT_1006 0x1006 #define PIRELLI_PRODUCT_1007 0x1007 #define PIRELLI_PRODUCT_1008 0x1008 #define PIRELLI_PRODUCT_1009 0x1009 #define PIRELLI_PRODUCT_100A 0x100a #define PIRELLI_PRODUCT_100B 0x100b #define PIRELLI_PRODUCT_100C 0x100c #define PIRELLI_PRODUCT_100D 0x100d #define PIRELLI_PRODUCT_100E 0x100e #define PIRELLI_PRODUCT_100F 0x100f #define PIRELLI_PRODUCT_1011 0x1011 #define PIRELLI_PRODUCT_1012 0x1012 /* Airplus products */ #define AIRPLUS_VENDOR_ID 0x1011 #define AIRPLUS_PRODUCT_MCD650 0x3198 /* Longcheer/Longsung vendor ID; makes whitelabel devices that * many other vendors like 4G Systems, Alcatel, ChinaBird, * Mobidata, etc sell under their own brand names. */ #define LONGCHEER_VENDOR_ID 0x1c9e /* 4G Systems products */ /* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick * * It seems to contain a Qualcomm QSC6240/6290 chipset */ #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 /* Haier products */ #define HAIER_VENDOR_ID 0x201e #define HAIER_PRODUCT_CE100 0x2009 #define CINTERION_VENDOR_ID 0x0681 /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c #define OLIVETTI_PRODUCT_OLICARD100 0xc000 /* Celot products */ #define CELOT_VENDOR_ID 0x211f #define CELOT_PRODUCT_CT680M 0x6801 /* some devices interfaces need special handling due to a number of reasons */ enum option_blacklist_reason { OPTION_BLACKLIST_NONE = 0, OPTION_BLACKLIST_SENDSETUP = 1, OPTION_BLACKLIST_RESERVED_IF = 2 }; struct option_blacklist_info { const u32 infolen; /* number of interface numbers on blacklist */ const u8 *ifaceinfo; /* pointer to the array holding the numbers */ enum option_blacklist_reason reason; }; static const u8 four_g_w14_no_sendsetup[] = { 0, 1 }; static const struct option_blacklist_info four_g_w14_blacklist = { .infolen = ARRAY_SIZE(four_g_w14_no_sendsetup), .ifaceinfo = four_g_w14_no_sendsetup, .reason = OPTION_BLACKLIST_SENDSETUP }; static const struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, { 
USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_LIGHT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_QUAD) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_QUAD_LIGHT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_NDIS) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_NDIS_LIGHT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_NDIS_QUAD) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_NDIS_QUAD_LIGHT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COBRA) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COBRA_BUS) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_VIPER) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_VIPER_BUS) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_GT_MAX_READY) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUJI_MODEM_LIGHT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUJI_MODEM_GT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUJI_MODEM_EX) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_KOI_MODEM) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_SCORPION_MODEM) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_LITE) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_GT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_EX) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_KOI_MODEM) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_GTM380_MODEM) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_Q101) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_Q111) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1401, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1402, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1403, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1404, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1405, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1406, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1407, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1408, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1409, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140A, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140B, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140C, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140D, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140E, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140F, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1410, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1411, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1412, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1413, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1414, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1415, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1416, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1417, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1418, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1419, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141A, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141B, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141C, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141D, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141E, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141F, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1420, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1421, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1422, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1423, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1424, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1425, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1426, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1427, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1428, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1429, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142A, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142B, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142C, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142D, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142E, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142F, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1430, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1431, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1432, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1433, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1434, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1435, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1436, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1437, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1438, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1439, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143A, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143B, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143C, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_9508) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, /* Novatel Merlin EX720/V740/X720 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) }, /* Novatel Merlin V720/S720/PC720 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) }, /* Novatel U730/U740 (VF version) */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) }, /* Novatel U740 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) }, /* Novatel U870 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) }, /* Novatel Merlin XU870 HSDPA/3G */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) }, /* Novatel X950D */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) }, /* Novatel EV620/ES620 CDMA/EV-DO */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) }, /* Novatel ES620/ES720/U720/USB720 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) }, /* Novatel E725/E726 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) }, /* Novatel Merlin ES620 SM Bus */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) }, /* Novatel EU730 and Vodafone EU740 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 
NOVATELWIRELESS_PRODUCT_EU740) }, /* Novatel non-Vodafone EU740 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727_NEW) }, /* Novatel MC727/U727/USB727 refresh */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, /* Novatel HSPA Embedded product */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) }, /* Novatel EVDO product */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) }, /* Novatel HSPA product */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) }, /* Novatel EVDO Embedded product */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) }, /* Novatel HSPA Embedded product */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL) }, /* Novatel Global product */ { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H02) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_SKYPEPHONE_S2) }, { 
USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5500_MINICARD) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5505_MINICARD) }, /* Dell Wireless 5505 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_EXPRESSCARD) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO ExpressCard == Novatel Merlin XV620 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5510_EXPRESSCARD) }, /* Dell Wireless 5510 Mobile Broadband HSDPA ExpressCard == Novatel Merlin XU870 HSDPA/3G */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD_SPRINT) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite E720 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD_TELUS) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite ET620 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_VZW) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_SPRINT) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_TELUS) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_CINGULAR) }, /* Dell Wireless HSDPA 5520 == Novatel Expedite EU860D */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_GENERIC_L) }, /* Dell Wireless HSDPA 5520 */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_GENERIC_I) }, /* Dell Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_SPRINT) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ { USB_DEVICE(DELL_VENDOR_ID, 
DELL_PRODUCT_5730_MINICARD_TELUS) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) }, { USB_DEVICE(YISO_VENDOR_ID, YISO_PRODUCT_U893) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1004) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1005) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1006) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1007) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1008) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1009) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100A) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100B) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100C) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100D) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100E) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100F) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1010) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1011) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012) }, { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, 
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0006, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0007, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0008, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0009, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000a, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000b, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000c, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000d, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000e, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000f, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) 
}, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0082, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0145, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0146, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0148, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0149, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0150, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0151, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0154, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1060, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1061, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1062, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1063, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1064, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1065, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1066, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1067, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1068, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1069, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1070, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1071, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1072, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1073, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1074, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1075, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1076, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1077, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1078, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1079, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1080, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1081, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1082, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1083, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1084, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1085, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1086, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1087, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1088, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1089, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1090, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1091, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1092, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1093, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1094, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1095, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1096, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1097, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1098, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1099, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1100, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1101, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1102, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1103, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1104, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1105, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1106, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1107, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1108, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1109, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1110, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1111, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1112, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1113, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1114, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1115, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1116, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1117, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1118, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1119, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1120, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1121, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1122, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1123, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1124, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1125, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1126, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1127, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1128, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1129, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1130, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1131, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1132, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1133, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1134, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1135, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1136, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1137, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1138, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1139, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1140, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1141, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1142, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1143, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1144, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1145, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1146, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1147, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1148, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1149, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1150, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1151, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1152, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1153, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1154, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1155, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1156, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1157, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1158, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1159, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1160, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1161, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1162, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1163, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1164, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1165, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1166, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1167, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1168, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1260, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1261, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1262, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1263, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1264, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1265, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1266, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1279, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1280, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1281, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1282, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1283, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1284, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1285, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1286, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1287, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1288, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1289, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1290, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1291, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1292, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1293, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1294, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1295, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1296, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1297, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) }, { USB_DEVICE(ZTE_VENDOR_ID2, ZTE_PRODUCT_MF_330) }, { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */ { 
USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5A) }, { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) }, { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4523) }, { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4515) }, { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4518) }, { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4519) }, { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) }, { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */ { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) }, { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), .driver_info = (kernel_ulong_t)&four_g_w14_blacklist }, { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, /* Pirelli */ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1004)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1005)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1006)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1007)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1008)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1009)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100A)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100B) }, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100C) }, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100D) }, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100E) }, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) }, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)}, { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) }, { 
/* Tail of the option_ids match table (the table is opened earlier in the
 * file); the empty braces terminate the list for the USB core. */
USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
	{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
	{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);

/*
 * USB driver glue: the usb-serial core does the actual probing and
 * disconnect handling; this struct only supplies the ID table and,
 * when power management is enabled, the suspend/resume hooks.
 */
static struct usb_driver option_driver = {
	.name = "option",
	.probe = usb_serial_probe,
	.disconnect = usb_serial_disconnect,
#ifdef CONFIG_PM
	.suspend = usb_serial_suspend,
	.resume = usb_serial_resume,
	.supports_autosuspend = 1,
#endif
	.id_table = option_ids,
	.no_dynamic_id = 1,
};

/* The card has three separate interfaces, which the serial driver
 * recognizes separately, thus num_port=1.
 *
 * Most tty operations are delegated to the shared usb_wwan helpers;
 * only probe and the interrupt-in callback are driver-specific.
 */
static struct usb_serial_driver option_1port_device = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "option1",
	},
	.description = "GSM modem (1-port)",
	.usb_driver = &option_driver,
	.id_table = option_ids,
	.num_ports = 1,
	.probe = option_probe,
	.open = usb_wwan_open,
	.close = usb_wwan_close,
	.dtr_rts = usb_wwan_dtr_rts,
	.write = usb_wwan_write,
	.write_room = usb_wwan_write_room,
	.chars_in_buffer = usb_wwan_chars_in_buffer,
	.set_termios = usb_wwan_set_termios,
	.tiocmget = usb_wwan_tiocmget,
	.tiocmset = usb_wwan_tiocmset,
	.attach = usb_wwan_startup,
	.disconnect = usb_wwan_disconnect,
	.release = usb_wwan_release,
	.read_int_callback = option_instat_callback,
#ifdef CONFIG_PM
	.suspend = usb_wwan_suspend,
	.resume = usb_wwan_resume,
#endif
};

/* Module parameter (see module_param() at the bottom of the file):
 * enables the dbg() trace messages in this driver. */
static int debug;

/* per port private data */

#define N_IN_URB 4		/* bulk-in URBs per port */
#define N_OUT_URB 4		/* bulk-out URBs per port */
#define IN_BUFLEN 4096		/* bytes per bulk-in buffer */
#define OUT_BUFLEN 4096		/* bytes per bulk-out buffer */

/* Per-port state; the URB/buffer arrays and timestamps are managed by the
 * shared usb_wwan core, the signal state is updated here in
 * option_instat_callback() and read by option_send_setup(). */
struct option_port_private {
	/* Input endpoints and buffer for this port */
	struct urb *in_urbs[N_IN_URB];
	u8 *in_buffer[N_IN_URB];
	/* Output endpoints and buffer for this port */
	struct urb *out_urbs[N_OUT_URB];
	u8 *out_buffer[N_OUT_URB];
	unsigned long out_busy;	/* Bit vector of URBs in use */
	int opened;
	struct usb_anchor delayed;

	/* Settings for the port */
	int rts_state;	/* Handshaking pins (outputs) */
	int dtr_state;
	int cts_state;	/* Handshaking pins (inputs) */
	int dsr_state;
	int dcd_state;
	int ri_state;

	/* NOTE(review): maintained by the usb_wwan core, not in this file --
	 * confirm semantics there. */
	unsigned long tx_start_time[N_OUT_URB];
};

/* Functions used by new usb-serial code. */

/*
 * Module init: register the serial driver first, then the USB driver,
 * unwinding the serial registration if USB registration fails.
 */
static int __init option_init(void)
{
	int retval;
	retval = usb_serial_register(&option_1port_device);
	if (retval)
		goto failed_1port_device_register;
	retval = usb_register(&option_driver);
	if (retval)
		goto failed_driver_register;

	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
	       DRIVER_DESC "\n");

	return 0;

failed_driver_register:
	usb_serial_deregister(&option_1port_device);
failed_1port_device_register:
	return retval;
}

/* Module exit: unregister in reverse order of registration. */
static void __exit option_exit(void)
{
	usb_deregister(&option_driver);
	usb_serial_deregister(&option_1port_device);
}

module_init(option_init);
module_exit(option_exit);

/*
 * Per-interface probe: reject interfaces that are known not to be
 * modem/AT ports, then allocate the shared usb_wwan interface state and
 * stash this device's blacklist (from the ID table's driver_info) in it.
 */
static int option_probe(struct usb_serial *serial,
			const struct usb_device_id *id)
{
	struct usb_wwan_intf_private *data;

	/* D-Link DWM 652 still exposes CD-Rom emulation interface in modem mode */
	if (serial->dev->descriptor.idVendor == DLINK_VENDOR_ID &&
	    serial->dev->descriptor.idProduct == DLINK_PRODUCT_DWM_652 &&
	    serial->interface->cur_altsetting->desc.bInterfaceClass == 0x8)
		return -ENODEV;

	/* Bandrich modem and AT command interface is 0xff */
	if ((serial->dev->descriptor.idVendor == BANDRICH_VENDOR_ID ||
	     serial->dev->descriptor.idVendor == PIRELLI_VENDOR_ID) &&
	    serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff)
		return -ENODEV;

	/* Don't bind network interfaces on Huawei K3765 & K4505 */
	if (serial->dev->descriptor.idVendor == HUAWEI_VENDOR_ID &&
	    (serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K3765 ||
	     serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505) &&
	    serial->interface->cur_altsetting->desc.bInterfaceNumber == 1)
		return -ENODEV;

	data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private),
					 GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->send_setup = option_send_setup;
	spin_lock_init(&data->susp_lock);
	/* driver_info carries the per-device option_blacklist_info pointer */
	data->private = (void *)id->driver_info;
	return 0;
}

/*
 * Look up @ifnum in the device's interface blacklist.  Returns the listed
 * reason, or OPTION_BLACKLIST_NONE when the interface is not listed or no
 * blacklist was supplied.
 */
static enum option_blacklist_reason is_blacklisted(const u8 ifnum,
		const struct option_blacklist_info *blacklist)
{
	const u8 *info;
	int i;

	if (blacklist) {
		info = blacklist->ifaceinfo;

		for (i = 0; i < blacklist->infolen; i++) {
			if (info[i] == ifnum)
				return blacklist->reason;
		}
	}
	return OPTION_BLACKLIST_NONE;
}

/*
 * Interrupt-in (status) URB completion handler.  Decodes CDC-style
 * SERIAL_STATE notifications (bRequestType 0xA1, bRequest 0x20) into the
 * per-port DCD/DSR/RI state, hangs up the tty when DCD drops (unless
 * CLOCAL is set), and resubmits the URB unless the device is going away.
 */
static void option_instat_callback(struct urb *urb)
{
	int err;
	int status = urb->status;
	struct usb_serial_port *port = urb->context;
	struct option_port_private *portdata = usb_get_serial_port_data(port);

	dbg("%s", __func__);
	dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata);

	if (status == 0) {
		struct usb_ctrlrequest *req_pkt =
				(struct usb_ctrlrequest *)urb->transfer_buffer;

		if (!req_pkt) {
			dbg("%s: NULL req_pkt", __func__);
			return;
		}
		if ((req_pkt->bRequestType == 0xA1) &&
		    (req_pkt->bRequest == 0x20)) {
			int old_dcd_state;
			/* signal byte follows the 8-byte request header */
			unsigned char signals = *((unsigned char *)
					urb->transfer_buffer +
					sizeof(struct usb_ctrlrequest));

			dbg("%s: signal x%x", __func__, signals);

			old_dcd_state = portdata->dcd_state;
			portdata->cts_state = 1;
			portdata->dcd_state = ((signals & 0x01) ? 1 : 0);
			portdata->dsr_state = ((signals & 0x02) ? 1 : 0);
			portdata->ri_state = ((signals & 0x08) ? 1 : 0);

			/* hang up the tty on carrier loss */
			if (old_dcd_state && !portdata->dcd_state) {
				struct tty_struct *tty =
						tty_port_tty_get(&port->port);
				if (tty && !C_CLOCAL(tty))
					tty_hangup(tty);
				tty_kref_put(tty);
			}
		} else {
			dbg("%s: type %x req %x", __func__,
			    req_pkt->bRequestType, req_pkt->bRequest);
		}
	} else
		err("%s: error %d", __func__, status);

	/* Resubmit urb so we continue receiving IRQ data */
	if (status != -ESHUTDOWN && status != -ENOENT) {
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err)
			dbg("%s: resubmit intr urb failed. (%d)",
			    __func__, err);
	}
}

/** send RTS/DTR state to the port.
 *
 * This is exactly the same as SET_CONTROL_LINE_STATE from the PSTN
 * CDC.
*/ static int option_send_setup(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct usb_wwan_intf_private *intfdata = (struct usb_wwan_intf_private *) serial->private; struct option_port_private *portdata; int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber; int val = 0; dbg("%s", __func__); if (is_blacklisted(ifNum, (struct option_blacklist_info *) intfdata->private) == OPTION_BLACKLIST_SENDSETUP) { dbg("No send_setup on blacklisted interface #%d\n", ifNum); return -EIO; } portdata = usb_get_serial_port_data(port); if (portdata->dtr_state) val |= 0x01; if (portdata->rts_state) val |= 0x02; return usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 0x22, 0x21, val, ifNum, NULL, 0, USB_CTRL_SET_TIMEOUT); } MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug messages");
gpl-2.0
DerArtem/android_kernel_dell_streak7
sound/soc/s3c24xx/jive_wm8750.c
842
4680
/* sound/soc/s3c24xx/jive_wm8750.c * * Copyright 2007,2008 Simtec Electronics * * Based on sound/soc/pxa/spitz.c * Copyright 2005 Wolfson Microelectronics PLC. * Copyright 2005 Openedhand Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <asm/mach-types.h> #include "s3c-dma.h" #include "s3c2412-i2s.h" #include "../codecs/wm8750.h" static const struct snd_soc_dapm_route audio_map[] = { { "Headphone Jack", NULL, "LOUT1" }, { "Headphone Jack", NULL, "ROUT1" }, { "Internal Speaker", NULL, "LOUT2" }, { "Internal Speaker", NULL, "ROUT2" }, { "LINPUT1", NULL, "Line Input" }, { "RINPUT1", NULL, "Line Input" }, }; static const struct snd_soc_dapm_widget wm8750_dapm_widgets[] = { SND_SOC_DAPM_HP("Headphone Jack", NULL), SND_SOC_DAPM_SPK("Internal Speaker", NULL), SND_SOC_DAPM_LINE("Line In", NULL), }; static int jive_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->dai->codec_dai; struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; struct s3c_i2sv2_rate_calc div; unsigned int clk = 0; int ret = 0; switch (params_rate(params)) { case 8000: case 16000: case 48000: case 96000: clk = 12288000; break; case 11025: case 22050: case 44100: clk = 11289600; break; } s3c_i2sv2_iis_calc_rate(&div, NULL, params_rate(params), s3c_i2sv2_get_clock(cpu_dai)); /* set codec DAI configuration */ ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS); if (ret < 0) return ret; /* set cpu DAI configuration */ 
ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS); if (ret < 0) return ret; /* set the codec system clock for DAC and ADC */ ret = snd_soc_dai_set_sysclk(codec_dai, WM8750_SYSCLK, clk, SND_SOC_CLOCK_IN); if (ret < 0) return ret; ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C2412_DIV_RCLK, div.fs_div); if (ret < 0) return ret; ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C2412_DIV_PRESCALER, div.clk_div - 1); if (ret < 0) return ret; return 0; } static struct snd_soc_ops jive_ops = { .hw_params = jive_hw_params, }; static int jive_wm8750_init(struct snd_soc_codec *codec) { int err; /* These endpoints are not being used. */ snd_soc_dapm_nc_pin(codec, "LINPUT2"); snd_soc_dapm_nc_pin(codec, "RINPUT2"); snd_soc_dapm_nc_pin(codec, "LINPUT3"); snd_soc_dapm_nc_pin(codec, "RINPUT3"); snd_soc_dapm_nc_pin(codec, "OUT3"); snd_soc_dapm_nc_pin(codec, "MONO"); /* Add jive specific widgets */ err = snd_soc_dapm_new_controls(codec, wm8750_dapm_widgets, ARRAY_SIZE(wm8750_dapm_widgets)); if (err) { printk(KERN_ERR "%s: failed to add widgets (%d)\n", __func__, err); return err; } snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); snd_soc_dapm_sync(codec); return 0; } static struct snd_soc_dai_link jive_dai = { .name = "wm8750", .stream_name = "WM8750", .cpu_dai = &s3c2412_i2s_dai, .codec_dai = &wm8750_dai, .init = jive_wm8750_init, .ops = &jive_ops, }; /* jive audio machine driver */ static struct snd_soc_card snd_soc_machine_jive = { .name = "Jive", .platform = &s3c24xx_soc_platform, .dai_link = &jive_dai, .num_links = 1, }; /* jive audio subsystem */ static struct snd_soc_device jive_snd_devdata = { .card = &snd_soc_machine_jive, .codec_dev = &soc_codec_dev_wm8750, }; static struct platform_device *jive_snd_device; static int __init jive_init(void) { int ret; if (!machine_is_jive()) return 0; printk("JIVE WM8750 Audio support\n"); jive_snd_device = platform_device_alloc("soc-audio", -1); if (!jive_snd_device) return -ENOMEM; 
platform_set_drvdata(jive_snd_device, &jive_snd_devdata); jive_snd_devdata.dev = &jive_snd_device->dev; ret = platform_device_add(jive_snd_device); if (ret) platform_device_put(jive_snd_device); return ret; } static void __exit jive_exit(void) { platform_device_unregister(jive_snd_device); } module_init(jive_init); module_exit(jive_exit); MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); MODULE_DESCRIPTION("ALSA SoC Jive Audio support"); MODULE_LICENSE("GPL");
gpl-2.0
leshak/i5700-leshak-kernel
net/ipv4/netfilter/nf_nat_proto_dccp.c
1610
2906
/* * DCCP NAT protocol helper * * Copyright (c) 2005, 2006. 2008 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/skbuff.h> #include <linux/ip.h> #include <linux/dccp.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_protocol.h> static u_int16_t dccp_port_rover; static bool dccp_unique_tuple(struct nf_conntrack_tuple *tuple, const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, &dccp_port_rover); } static bool dccp_manip_pkt(struct sk_buff *skb, unsigned int iphdroff, const struct nf_conntrack_tuple *tuple, enum nf_nat_manip_type maniptype) { const struct iphdr *iph = (const void *)(skb->data + iphdroff); struct dccp_hdr *hdr; unsigned int hdroff = iphdroff + iph->ihl * 4; __be32 oldip, newip; __be16 *portptr, oldport, newport; int hdrsize = 8; /* DCCP connection tracking guarantees this much */ if (skb->len >= hdroff + sizeof(struct dccp_hdr)) hdrsize = sizeof(struct dccp_hdr); if (!skb_make_writable(skb, hdroff + hdrsize)) return false; iph = (struct iphdr *)(skb->data + iphdroff); hdr = (struct dccp_hdr *)(skb->data + hdroff); if (maniptype == IP_NAT_MANIP_SRC) { oldip = iph->saddr; newip = tuple->src.u3.ip; newport = tuple->src.u.dccp.port; portptr = &hdr->dccph_sport; } else { oldip = iph->daddr; newip = tuple->dst.u3.ip; newport = tuple->dst.u.dccp.port; portptr = &hdr->dccph_dport; } oldport = *portptr; *portptr = newport; if (hdrsize < sizeof(*hdr)) return true; inet_proto_csum_replace4(&hdr->dccph_checksum, skb, oldip, newip, 1); inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport, 0); return true; } static const 
struct nf_nat_protocol nf_nat_protocol_dccp = { .protonum = IPPROTO_DCCP, .me = THIS_MODULE, .manip_pkt = dccp_manip_pkt, .in_range = nf_nat_proto_in_range, .unique_tuple = dccp_unique_tuple, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .range_to_nlattr = nf_nat_proto_range_to_nlattr, .nlattr_to_range = nf_nat_proto_nlattr_to_range, #endif }; static int __init nf_nat_proto_dccp_init(void) { return nf_nat_protocol_register(&nf_nat_protocol_dccp); } static void __exit nf_nat_proto_dccp_fini(void) { nf_nat_protocol_unregister(&nf_nat_protocol_dccp); } module_init(nf_nat_proto_dccp_init); module_exit(nf_nat_proto_dccp_fini); MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); MODULE_DESCRIPTION("DCCP NAT protocol helper"); MODULE_LICENSE("GPL");
gpl-2.0
darkspr1te/android_kernel_samsung_msm8916_a5
arch/arm/mach-kirkwood/ts219-setup.c
2634
3591
/* * * QNAP TS-11x/TS-21x Turbo NAS Board Setup * * Copyright (C) 2009 Martin Michlmayr <tbm@cyrius.com> * Copyright (C) 2008 Byron Bradley <byron.bbradley@gmail.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/i2c.h> #include <linux/mv643xx_eth.h> #include <linux/ata_platform.h> #include <linux/gpio_keys.h> #include <linux/input.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/kirkwood.h> #include "common.h" #include "mpp.h" #include "tsx1x-common.h" static struct i2c_board_info __initdata qnap_ts219_i2c_rtc = { I2C_BOARD_INFO("s35390a", 0x30), }; static struct mv643xx_eth_platform_data qnap_ts219_ge00_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(8), }; static struct mv_sata_platform_data qnap_ts219_sata_data = { .n_ports = 2, }; static struct gpio_keys_button qnap_ts219_buttons[] = { { .code = KEY_COPY, .gpio = 15, .desc = "USB Copy", .active_low = 1, }, { .code = KEY_RESTART, .gpio = 16, .desc = "Reset", .active_low = 1, }, }; static struct gpio_keys_platform_data qnap_ts219_button_data = { .buttons = qnap_ts219_buttons, .nbuttons = ARRAY_SIZE(qnap_ts219_buttons), }; static struct platform_device qnap_ts219_button_device = { .name = "gpio-keys", .id = -1, .num_resources = 0, .dev = { .platform_data = &qnap_ts219_button_data, } }; static unsigned int qnap_ts219_mpp_config[] __initdata = { MPP0_SPI_SCn, MPP1_SPI_MOSI, MPP2_SPI_SCK, MPP3_SPI_MISO, MPP4_SATA1_ACTn, MPP5_SATA0_ACTn, MPP8_TW0_SDA, MPP9_TW0_SCK, MPP10_UART0_TXD, MPP11_UART0_RXD, MPP13_UART1_TXD, /* PIC controller */ MPP14_UART1_RXD, /* PIC controller */ MPP15_GPIO, /* USB Copy button (on devices with 88F6281) */ MPP16_GPIO, /* Reset button (on devices with 88F6281) */ 
MPP36_GPIO, /* RAM: 0: 256 MB, 1: 512 MB */ MPP37_GPIO, /* Reset button (on devices with 88F6282) */ MPP43_GPIO, /* USB Copy button (on devices with 88F6282) */ MPP44_GPIO, /* Board ID: 0: TS-11x, 1: TS-21x */ 0 }; static void __init qnap_ts219_init(void) { u32 dev, rev; /* * Basic setup. Needs to be called early. */ kirkwood_init(); kirkwood_mpp_conf(qnap_ts219_mpp_config); kirkwood_uart0_init(); kirkwood_uart1_init(); /* A PIC controller is connected here. */ qnap_tsx1x_register_flash(); kirkwood_i2c_init(); i2c_register_board_info(0, &qnap_ts219_i2c_rtc, 1); kirkwood_pcie_id(&dev, &rev); if (dev == MV88F6282_DEV_ID) { qnap_ts219_buttons[0].gpio = 43; /* USB Copy button */ qnap_ts219_buttons[1].gpio = 37; /* Reset button */ qnap_ts219_ge00_data.phy_addr = MV643XX_ETH_PHY_ADDR(0); } kirkwood_ge00_init(&qnap_ts219_ge00_data); kirkwood_sata_init(&qnap_ts219_sata_data); kirkwood_ehci_init(); platform_device_register(&qnap_ts219_button_device); pm_power_off = qnap_tsx1x_power_off; } static int __init ts219_pci_init(void) { if (machine_is_ts219()) kirkwood_pcie_init(KW_PCIE1 | KW_PCIE0); return 0; } subsys_initcall(ts219_pci_init); MACHINE_START(TS219, "QNAP TS-119/TS-219") /* Maintainer: Martin Michlmayr <tbm@cyrius.com> */ .atag_offset = 0x100, .init_machine = qnap_ts219_init, .map_io = kirkwood_map_io, .init_early = kirkwood_init_early, .init_irq = kirkwood_init_irq, .init_time = kirkwood_timer_init, .restart = kirkwood_restart, MACHINE_END
gpl-2.0
hunter3k/aosp_kernel_lge_d315
lib/mpi/mpih-mul.c
4938
15270
/* mpihelp-mul.c - MPI helper functions * Copyright (C) 1994, 1996, 1998, 1999, * 2000 Free Software Foundation, Inc. * * This file is part of GnuPG. * * GnuPG is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * GnuPG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. * The GNU MP Library itself is published under the LGPL; * however I decided to publish this code under the plain GPL. */ #include <linux/string.h> #include "mpi-internal.h" #include "longlong.h" #define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace) \ do { \ if ((size) < KARATSUBA_THRESHOLD) \ mul_n_basecase(prodp, up, vp, size); \ else \ mul_n(prodp, up, vp, size, tspace); \ } while (0); #define MPN_SQR_N_RECURSE(prodp, up, size, tspace) \ do { \ if ((size) < KARATSUBA_THRESHOLD) \ mpih_sqr_n_basecase(prodp, up, size); \ else \ mpih_sqr_n(prodp, up, size, tspace); \ } while (0); /* Multiply the natural numbers u (pointed to by UP) and v (pointed to by VP), * both with SIZE limbs, and store the result at PRODP. 2 * SIZE limbs are * always stored. Return the most significant limb. * * Argument constraints: * 1. PRODP != UP and PRODP != VP, i.e. 
the destination * must be distinct from the multiplier and the multiplicand. * * * Handle simple cases with traditional multiplication. * * This is the most critical code of multiplication. All multiplies rely * on this, both small and huge. Small ones arrive here immediately. Huge * ones arrive here as this is the base case for Karatsuba's recursive * algorithm below. */ static mpi_limb_t mul_n_basecase(mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size) { mpi_size_t i; mpi_limb_t cy; mpi_limb_t v_limb; /* Multiply by the first limb in V separately, as the result can be * stored (not added) to PROD. We also avoid a loop for zeroing. */ v_limb = vp[0]; if (v_limb <= 1) { if (v_limb == 1) MPN_COPY(prodp, up, size); else MPN_ZERO(prodp, size); cy = 0; } else cy = mpihelp_mul_1(prodp, up, size, v_limb); prodp[size] = cy; prodp++; /* For each iteration in the outer loop, multiply one limb from * U with one limb from V, and add it to PROD. */ for (i = 1; i < size; i++) { v_limb = vp[i]; if (v_limb <= 1) { cy = 0; if (v_limb == 1) cy = mpihelp_add_n(prodp, prodp, up, size); } else cy = mpihelp_addmul_1(prodp, up, size, v_limb); prodp[size] = cy; prodp++; } return cy; } static void mul_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size, mpi_ptr_t tspace) { if (size & 1) { /* The size is odd, and the code below doesn't handle that. * Multiply the least significant (size - 1) limbs with a recursive * call, and handle the most significant limb of S1 and S2 * separately. * A slightly faster way to do this would be to make the Karatsuba * code below behave as if the size were even, and let it check for * odd size in the end. I.e., in essence move this code to the end. * Doing so would save us a recursive call, and potentially make the * stack grow a lot less. 
*/ mpi_size_t esize = size - 1; /* even size */ mpi_limb_t cy_limb; MPN_MUL_N_RECURSE(prodp, up, vp, esize, tspace); cy_limb = mpihelp_addmul_1(prodp + esize, up, esize, vp[esize]); prodp[esize + esize] = cy_limb; cy_limb = mpihelp_addmul_1(prodp + esize, vp, size, up[esize]); prodp[esize + size] = cy_limb; } else { /* Anatolij Alekseevich Karatsuba's divide-and-conquer algorithm. * * Split U in two pieces, U1 and U0, such that * U = U0 + U1*(B**n), * and V in V1 and V0, such that * V = V0 + V1*(B**n). * * UV is then computed recursively using the identity * * 2n n n n * UV = (B + B )U V + B (U -U )(V -V ) + (B + 1)U V * 1 1 1 0 0 1 0 0 * * Where B = 2**BITS_PER_MP_LIMB. */ mpi_size_t hsize = size >> 1; mpi_limb_t cy; int negflg; /* Product H. ________________ ________________ * |_____U1 x V1____||____U0 x V0_____| * Put result in upper part of PROD and pass low part of TSPACE * as new TSPACE. */ MPN_MUL_N_RECURSE(prodp + size, up + hsize, vp + hsize, hsize, tspace); /* Product M. ________________ * |_(U1-U0)(V0-V1)_| */ if (mpihelp_cmp(up + hsize, up, hsize) >= 0) { mpihelp_sub_n(prodp, up + hsize, up, hsize); negflg = 0; } else { mpihelp_sub_n(prodp, up, up + hsize, hsize); negflg = 1; } if (mpihelp_cmp(vp + hsize, vp, hsize) >= 0) { mpihelp_sub_n(prodp + hsize, vp + hsize, vp, hsize); negflg ^= 1; } else { mpihelp_sub_n(prodp + hsize, vp, vp + hsize, hsize); /* No change of NEGFLG. */ } /* Read temporary operands from low part of PROD. * Put result in low part of TSPACE using upper part of TSPACE * as new TSPACE. */ MPN_MUL_N_RECURSE(tspace, prodp, prodp + hsize, hsize, tspace + size); /* Add/copy product H. */ MPN_COPY(prodp + hsize, prodp + size, hsize); cy = mpihelp_add_n(prodp + size, prodp + size, prodp + size + hsize, hsize); /* Add product M (if NEGFLG M is a negative number) */ if (negflg) cy -= mpihelp_sub_n(prodp + hsize, prodp + hsize, tspace, size); else cy += mpihelp_add_n(prodp + hsize, prodp + hsize, tspace, size); /* Product L. 
________________ ________________ * |________________||____U0 x V0_____| * Read temporary operands from low part of PROD. * Put result in low part of TSPACE using upper part of TSPACE * as new TSPACE. */ MPN_MUL_N_RECURSE(tspace, up, vp, hsize, tspace + size); /* Add/copy Product L (twice) */ cy += mpihelp_add_n(prodp + hsize, prodp + hsize, tspace, size); if (cy) mpihelp_add_1(prodp + hsize + size, prodp + hsize + size, hsize, cy); MPN_COPY(prodp, tspace, hsize); cy = mpihelp_add_n(prodp + hsize, prodp + hsize, tspace + hsize, hsize); if (cy) mpihelp_add_1(prodp + size, prodp + size, size, 1); } } void mpih_sqr_n_basecase(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size) { mpi_size_t i; mpi_limb_t cy_limb; mpi_limb_t v_limb; /* Multiply by the first limb in V separately, as the result can be * stored (not added) to PROD. We also avoid a loop for zeroing. */ v_limb = up[0]; if (v_limb <= 1) { if (v_limb == 1) MPN_COPY(prodp, up, size); else MPN_ZERO(prodp, size); cy_limb = 0; } else cy_limb = mpihelp_mul_1(prodp, up, size, v_limb); prodp[size] = cy_limb; prodp++; /* For each iteration in the outer loop, multiply one limb from * U with one limb from V, and add it to PROD. */ for (i = 1; i < size; i++) { v_limb = up[i]; if (v_limb <= 1) { cy_limb = 0; if (v_limb == 1) cy_limb = mpihelp_add_n(prodp, prodp, up, size); } else cy_limb = mpihelp_addmul_1(prodp, up, size, v_limb); prodp[size] = cy_limb; prodp++; } } void mpih_sqr_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size, mpi_ptr_t tspace) { if (size & 1) { /* The size is odd, and the code below doesn't handle that. * Multiply the least significant (size - 1) limbs with a recursive * call, and handle the most significant limb of S1 and S2 * separately. * A slightly faster way to do this would be to make the Karatsuba * code below behave as if the size were even, and let it check for * odd size in the end. I.e., in essence move this code to the end. 
* Doing so would save us a recursive call, and potentially make the * stack grow a lot less. */ mpi_size_t esize = size - 1; /* even size */ mpi_limb_t cy_limb; MPN_SQR_N_RECURSE(prodp, up, esize, tspace); cy_limb = mpihelp_addmul_1(prodp + esize, up, esize, up[esize]); prodp[esize + esize] = cy_limb; cy_limb = mpihelp_addmul_1(prodp + esize, up, size, up[esize]); prodp[esize + size] = cy_limb; } else { mpi_size_t hsize = size >> 1; mpi_limb_t cy; /* Product H. ________________ ________________ * |_____U1 x U1____||____U0 x U0_____| * Put result in upper part of PROD and pass low part of TSPACE * as new TSPACE. */ MPN_SQR_N_RECURSE(prodp + size, up + hsize, hsize, tspace); /* Product M. ________________ * |_(U1-U0)(U0-U1)_| */ if (mpihelp_cmp(up + hsize, up, hsize) >= 0) mpihelp_sub_n(prodp, up + hsize, up, hsize); else mpihelp_sub_n(prodp, up, up + hsize, hsize); /* Read temporary operands from low part of PROD. * Put result in low part of TSPACE using upper part of TSPACE * as new TSPACE. */ MPN_SQR_N_RECURSE(tspace, prodp, hsize, tspace + size); /* Add/copy product H */ MPN_COPY(prodp + hsize, prodp + size, hsize); cy = mpihelp_add_n(prodp + size, prodp + size, prodp + size + hsize, hsize); /* Add product M (if NEGFLG M is a negative number). */ cy -= mpihelp_sub_n(prodp + hsize, prodp + hsize, tspace, size); /* Product L. ________________ ________________ * |________________||____U0 x U0_____| * Read temporary operands from low part of PROD. * Put result in low part of TSPACE using upper part of TSPACE * as new TSPACE. */ MPN_SQR_N_RECURSE(tspace, up, hsize, tspace + size); /* Add/copy Product L (twice). 
*/ cy += mpihelp_add_n(prodp + hsize, prodp + hsize, tspace, size); if (cy) mpihelp_add_1(prodp + hsize + size, prodp + hsize + size, hsize, cy); MPN_COPY(prodp, tspace, hsize); cy = mpihelp_add_n(prodp + hsize, prodp + hsize, tspace + hsize, hsize); if (cy) mpihelp_add_1(prodp + size, prodp + size, size, 1); } } /* This should be made into an inline function in gmp.h. */ int mpihelp_mul_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size) { if (up == vp) { if (size < KARATSUBA_THRESHOLD) mpih_sqr_n_basecase(prodp, up, size); else { mpi_ptr_t tspace; tspace = mpi_alloc_limb_space(2 * size); if (!tspace) return -ENOMEM; mpih_sqr_n(prodp, up, size, tspace); mpi_free_limb_space(tspace); } } else { if (size < KARATSUBA_THRESHOLD) mul_n_basecase(prodp, up, vp, size); else { mpi_ptr_t tspace; tspace = mpi_alloc_limb_space(2 * size); if (!tspace) return -ENOMEM; mul_n(prodp, up, vp, size, tspace); mpi_free_limb_space(tspace); } } return 0; } int mpihelp_mul_karatsuba_case(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize, mpi_ptr_t vp, mpi_size_t vsize, struct karatsuba_ctx *ctx) { mpi_limb_t cy; if (!ctx->tspace || ctx->tspace_size < vsize) { if (ctx->tspace) mpi_free_limb_space(ctx->tspace); ctx->tspace = mpi_alloc_limb_space(2 * vsize); if (!ctx->tspace) return -ENOMEM; ctx->tspace_size = vsize; } MPN_MUL_N_RECURSE(prodp, up, vp, vsize, ctx->tspace); prodp += vsize; up += vsize; usize -= vsize; if (usize >= vsize) { if (!ctx->tp || ctx->tp_size < vsize) { if (ctx->tp) mpi_free_limb_space(ctx->tp); ctx->tp = mpi_alloc_limb_space(2 * vsize); if (!ctx->tp) { if (ctx->tspace) mpi_free_limb_space(ctx->tspace); ctx->tspace = NULL; return -ENOMEM; } ctx->tp_size = vsize; } do { MPN_MUL_N_RECURSE(ctx->tp, up, vp, vsize, ctx->tspace); cy = mpihelp_add_n(prodp, prodp, ctx->tp, vsize); mpihelp_add_1(prodp + vsize, ctx->tp + vsize, vsize, cy); prodp += vsize; up += vsize; usize -= vsize; } while (usize >= vsize); } if (usize) { if (usize < KARATSUBA_THRESHOLD) { 
mpi_limb_t tmp; if (mpihelp_mul(ctx->tspace, vp, vsize, up, usize, &tmp) < 0) return -ENOMEM; } else { if (!ctx->next) { ctx->next = kzalloc(sizeof *ctx, GFP_KERNEL); if (!ctx->next) return -ENOMEM; } if (mpihelp_mul_karatsuba_case(ctx->tspace, vp, vsize, up, usize, ctx->next) < 0) return -ENOMEM; } cy = mpihelp_add_n(prodp, prodp, ctx->tspace, vsize); mpihelp_add_1(prodp + vsize, ctx->tspace + vsize, usize, cy); } return 0; } void mpihelp_release_karatsuba_ctx(struct karatsuba_ctx *ctx) { struct karatsuba_ctx *ctx2; if (ctx->tp) mpi_free_limb_space(ctx->tp); if (ctx->tspace) mpi_free_limb_space(ctx->tspace); for (ctx = ctx->next; ctx; ctx = ctx2) { ctx2 = ctx->next; if (ctx->tp) mpi_free_limb_space(ctx->tp); if (ctx->tspace) mpi_free_limb_space(ctx->tspace); kfree(ctx); } } /* Multiply the natural numbers u (pointed to by UP, with USIZE limbs) * and v (pointed to by VP, with VSIZE limbs), and store the result at * PRODP. USIZE + VSIZE limbs are always stored, but if the input * operands are normalized. Return the most significant limb of the * result. * * NOTE: The space pointed to by PRODP is overwritten before finished * with U and V, so overlap is an error. * * Argument constraints: * 1. USIZE >= VSIZE. * 2. PRODP != UP and PRODP != VP, i.e. the destination * must be distinct from the multiplier and the multiplicand. */ int mpihelp_mul(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize, mpi_ptr_t vp, mpi_size_t vsize, mpi_limb_t *_result) { mpi_ptr_t prod_endp = prodp + usize + vsize - 1; mpi_limb_t cy; struct karatsuba_ctx ctx; if (vsize < KARATSUBA_THRESHOLD) { mpi_size_t i; mpi_limb_t v_limb; if (!vsize) { *_result = 0; return 0; } /* Multiply by the first limb in V separately, as the result can be * stored (not added) to PROD. We also avoid a loop for zeroing. 
*/ v_limb = vp[0]; if (v_limb <= 1) { if (v_limb == 1) MPN_COPY(prodp, up, usize); else MPN_ZERO(prodp, usize); cy = 0; } else cy = mpihelp_mul_1(prodp, up, usize, v_limb); prodp[usize] = cy; prodp++; /* For each iteration in the outer loop, multiply one limb from * U with one limb from V, and add it to PROD. */ for (i = 1; i < vsize; i++) { v_limb = vp[i]; if (v_limb <= 1) { cy = 0; if (v_limb == 1) cy = mpihelp_add_n(prodp, prodp, up, usize); } else cy = mpihelp_addmul_1(prodp, up, usize, v_limb); prodp[usize] = cy; prodp++; } *_result = cy; return 0; } memset(&ctx, 0, sizeof ctx); if (mpihelp_mul_karatsuba_case(prodp, up, usize, vp, vsize, &ctx) < 0) return -ENOMEM; mpihelp_release_karatsuba_ctx(&ctx); *_result = *prod_endp; return 0; }
gpl-2.0
jrfastab/tx-rate-limits
arch/arm/mach-mmp/common.c
6218
1205
/* * linux/arch/arm/mach-mmp/common.c * * Code common to PXA168 processor lines * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <asm/page.h> #include <asm/mach/map.h> #include <asm/system_misc.h> #include <mach/addr-map.h> #include <mach/cputype.h> #include "common.h" #define MMP_CHIPID (AXI_VIRT_BASE + 0x82c00) unsigned int mmp_chip_id; EXPORT_SYMBOL(mmp_chip_id); static struct map_desc standard_io_desc[] __initdata = { { .pfn = __phys_to_pfn(APB_PHYS_BASE), .virtual = (unsigned long)APB_VIRT_BASE, .length = APB_PHYS_SIZE, .type = MT_DEVICE, }, { .pfn = __phys_to_pfn(AXI_PHYS_BASE), .virtual = (unsigned long)AXI_VIRT_BASE, .length = AXI_PHYS_SIZE, .type = MT_DEVICE, }, }; void __init mmp_map_io(void) { iotable_init(standard_io_desc, ARRAY_SIZE(standard_io_desc)); /* this is early, initialize mmp_chip_id here */ mmp_chip_id = __raw_readl(MMP_CHIPID); } void mmp_restart(char mode, const char *cmd) { soft_restart(0); }
gpl-2.0
MarkDownUnder/android_kernel_htc_leo
drivers/media/video/bt866.c
7242
6255
/* bt866 - BT866 Digital Video Encoder (Rockwell Part) Copyright (C) 1999 Mike Bernson <mike@mlb.org> Copyright (C) 1998 Dave Perks <dperks@ibm.net> Modifications for LML33/DC10plus unified driver Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx> This code was modify/ported from the saa7111 driver written by Dave Perks. This code was adapted for the bt866 by Christer Weinigel and ported to 2.6 by Martin Samuelsson. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/ioctl.h> #include <asm/uaccess.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> MODULE_DESCRIPTION("Brooktree-866 video encoder driver"); MODULE_AUTHOR("Mike Bernson & Dave Perks"); MODULE_LICENSE("GPL"); static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0-1)"); /* ----------------------------------------------------------------------- */ struct bt866 { struct v4l2_subdev sd; u8 reg[256]; }; static inline struct bt866 *to_bt866(struct v4l2_subdev *sd) { return container_of(sd, struct bt866, sd); } static int bt866_write(struct bt866 *encoder, u8 subaddr, u8 data) { struct i2c_client *client = v4l2_get_subdevdata(&encoder->sd); u8 buffer[2]; int err; buffer[0] = subaddr; buffer[1] = data; encoder->reg[subaddr] = data; v4l_dbg(1, debug, client, "write 0x%02x = 0x%02x\n", subaddr, data); for (err = 0; err < 3;) { if (i2c_master_send(client, buffer, 2) == 2) break; err++; v4l_warn(client, "error #%d writing to 0x%02x\n", err, subaddr); schedule_timeout_interruptible(msecs_to_jiffies(100)); } if (err == 3) { v4l_warn(client, "giving up\n"); return -1; } return 0; } static int bt866_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std) { v4l2_dbg(1, debug, sd, "set norm %llx\n", (unsigned long long)std); /* Only PAL supported by this driver at the moment! 
*/ if (!(std & V4L2_STD_NTSC)) return -EINVAL; return 0; } static int bt866_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { static const __u8 init[] = { 0xc8, 0xcc, /* CRSCALE */ 0xca, 0x91, /* CBSCALE */ 0xcc, 0x24, /* YC16 | OSDNUM */ 0xda, 0x00, /* */ 0xdc, 0x24, /* SETMODE | PAL */ 0xde, 0x02, /* EACTIVE */ /* overlay colors */ 0x70, 0xEB, 0x90, 0x80, 0xB0, 0x80, /* white */ 0x72, 0xA2, 0x92, 0x8E, 0xB2, 0x2C, /* yellow */ 0x74, 0x83, 0x94, 0x2C, 0xB4, 0x9C, /* cyan */ 0x76, 0x70, 0x96, 0x3A, 0xB6, 0x48, /* green */ 0x78, 0x54, 0x98, 0xC6, 0xB8, 0xB8, /* magenta */ 0x7A, 0x41, 0x9A, 0xD4, 0xBA, 0x64, /* red */ 0x7C, 0x23, 0x9C, 0x72, 0xBC, 0xD4, /* blue */ 0x7E, 0x10, 0x9E, 0x80, 0xBE, 0x80, /* black */ 0x60, 0xEB, 0x80, 0x80, 0xc0, 0x80, /* white */ 0x62, 0xA2, 0x82, 0x8E, 0xc2, 0x2C, /* yellow */ 0x64, 0x83, 0x84, 0x2C, 0xc4, 0x9C, /* cyan */ 0x66, 0x70, 0x86, 0x3A, 0xc6, 0x48, /* green */ 0x68, 0x54, 0x88, 0xC6, 0xc8, 0xB8, /* magenta */ 0x6A, 0x41, 0x8A, 0xD4, 0xcA, 0x64, /* red */ 0x6C, 0x23, 0x8C, 0x72, 0xcC, 0xD4, /* blue */ 0x6E, 0x10, 0x8E, 0x80, 0xcE, 0x80, /* black */ }; struct bt866 *encoder = to_bt866(sd); u8 val; int i; for (i = 0; i < ARRAY_SIZE(init) / 2; i += 2) bt866_write(encoder, init[i], init[i+1]); val = encoder->reg[0xdc]; if (input == 0) val |= 0x40; /* CBSWAP */ else val &= ~0x40; /* !CBSWAP */ bt866_write(encoder, 0xdc, val); val = encoder->reg[0xcc]; if (input == 2) val |= 0x01; /* OSDBAR */ else val &= ~0x01; /* !OSDBAR */ bt866_write(encoder, 0xcc, val); v4l2_dbg(1, debug, sd, "set input %d\n", input); switch (input) { case 0: case 1: case 2: break; default: return -EINVAL; } return 0; } #if 0 /* Code to setup square pixels, might be of some use in the future, but is currently unused. 
*/ val = encoder->reg[0xdc]; if (*iarg) val |= 1; /* SQUARE */ else val &= ~1; /* !SQUARE */ bt866_write(client, 0xdc, val); #endif static int bt866_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_BT866, 0); } /* ----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops bt866_core_ops = { .g_chip_ident = bt866_g_chip_ident, }; static const struct v4l2_subdev_video_ops bt866_video_ops = { .s_std_output = bt866_s_std_output, .s_routing = bt866_s_routing, }; static const struct v4l2_subdev_ops bt866_ops = { .core = &bt866_core_ops, .video = &bt866_video_ops, }; static int bt866_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct bt866 *encoder; struct v4l2_subdev *sd; v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); encoder = kzalloc(sizeof(*encoder), GFP_KERNEL); if (encoder == NULL) return -ENOMEM; sd = &encoder->sd; v4l2_i2c_subdev_init(sd, client, &bt866_ops); return 0; } static int bt866_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); kfree(to_bt866(sd)); return 0; } static const struct i2c_device_id bt866_id[] = { { "bt866", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, bt866_id); static struct i2c_driver bt866_driver = { .driver = { .owner = THIS_MODULE, .name = "bt866", }, .probe = bt866_probe, .remove = bt866_remove, .id_table = bt866_id, }; module_i2c_driver(bt866_driver);
gpl-2.0
IllusionRom-deprecated/android_kernel_lge_msm8974
drivers/media/video/wm8739.c
7242
7739
/* * wm8739 * * Copyright (C) 2005 T. Adachi <tadachi@tadachi-net.com> * * Copyright (C) 2005 Hans Verkuil <hverkuil@xs4all.nl> * - Cleanup * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/ioctl.h> #include <asm/uaccess.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> #include <media/v4l2-ctrls.h> MODULE_DESCRIPTION("wm8739 driver"); MODULE_AUTHOR("T. 
Adachi, Hans Verkuil"); MODULE_LICENSE("GPL"); static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debug level (0-1)"); /* ------------------------------------------------------------------------ */ enum { R0 = 0, R1, R5 = 5, R6, R7, R8, R9, R15 = 15, TOT_REGS }; struct wm8739_state { struct v4l2_subdev sd; struct v4l2_ctrl_handler hdl; struct { /* audio cluster */ struct v4l2_ctrl *volume; struct v4l2_ctrl *mute; struct v4l2_ctrl *balance; }; u32 clock_freq; }; static inline struct wm8739_state *to_state(struct v4l2_subdev *sd) { return container_of(sd, struct wm8739_state, sd); } static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl) { return &container_of(ctrl->handler, struct wm8739_state, hdl)->sd; } /* ------------------------------------------------------------------------ */ static int wm8739_write(struct v4l2_subdev *sd, int reg, u16 val) { struct i2c_client *client = v4l2_get_subdevdata(sd); int i; if (reg < 0 || reg >= TOT_REGS) { v4l2_err(sd, "Invalid register R%d\n", reg); return -1; } v4l2_dbg(1, debug, sd, "write: %02x %02x\n", reg, val); for (i = 0; i < 3; i++) if (i2c_smbus_write_byte_data(client, (reg << 1) | (val >> 8), val & 0xff) == 0) return 0; v4l2_err(sd, "I2C: cannot write %03x to register R%d\n", val, reg); return -1; } static int wm8739_s_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_subdev *sd = to_sd(ctrl); struct wm8739_state *state = to_state(sd); unsigned int work_l, work_r; u8 vol_l; /* +12dB to -34.5dB 1.5dB step (5bit) def:0dB */ u8 vol_r; /* +12dB to -34.5dB 1.5dB step (5bit) def:0dB */ u16 mute; switch (ctrl->id) { case V4L2_CID_AUDIO_VOLUME: break; default: return -EINVAL; } /* normalize ( 65535 to 0 -> 31 to 0 (12dB to -34.5dB) ) */ work_l = (min(65536 - state->balance->val, 32768) * state->volume->val) / 32768; work_r = (min(state->balance->val, 32768) * state->volume->val) / 32768; vol_l = (long)work_l * 31 / 65535; vol_r = (long)work_r * 31 / 65535; /* set audio volume etc. 
*/ mute = state->mute->val ? 0x80 : 0; /* Volume setting: bits 0-4, 0x1f = 12 dB, 0x00 = -34.5 dB * Default setting: 0x17 = 0 dB */ wm8739_write(sd, R0, (vol_l & 0x1f) | mute); wm8739_write(sd, R1, (vol_r & 0x1f) | mute); return 0; } /* ------------------------------------------------------------------------ */ static int wm8739_s_clock_freq(struct v4l2_subdev *sd, u32 audiofreq) { struct wm8739_state *state = to_state(sd); state->clock_freq = audiofreq; /* de-activate */ wm8739_write(sd, R9, 0x000); switch (audiofreq) { case 44100: /* 256fps, fs=44.1k */ wm8739_write(sd, R8, 0x020); break; case 48000: /* 256fps, fs=48k */ wm8739_write(sd, R8, 0x000); break; case 32000: /* 256fps, fs=32k */ wm8739_write(sd, R8, 0x018); break; default: break; } /* activate */ wm8739_write(sd, R9, 0x001); return 0; } static int wm8739_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_WM8739, 0); } static int wm8739_log_status(struct v4l2_subdev *sd) { struct wm8739_state *state = to_state(sd); v4l2_info(sd, "Frequency: %u Hz\n", state->clock_freq); v4l2_ctrl_handler_log_status(&state->hdl, sd->name); return 0; } /* ----------------------------------------------------------------------- */ static const struct v4l2_ctrl_ops wm8739_ctrl_ops = { .s_ctrl = wm8739_s_ctrl, }; static const struct v4l2_subdev_core_ops wm8739_core_ops = { .log_status = wm8739_log_status, .g_chip_ident = wm8739_g_chip_ident, .g_ext_ctrls = v4l2_subdev_g_ext_ctrls, .try_ext_ctrls = v4l2_subdev_try_ext_ctrls, .s_ext_ctrls = v4l2_subdev_s_ext_ctrls, .g_ctrl = v4l2_subdev_g_ctrl, .s_ctrl = v4l2_subdev_s_ctrl, .queryctrl = v4l2_subdev_queryctrl, .querymenu = v4l2_subdev_querymenu, }; static const struct v4l2_subdev_audio_ops wm8739_audio_ops = { .s_clock_freq = wm8739_s_clock_freq, }; static const struct v4l2_subdev_ops wm8739_ops = { .core = &wm8739_core_ops, .audio = 
&wm8739_audio_ops, }; /* ------------------------------------------------------------------------ */ /* i2c implementation */ static int wm8739_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct wm8739_state *state; struct v4l2_subdev *sd; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); state = kzalloc(sizeof(struct wm8739_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; sd = &state->sd; v4l2_i2c_subdev_init(sd, client, &wm8739_ops); v4l2_ctrl_handler_init(&state->hdl, 2); state->volume = v4l2_ctrl_new_std(&state->hdl, &wm8739_ctrl_ops, V4L2_CID_AUDIO_VOLUME, 0, 65535, 65535 / 100, 50736); state->mute = v4l2_ctrl_new_std(&state->hdl, &wm8739_ctrl_ops, V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0); state->balance = v4l2_ctrl_new_std(&state->hdl, &wm8739_ctrl_ops, V4L2_CID_AUDIO_BALANCE, 0, 65535, 65535 / 100, 32768); sd->ctrl_handler = &state->hdl; if (state->hdl.error) { int err = state->hdl.error; v4l2_ctrl_handler_free(&state->hdl); kfree(state); return err; } v4l2_ctrl_cluster(3, &state->volume); state->clock_freq = 48000; /* Initialize wm8739 */ /* reset */ wm8739_write(sd, R15, 0x00); /* filter setting, high path, offet clear */ wm8739_write(sd, R5, 0x000); /* ADC, OSC, Power Off mode Disable */ wm8739_write(sd, R6, 0x000); /* Digital Audio interface format: Enable Master mode, 24 bit, MSB first/left justified */ wm8739_write(sd, R7, 0x049); /* sampling control: normal, 256fs, 48KHz sampling rate */ wm8739_write(sd, R8, 0x000); /* activate */ wm8739_write(sd, R9, 0x001); /* set volume/mute */ v4l2_ctrl_handler_setup(&state->hdl); return 0; } static int wm8739_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct wm8739_state *state = to_state(sd); v4l2_device_unregister_subdev(sd); v4l2_ctrl_handler_free(&state->hdl); 
kfree(to_state(sd)); return 0; } static const struct i2c_device_id wm8739_id[] = { { "wm8739", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, wm8739_id); static struct i2c_driver wm8739_driver = { .driver = { .owner = THIS_MODULE, .name = "wm8739", }, .probe = wm8739_probe, .remove = wm8739_remove, .id_table = wm8739_id, }; module_i2c_driver(wm8739_driver);
gpl-2.0
psachin/apc-rock-II-kernel
drivers/media/video/tlv320aic23b.c
7242
6242
/* * tlv320aic23b - driver version 0.0.1 * * Copyright (C) 2006 Scott Alfter <salfter@ssai.us> * * Based on wm8775 driver * * Copyright (C) 2004 Ulf Eklund <ivtv at eklund.to> * Copyright (C) 2005 Hans Verkuil <hverkuil@xs4all.nl> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/ioctl.h> #include <asm/uaccess.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> MODULE_DESCRIPTION("tlv320aic23b driver"); MODULE_AUTHOR("Scott Alfter, Ulf Eklund, Hans Verkuil"); MODULE_LICENSE("GPL"); /* ----------------------------------------------------------------------- */ struct tlv320aic23b_state { struct v4l2_subdev sd; struct v4l2_ctrl_handler hdl; }; static inline struct tlv320aic23b_state *to_state(struct v4l2_subdev *sd) { return container_of(sd, struct tlv320aic23b_state, sd); } static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl) { return &container_of(ctrl->handler, struct tlv320aic23b_state, hdl)->sd; } static int tlv320aic23b_write(struct v4l2_subdev *sd, int reg, u16 val) { struct i2c_client *client = v4l2_get_subdevdata(sd); int i; if ((reg < 0 || reg > 9) && (reg != 15)) { v4l2_err(sd, "Invalid register R%d\n", reg); return -1; } for (i = 0; i < 3; i++) if 
(i2c_smbus_write_byte_data(client, (reg << 1) | (val >> 8), val & 0xff) == 0) return 0; v4l2_err(sd, "I2C: cannot write %03x to register R%d\n", val, reg); return -1; } static int tlv320aic23b_s_clock_freq(struct v4l2_subdev *sd, u32 freq) { switch (freq) { case 32000: /* set sample rate to 32 kHz */ tlv320aic23b_write(sd, 8, 0x018); break; case 44100: /* set sample rate to 44.1 kHz */ tlv320aic23b_write(sd, 8, 0x022); break; case 48000: /* set sample rate to 48 kHz */ tlv320aic23b_write(sd, 8, 0x000); break; default: return -EINVAL; } return 0; } static int tlv320aic23b_s_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_subdev *sd = to_sd(ctrl); switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: tlv320aic23b_write(sd, 0, 0x180); /* mute both channels */ /* set gain on both channels to +3.0 dB */ if (!ctrl->val) tlv320aic23b_write(sd, 0, 0x119); return 0; } return -EINVAL; } static int tlv320aic23b_log_status(struct v4l2_subdev *sd) { struct tlv320aic23b_state *state = to_state(sd); v4l2_ctrl_handler_log_status(&state->hdl, sd->name); return 0; } /* ----------------------------------------------------------------------- */ static const struct v4l2_ctrl_ops tlv320aic23b_ctrl_ops = { .s_ctrl = tlv320aic23b_s_ctrl, }; static const struct v4l2_subdev_core_ops tlv320aic23b_core_ops = { .log_status = tlv320aic23b_log_status, .g_ext_ctrls = v4l2_subdev_g_ext_ctrls, .try_ext_ctrls = v4l2_subdev_try_ext_ctrls, .s_ext_ctrls = v4l2_subdev_s_ext_ctrls, .g_ctrl = v4l2_subdev_g_ctrl, .s_ctrl = v4l2_subdev_s_ctrl, .queryctrl = v4l2_subdev_queryctrl, .querymenu = v4l2_subdev_querymenu, }; static const struct v4l2_subdev_audio_ops tlv320aic23b_audio_ops = { .s_clock_freq = tlv320aic23b_s_clock_freq, }; static const struct v4l2_subdev_ops tlv320aic23b_ops = { .core = &tlv320aic23b_core_ops, .audio = &tlv320aic23b_audio_ops, }; /* ----------------------------------------------------------------------- */ /* i2c implementation */ /* * Generic i2c probe * concerning the addresses: i2c wants 7 bit 
(without the r/w bit), so '>>1' */ static int tlv320aic23b_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct tlv320aic23b_state *state; struct v4l2_subdev *sd; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); state = kzalloc(sizeof(struct tlv320aic23b_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; sd = &state->sd; v4l2_i2c_subdev_init(sd, client, &tlv320aic23b_ops); /* Initialize tlv320aic23b */ /* RESET */ tlv320aic23b_write(sd, 15, 0x000); /* turn off DAC & mic input */ tlv320aic23b_write(sd, 6, 0x00A); /* left-justified, 24-bit, master mode */ tlv320aic23b_write(sd, 7, 0x049); /* set gain on both channels to +3.0 dB */ tlv320aic23b_write(sd, 0, 0x119); /* set sample rate to 48 kHz */ tlv320aic23b_write(sd, 8, 0x000); /* activate digital interface */ tlv320aic23b_write(sd, 9, 0x001); v4l2_ctrl_handler_init(&state->hdl, 1); v4l2_ctrl_new_std(&state->hdl, &tlv320aic23b_ctrl_ops, V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0); sd->ctrl_handler = &state->hdl; if (state->hdl.error) { int err = state->hdl.error; v4l2_ctrl_handler_free(&state->hdl); kfree(state); return err; } v4l2_ctrl_handler_setup(&state->hdl); return 0; } static int tlv320aic23b_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct tlv320aic23b_state *state = to_state(sd); v4l2_device_unregister_subdev(sd); v4l2_ctrl_handler_free(&state->hdl); kfree(state); return 0; } /* ----------------------------------------------------------------------- */ static const struct i2c_device_id tlv320aic23b_id[] = { { "tlv320aic23b", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, tlv320aic23b_id); static struct i2c_driver tlv320aic23b_driver = { .driver = { .owner = THIS_MODULE, .name = "tlv320aic23b", }, .probe = tlv320aic23b_probe, .remove = tlv320aic23b_remove, .id_table = 
tlv320aic23b_id, }; module_i2c_driver(tlv320aic23b_driver);
gpl-2.0
Guazi/kernelwip
drivers/media/video/cs53l32a.c
7242
6736
/* * cs53l32a (Adaptec AVC-2010 and AVC-2410) i2c ivtv driver. * Copyright (C) 2005 Martin Vaughan * * Audio source switching for Adaptec AVC-2410 added by Trev Jackson * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/ioctl.h> #include <asm/uaccess.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> #include <media/v4l2-ctrls.h> MODULE_DESCRIPTION("i2c device driver for cs53l32a Audio ADC"); MODULE_AUTHOR("Martin Vaughan"); MODULE_LICENSE("GPL"); static bool debug; module_param(debug, bool, 0644); MODULE_PARM_DESC(debug, "Debugging messages, 0=Off (default), 1=On"); struct cs53l32a_state { struct v4l2_subdev sd; struct v4l2_ctrl_handler hdl; }; static inline struct cs53l32a_state *to_state(struct v4l2_subdev *sd) { return container_of(sd, struct cs53l32a_state, sd); } static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl) { return &container_of(ctrl->handler, struct cs53l32a_state, hdl)->sd; } /* ----------------------------------------------------------------------- */ static int cs53l32a_write(struct v4l2_subdev *sd, u8 reg, u8 value) { struct i2c_client *client = v4l2_get_subdevdata(sd); return i2c_smbus_write_byte_data(client, reg, value); } static int 
cs53l32a_read(struct v4l2_subdev *sd, u8 reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); return i2c_smbus_read_byte_data(client, reg); } static int cs53l32a_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { /* There are 2 physical inputs, but the second input can be placed in two modes, the first mode bypasses the PGA (gain), the second goes through the PGA. Hence there are three possible inputs to choose from. */ if (input > 2) { v4l2_err(sd, "Invalid input %d.\n", input); return -EINVAL; } cs53l32a_write(sd, 0x01, 0x01 + (input << 4)); return 0; } static int cs53l32a_s_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_subdev *sd = to_sd(ctrl); switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: cs53l32a_write(sd, 0x03, ctrl->val ? 0xf0 : 0x30); return 0; case V4L2_CID_AUDIO_VOLUME: cs53l32a_write(sd, 0x04, (u8)ctrl->val); cs53l32a_write(sd, 0x05, (u8)ctrl->val); return 0; } return -EINVAL; } static int cs53l32a_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_CS53l32A, 0); } static int cs53l32a_log_status(struct v4l2_subdev *sd) { struct cs53l32a_state *state = to_state(sd); u8 v = cs53l32a_read(sd, 0x01); v4l2_info(sd, "Input: %d\n", (v >> 4) & 3); v4l2_ctrl_handler_log_status(&state->hdl, sd->name); return 0; } /* ----------------------------------------------------------------------- */ static const struct v4l2_ctrl_ops cs53l32a_ctrl_ops = { .s_ctrl = cs53l32a_s_ctrl, }; static const struct v4l2_subdev_core_ops cs53l32a_core_ops = { .log_status = cs53l32a_log_status, .g_chip_ident = cs53l32a_g_chip_ident, .g_ext_ctrls = v4l2_subdev_g_ext_ctrls, .try_ext_ctrls = v4l2_subdev_try_ext_ctrls, .s_ext_ctrls = v4l2_subdev_s_ext_ctrls, .g_ctrl = v4l2_subdev_g_ctrl, .s_ctrl = v4l2_subdev_s_ctrl, .queryctrl = v4l2_subdev_queryctrl, .querymenu = v4l2_subdev_querymenu, }; static const struct v4l2_subdev_audio_ops 
cs53l32a_audio_ops = { .s_routing = cs53l32a_s_routing, }; static const struct v4l2_subdev_ops cs53l32a_ops = { .core = &cs53l32a_core_ops, .audio = &cs53l32a_audio_ops, }; /* ----------------------------------------------------------------------- */ /* i2c implementation */ /* * Generic i2c probe * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1' */ static int cs53l32a_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct cs53l32a_state *state; struct v4l2_subdev *sd; int i; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; if (!id) strlcpy(client->name, "cs53l32a", sizeof(client->name)); v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); state = kzalloc(sizeof(struct cs53l32a_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; sd = &state->sd; v4l2_i2c_subdev_init(sd, client, &cs53l32a_ops); for (i = 1; i <= 7; i++) { u8 v = cs53l32a_read(sd, i); v4l2_dbg(1, debug, sd, "Read Reg %d %02x\n", i, v); } v4l2_ctrl_handler_init(&state->hdl, 2); v4l2_ctrl_new_std(&state->hdl, &cs53l32a_ctrl_ops, V4L2_CID_AUDIO_VOLUME, -96, 12, 1, 0); v4l2_ctrl_new_std(&state->hdl, &cs53l32a_ctrl_ops, V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0); sd->ctrl_handler = &state->hdl; if (state->hdl.error) { int err = state->hdl.error; v4l2_ctrl_handler_free(&state->hdl); kfree(state); return err; } /* Set cs53l32a internal register for Adaptec 2010/2410 setup */ cs53l32a_write(sd, 0x01, 0x21); cs53l32a_write(sd, 0x02, 0x29); cs53l32a_write(sd, 0x03, 0x30); cs53l32a_write(sd, 0x04, 0x00); cs53l32a_write(sd, 0x05, 0x00); cs53l32a_write(sd, 0x06, 0x00); cs53l32a_write(sd, 0x07, 0x00); /* Display results, should be 0x21,0x29,0x30,0x00,0x00,0x00,0x00 */ for (i = 1; i <= 7; i++) { u8 v = cs53l32a_read(sd, i); v4l2_dbg(1, debug, sd, "Read Reg %d %02x\n", i, v); } return 0; } static int cs53l32a_remove(struct i2c_client *client) { struct 
v4l2_subdev *sd = i2c_get_clientdata(client); struct cs53l32a_state *state = to_state(sd); v4l2_device_unregister_subdev(sd); v4l2_ctrl_handler_free(&state->hdl); kfree(state); return 0; } static const struct i2c_device_id cs53l32a_id[] = { { "cs53l32a", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, cs53l32a_id); static struct i2c_driver cs53l32a_driver = { .driver = { .owner = THIS_MODULE, .name = "cs53l32a", }, .probe = cs53l32a_probe, .remove = cs53l32a_remove, .id_table = cs53l32a_id, }; module_i2c_driver(cs53l32a_driver);
gpl-2.0
flar2/m8-GPE
drivers/oprofile/oprofile_stats.c
75
2236
/** * @file oprofile_stats.c * * @remark Copyright 2002 OProfile authors * @remark Read the file COPYING * * @author John Levon */ #include <linux/oprofile.h> #include <linux/smp.h> #include <linux/cpumask.h> #include <linux/threads.h> #include "oprofile_stats.h" #include "cpu_buffer.h" struct oprofile_stat_struct oprofile_stats; void oprofile_reset_stats(void) { struct oprofile_cpu_buffer *cpu_buf; int i; for_each_possible_cpu(i) { cpu_buf = &per_cpu(op_cpu_buffer, i); cpu_buf->sample_received = 0; cpu_buf->sample_lost_overflow = 0; cpu_buf->backtrace_aborted = 0; cpu_buf->sample_invalid_eip = 0; } atomic_set(&oprofile_stats.sample_lost_no_mm, 0); atomic_set(&oprofile_stats.sample_lost_no_mapping, 0); atomic_set(&oprofile_stats.event_lost_overflow, 0); atomic_set(&oprofile_stats.bt_lost_no_mapping, 0); atomic_set(&oprofile_stats.multiplex_counter, 0); } void oprofile_create_stats_files(struct super_block *sb, struct dentry *root) { struct oprofile_cpu_buffer *cpu_buf; struct dentry *cpudir; struct dentry *dir; char buf[10]; int i; dir = oprofilefs_mkdir(sb, root, "stats"); if (!dir) return; for_each_possible_cpu(i) { cpu_buf = &per_cpu(op_cpu_buffer, i); snprintf(buf, 10, "cpu%d", i); cpudir = oprofilefs_mkdir(sb, dir, buf); oprofilefs_create_ro_ulong(sb, cpudir, "sample_received", &cpu_buf->sample_received); oprofilefs_create_ro_ulong(sb, cpudir, "sample_lost_overflow", &cpu_buf->sample_lost_overflow); oprofilefs_create_ro_ulong(sb, cpudir, "backtrace_aborted", &cpu_buf->backtrace_aborted); oprofilefs_create_ro_ulong(sb, cpudir, "sample_invalid_eip", &cpu_buf->sample_invalid_eip); } oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mm", &oprofile_stats.sample_lost_no_mm); oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mapping", &oprofile_stats.sample_lost_no_mapping); oprofilefs_create_ro_atomic(sb, dir, "event_lost_overflow", &oprofile_stats.event_lost_overflow); oprofilefs_create_ro_atomic(sb, dir, "bt_lost_no_mapping", 
&oprofile_stats.bt_lost_no_mapping); #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX oprofilefs_create_ro_atomic(sb, dir, "multiplex_counter", &oprofile_stats.multiplex_counter); #endif }
gpl-2.0
InkVisible/wow
dep/acelite/ace/OS_NS_fcntl.cpp
75
8003
// $Id: OS_NS_fcntl.cpp 80826 2008-03-04 14:51:23Z wotte $ #include "ace/OS_NS_fcntl.h" ACE_RCSID(ace, OS_NS_fcntl, "$Id: OS_NS_fcntl.cpp 80826 2008-03-04 14:51:23Z wotte $") #if !defined (ACE_HAS_INLINED_OSCALLS) # include "ace/OS_NS_fcntl.inl" #endif /* ACE_HAS_INLINED_OSCALLS */ #include "ace/OS_NS_stdio.h" #include "ace/OS_NS_Thread.h" #include "ace/OS_NS_macros.h" #include "ace/Object_Manager_Base.h" ACE_BEGIN_VERSIONED_NAMESPACE_DECL ACE_HANDLE ACE_OS::open (const char *filename, int mode, mode_t perms, LPSECURITY_ATTRIBUTES sa) { ACE_OS_TRACE ("ACE_OS::open"); #if defined (ACE_WIN32) DWORD access = GENERIC_READ; if (ACE_BIT_ENABLED (mode, O_WRONLY)) access = GENERIC_WRITE; else if (ACE_BIT_ENABLED (mode, O_RDWR)) access = GENERIC_READ | GENERIC_WRITE; DWORD creation = OPEN_EXISTING; if ((mode & (_O_CREAT | _O_EXCL)) == (_O_CREAT | _O_EXCL)) creation = CREATE_NEW; else if ((mode & (_O_CREAT | _O_TRUNC)) == (_O_CREAT | _O_TRUNC)) creation = CREATE_ALWAYS; else if (ACE_BIT_ENABLED (mode, _O_CREAT)) creation = OPEN_ALWAYS; else if (ACE_BIT_ENABLED (mode, _O_TRUNC)) creation = TRUNCATE_EXISTING; DWORD flags = 0; if (ACE_BIT_ENABLED (mode, _O_TEMPORARY)) flags |= FILE_FLAG_DELETE_ON_CLOSE | FILE_ATTRIBUTE_TEMPORARY; if (ACE_BIT_ENABLED (mode, FILE_FLAG_WRITE_THROUGH)) flags |= FILE_FLAG_WRITE_THROUGH; if (ACE_BIT_ENABLED (mode, FILE_FLAG_OVERLAPPED)) flags |= FILE_FLAG_OVERLAPPED; if (ACE_BIT_ENABLED (mode, FILE_FLAG_NO_BUFFERING)) flags |= FILE_FLAG_NO_BUFFERING; if (ACE_BIT_ENABLED (mode, FILE_FLAG_RANDOM_ACCESS)) flags |= FILE_FLAG_RANDOM_ACCESS; if (ACE_BIT_ENABLED (mode, FILE_FLAG_SEQUENTIAL_SCAN)) flags |= FILE_FLAG_SEQUENTIAL_SCAN; if (ACE_BIT_ENABLED (mode, FILE_FLAG_DELETE_ON_CLOSE)) flags |= FILE_FLAG_DELETE_ON_CLOSE; if (ACE_BIT_ENABLED (mode, FILE_FLAG_BACKUP_SEMANTICS)) flags |= FILE_FLAG_BACKUP_SEMANTICS; if (ACE_BIT_ENABLED (mode, FILE_FLAG_POSIX_SEMANTICS)) flags |= FILE_FLAG_POSIX_SEMANTICS; ACE_MT (ACE_thread_mutex_t *ace_os_monitor_lock = 0;) if 
(ACE_BIT_ENABLED (mode, _O_APPEND)) { ACE_MT ( ace_os_monitor_lock = static_cast <ACE_thread_mutex_t *> ( ACE_OS_Object_Manager::preallocated_object[ ACE_OS_Object_Manager::ACE_OS_MONITOR_LOCK]); ACE_OS::thread_mutex_lock (ace_os_monitor_lock); ) } DWORD shared_mode = perms; SECURITY_ATTRIBUTES sa_buffer; SECURITY_DESCRIPTOR sd_buffer; #if defined (ACE_HAS_WINCE) ACE_HANDLE h = ::CreateFileW (ACE_Ascii_To_Wide (filename).wchar_rep (), access, shared_mode, ACE_OS::default_win32_security_attributes_r (sa, &sa_buffer, &sd_buffer), creation, flags, 0); #else /* ACE_HAS_WINCE */ ACE_HANDLE h = ::CreateFileA (filename, access, shared_mode, ACE_OS::default_win32_security_attributes_r (sa, &sa_buffer, &sd_buffer), creation, flags, 0); #endif /* ACE_HAS_WINCE */ if (ACE_BIT_ENABLED (mode, _O_APPEND)) { LONG high_size = 0; if (h != ACE_INVALID_HANDLE && ::SetFilePointer (h, 0, &high_size, FILE_END) == INVALID_SET_FILE_POINTER && GetLastError () != NO_ERROR) { ACE_MT (ACE_OS::thread_mutex_unlock (ace_os_monitor_lock);) ACE_FAIL_RETURN (ACE_INVALID_HANDLE); } ACE_MT (ACE_OS::thread_mutex_unlock (ace_os_monitor_lock);) } if (h == ACE_INVALID_HANDLE) ACE_FAIL_RETURN (h); else return h; #elif defined (INTEGRITY) ACE_UNUSED_ARG (sa); if(!strcmp(filename,ACE_DEV_NULL)) { ACE_OSCALL_RETURN (::AllocateNullConsoleDescriptor(), ACE_HANDLE, -1); } else { ACE_OSCALL_RETURN (::open (filename, mode, perms), ACE_HANDLE, -1); } #else ACE_UNUSED_ARG (sa); ACE_OSCALL_RETURN (::open (filename, mode, perms), ACE_HANDLE, ACE_INVALID_HANDLE); #endif /* ACE_WIN32 */ } #if defined (ACE_HAS_WCHAR) ACE_HANDLE ACE_OS::open (const wchar_t *filename, int mode, mode_t perms, LPSECURITY_ATTRIBUTES sa) { #if defined (ACE_WIN32) // @@ (brunsch) Yuck, maybe there is a way to combine the code // here with the char version DWORD access = GENERIC_READ; if (ACE_BIT_ENABLED (mode, O_WRONLY)) access = GENERIC_WRITE; else if (ACE_BIT_ENABLED (mode, O_RDWR)) access = GENERIC_READ | GENERIC_WRITE; DWORD creation = 
OPEN_EXISTING; if ((mode & (_O_CREAT | _O_EXCL)) == (_O_CREAT | _O_EXCL)) creation = CREATE_NEW; else if ((mode & (_O_CREAT | _O_TRUNC)) == (_O_CREAT | _O_TRUNC)) creation = CREATE_ALWAYS; else if (ACE_BIT_ENABLED (mode, _O_CREAT)) creation = OPEN_ALWAYS; else if (ACE_BIT_ENABLED (mode, _O_TRUNC)) creation = TRUNCATE_EXISTING; DWORD flags = 0; if (ACE_BIT_ENABLED (mode, _O_TEMPORARY)) flags |= FILE_FLAG_DELETE_ON_CLOSE | FILE_ATTRIBUTE_TEMPORARY; if (ACE_BIT_ENABLED (mode, FILE_FLAG_WRITE_THROUGH)) flags |= FILE_FLAG_WRITE_THROUGH; if (ACE_BIT_ENABLED (mode, FILE_FLAG_OVERLAPPED)) flags |= FILE_FLAG_OVERLAPPED; if (ACE_BIT_ENABLED (mode, FILE_FLAG_NO_BUFFERING)) flags |= FILE_FLAG_NO_BUFFERING; if (ACE_BIT_ENABLED (mode, FILE_FLAG_RANDOM_ACCESS)) flags |= FILE_FLAG_RANDOM_ACCESS; if (ACE_BIT_ENABLED (mode, FILE_FLAG_SEQUENTIAL_SCAN)) flags |= FILE_FLAG_SEQUENTIAL_SCAN; if (ACE_BIT_ENABLED (mode, FILE_FLAG_DELETE_ON_CLOSE)) flags |= FILE_FLAG_DELETE_ON_CLOSE; if (ACE_BIT_ENABLED (mode, FILE_FLAG_BACKUP_SEMANTICS)) flags |= FILE_FLAG_BACKUP_SEMANTICS; if (ACE_BIT_ENABLED (mode, FILE_FLAG_POSIX_SEMANTICS)) flags |= FILE_FLAG_POSIX_SEMANTICS; ACE_MT (ACE_thread_mutex_t *ace_os_monitor_lock = 0;) if (ACE_BIT_ENABLED (mode, _O_APPEND)) { ACE_MT ( ace_os_monitor_lock = static_cast <ACE_thread_mutex_t *> ( ACE_OS_Object_Manager::preallocated_object[ ACE_OS_Object_Manager::ACE_OS_MONITOR_LOCK]); ACE_OS::thread_mutex_lock (ace_os_monitor_lock); ) } DWORD shared_mode = perms; SECURITY_ATTRIBUTES sa_buffer; SECURITY_DESCRIPTOR sd_buffer; ACE_HANDLE h = ::CreateFileW (filename, access, shared_mode, ACE_OS::default_win32_security_attributes_r (sa, &sa_buffer, &sd_buffer), creation, flags, 0); if (ACE_BIT_ENABLED (mode, _O_APPEND)) { LONG high_size = 0; if (h != ACE_INVALID_HANDLE && ::SetFilePointer (h, 0, &high_size, FILE_END) == INVALID_SET_FILE_POINTER && GetLastError () != NO_ERROR) { ACE_MT (ACE_OS::thread_mutex_unlock (ace_os_monitor_lock);) ACE_FAIL_RETURN 
(ACE_INVALID_HANDLE); } ACE_MT (ACE_OS::thread_mutex_unlock (ace_os_monitor_lock);) } if (h == ACE_INVALID_HANDLE) ACE_FAIL_RETURN (h); else return h; #else /* ACE_WIN32 */ // Just emulate with ascii version return ACE_OS::open (ACE_Wide_To_Ascii (filename).char_rep (), mode, perms, sa); #endif /* ACE_WIN32 */ } #endif /* ACE_HAS_WCHAR */ ACE_END_VERSIONED_NAMESPACE_DECL
gpl-2.0
Silentlys/android_kernel_qcom_msm8916
drivers/gpu/msm/adreno_ringbuffer.c
75
36681
/* Copyright (c) 2002,2007-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/firmware.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/log2.h> #include <linux/time.h> #include <linux/delay.h> #include "kgsl.h" #include "kgsl_sharedmem.h" #include "kgsl_cffdump.h" #include "kgsl_trace.h" #include "adreno.h" #include "adreno_pm4types.h" #include "adreno_ringbuffer.h" #include "a3xx_reg.h" #define GSL_RB_NOP_SIZEDWORDS 2 void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb) { struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device); BUG_ON(rb->wptr == 0); /* Let the pwrscale policy know that new commands have been submitted. */ kgsl_pwrscale_busy(rb->device); /*synchronize memory before informing the hardware of the *new commands. 
*/ mb(); adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR, rb->wptr); } static int adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, struct adreno_context *context, unsigned int numcmds, int wptr_ahead) { int nopcount; unsigned int freecmds; unsigned int *cmds; uint cmds_gpu; unsigned long wait_time; unsigned long wait_timeout = msecs_to_jiffies(ADRENO_IDLE_TIMEOUT); unsigned long wait_time_part; unsigned int rptr; /* if wptr ahead, fill the remaining with NOPs */ if (wptr_ahead) { /* -1 for header */ nopcount = rb->sizedwords - rb->wptr - 1; cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr; cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*rb->wptr; GSL_RB_WRITE(rb->device, cmds, cmds_gpu, cp_nop_packet(nopcount)); /* Make sure that rptr is not 0 before submitting * commands at the end of ringbuffer. We do not * want the rptr and wptr to become equal when * the ringbuffer is not empty */ do { rptr = adreno_get_rptr(rb); } while (!rptr); rb->wptr = 0; } wait_time = jiffies + wait_timeout; wait_time_part = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART); /* wait for space in ringbuffer */ while (1) { rptr = adreno_get_rptr(rb); freecmds = rptr - rb->wptr; if (freecmds == 0 || freecmds > numcmds) break; if (time_after(jiffies, wait_time)) { KGSL_DRV_ERR(rb->device, "Timed out while waiting for freespace in ringbuffer " "rptr: 0x%x, wptr: 0x%x\n", rptr, rb->wptr); return -ETIMEDOUT; } } return 0; } unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb, struct adreno_context *context, unsigned int numcmds) { unsigned int *ptr = NULL; int ret = 0; unsigned int rptr; BUG_ON(numcmds >= rb->sizedwords); rptr = adreno_get_rptr(rb); /* check for available space */ if (rb->wptr >= rptr) { /* wptr ahead or equal to rptr */ /* reserve dwords for nop packet */ if ((rb->wptr + numcmds) > (rb->sizedwords - GSL_RB_NOP_SIZEDWORDS)) ret = adreno_ringbuffer_waitspace(rb, context, numcmds, 1); } else { /* wptr behind rptr */ if ((rb->wptr + numcmds) >= 
rptr) ret = adreno_ringbuffer_waitspace(rb, context, numcmds, 0); /* check for remaining space */ /* reserve dwords for nop packet */ if (!ret && (rb->wptr + numcmds) > (rb->sizedwords - GSL_RB_NOP_SIZEDWORDS)) ret = adreno_ringbuffer_waitspace(rb, context, numcmds, 1); } if (!ret) { ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr; rb->wptr += numcmds; } else ptr = ERR_PTR(ret); return ptr; } static int _load_firmware(struct kgsl_device *device, const char *fwfile, void **data, int *len) { const struct firmware *fw = NULL; int ret; ret = request_firmware(&fw, fwfile, device->dev); if (ret) { KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n", fwfile, ret); return ret; } *data = kmalloc(fw->size, GFP_KERNEL); if (*data) { memcpy(*data, fw->data, fw->size); *len = fw->size; } release_firmware(fw); return (*data != NULL) ? 0 : -ENOMEM; } int adreno_ringbuffer_read_pm4_ucode(struct kgsl_device *device) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); int ret = 0; if (adreno_dev->pm4_fw == NULL) { int len; void *ptr; ret = _load_firmware(device, adreno_dev->pm4_fwfile, &ptr, &len); if (ret) goto err; /* PM4 size is 3 dword aligned plus 1 dword of version */ if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) { KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len); ret = -EINVAL; kfree(ptr); goto err; } adreno_dev->pm4_fw_size = len / sizeof(uint32_t); adreno_dev->pm4_fw = ptr; adreno_dev->pm4_fw_version = adreno_dev->pm4_fw[1]; } err: return ret; } /** * adreno_ringbuffer_load_pm4_ucode() - Load pm4 ucode * @device: Pointer to a KGSL device * @start: Starting index in pm4 ucode to load * @end: Ending index of pm4 ucode to load * @addr: Address to load the pm4 ucode * * Load the pm4 ucode from @start at @addr. 
*/ static inline int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device, unsigned int start, unsigned int end, unsigned int addr) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); int i; adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_RAM_WADDR, addr); for (i = start; i < end; i++) adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_RAM_DATA, adreno_dev->pm4_fw[i]); return 0; } int adreno_ringbuffer_read_pfp_ucode(struct kgsl_device *device) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); int ret = 0; if (adreno_dev->pfp_fw == NULL) { int len; void *ptr; ret = _load_firmware(device, adreno_dev->pfp_fwfile, &ptr, &len); if (ret) goto err; /* PFP size shold be dword aligned */ if (len % sizeof(uint32_t) != 0) { KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len); ret = -EINVAL; kfree(ptr); goto err; } adreno_dev->pfp_fw_size = len / sizeof(uint32_t); adreno_dev->pfp_fw = ptr; adreno_dev->pfp_fw_version = adreno_dev->pfp_fw[5]; } err: return ret; } /** * adreno_ringbuffer_load_pfp_ucode() - Load pfp ucode * @device: Pointer to a KGSL device * @start: Starting index in pfp ucode to load * @end: Ending index of pfp ucode to load * @addr: Address to load the pfp ucode * * Load the pfp ucode from @start at @addr. 
*/ static inline int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device, unsigned int start, unsigned int end, unsigned int addr) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); int i; adreno_writereg(adreno_dev, ADRENO_REG_CP_PFP_UCODE_ADDR, addr); for (i = start; i < end; i++) adreno_writereg(adreno_dev, ADRENO_REG_CP_PFP_UCODE_DATA, adreno_dev->pfp_fw[i]); return 0; } /** * _ringbuffer_bootstrap_ucode() - Bootstrap GPU Ucode * @rb: Pointer to adreno ringbuffer * @load_jt: If non zero only load Jump tables * * Bootstrap ucode for GPU * load_jt == 0, bootstrap full microcode * load_jt == 1, bootstrap jump tables of microcode * * For example a bootstrap packet would like below * Setup a type3 bootstrap packet * PFP size to bootstrap * PFP addr to write the PFP data * PM4 size to bootstrap * PM4 addr to write the PM4 data * PFP dwords from microcode to bootstrap * PM4 size dwords from microcode to bootstrap */ static int _ringbuffer_bootstrap_ucode(struct adreno_ringbuffer *rb, unsigned int load_jt) { unsigned int *cmds, cmds_gpu, bootstrap_size; int i = 0; struct kgsl_device *device = rb->device; struct adreno_device *adreno_dev = ADRENO_DEVICE(device); unsigned int pm4_size, pm4_idx, pm4_addr, pfp_size, pfp_idx, pfp_addr; /* Only bootstrap jump tables of ucode */ if (load_jt) { pm4_idx = adreno_dev->pm4_jt_idx; pm4_addr = adreno_dev->pm4_jt_addr; pfp_idx = adreno_dev->pfp_jt_idx; pfp_addr = adreno_dev->pfp_jt_addr; } else { /* Bootstrap full ucode */ pm4_idx = 1; pm4_addr = 0; pfp_idx = 1; pfp_addr = 0; } pm4_size = (adreno_dev->pm4_fw_size - pm4_idx); pfp_size = (adreno_dev->pfp_fw_size - pfp_idx); /* * Below set of commands register with PFP that 6f is the * opcode for bootstrapping */ adreno_writereg(adreno_dev, ADRENO_REG_CP_PFP_UCODE_ADDR, 0x200); adreno_writereg(adreno_dev, ADRENO_REG_CP_PFP_UCODE_DATA, 0x6f0005); /* clear ME_HALT to start micro engine */ adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, 0); bootstrap_size = (pm4_size + 
pfp_size + 5); cmds = adreno_ringbuffer_allocspace(rb, NULL, bootstrap_size); if (cmds == NULL) return -ENOMEM; cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint) * (rb->wptr - bootstrap_size); /* Construct the packet that bootsraps the ucode */ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, cp_type3_packet(CP_BOOTSTRAP_UCODE, (bootstrap_size - 1))); GSL_RB_WRITE(rb->device, cmds, cmds_gpu, pfp_size); GSL_RB_WRITE(rb->device, cmds, cmds_gpu, pfp_addr); GSL_RB_WRITE(rb->device, cmds, cmds_gpu, pm4_size); GSL_RB_WRITE(rb->device, cmds, cmds_gpu, pm4_addr); for (i = pfp_idx; i < adreno_dev->pfp_fw_size; i++) GSL_RB_WRITE(rb->device, cmds, cmds_gpu, adreno_dev->pfp_fw[i]); for (i = pm4_idx; i < adreno_dev->pm4_fw_size; i++) GSL_RB_WRITE(rb->device, cmds, cmds_gpu, adreno_dev->pm4_fw[i]); adreno_ringbuffer_submit(rb); /* idle device to validate bootstrap */ return adreno_idle(device); } /** * _ringbuffer_setup_common() - Ringbuffer start * @rb: Pointer to adreno ringbuffer * * Setup ringbuffer for GPU. */ static void _ringbuffer_setup_common(struct adreno_ringbuffer *rb) { struct kgsl_device *device = rb->device; struct adreno_device *adreno_dev = ADRENO_DEVICE(device); kgsl_sharedmem_set(rb->device, &rb->buffer_desc, 0, 0xAA, (rb->sizedwords << 2)); /* * The size of the ringbuffer in the hardware is the log2 * representation of the size in quadwords (sizedwords / 2). * Also disable the host RPTR shadow register as it might be unreliable * in certain circumstances. 
*/ adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL, (ilog2(rb->sizedwords >> 1) & 0x3F) | (1 << 27)); adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE, rb->buffer_desc.gpuaddr); /* setup scratch/timestamp */ adreno_writereg(adreno_dev, ADRENO_REG_SCRATCH_ADDR, device->memstore.gpuaddr + KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, soptimestamp)); adreno_writereg(adreno_dev, ADRENO_REG_SCRATCH_UMSK, GSL_RB_MEMPTRS_SCRATCH_MASK); /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */ if (adreno_is_a305(adreno_dev) || adreno_is_a305c(adreno_dev) || adreno_is_a306(adreno_dev) || adreno_is_a320(adreno_dev)) kgsl_regwrite(device, A3XX_CP_QUEUE_THRESHOLDS, 0x000E0602); else if (adreno_is_a330(adreno_dev) || adreno_is_a305b(adreno_dev) || adreno_is_a310(adreno_dev)) kgsl_regwrite(device, A3XX_CP_QUEUE_THRESHOLDS, 0x003E2008); rb->wptr = 0; } /** * _ringbuffer_start_common() - Ringbuffer start * @rb: Pointer to adreno ringbuffer * * Start ringbuffer for GPU. */ static int _ringbuffer_start_common(struct adreno_ringbuffer *rb) { int status; struct kgsl_device *device = rb->device; struct adreno_device *adreno_dev = ADRENO_DEVICE(device); /* clear ME_HALT to start micro engine */ adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, 0); /* ME init is GPU specific, so jump into the sub-function */ status = adreno_dev->gpudev->rb_init(adreno_dev, rb); if (status) return status; /* idle device to validate ME INIT */ status = adreno_spin_idle(device); if (status == 0) rb->flags |= KGSL_FLAGS_STARTED; return status; } /** * adreno_ringbuffer_warm_start() - Ringbuffer warm start * @rb: Pointer to adreno ringbuffer * * Start the ringbuffer but load only jump tables part of the * microcode. 
*/ int adreno_ringbuffer_warm_start(struct adreno_ringbuffer *rb) { int status; struct kgsl_device *device = rb->device; struct adreno_device *adreno_dev = ADRENO_DEVICE(device); if (rb->flags & KGSL_FLAGS_STARTED) return 0; _ringbuffer_setup_common(rb); /* If bootstrapping if supported to load jump tables */ if (adreno_bootstrap_ucode(adreno_dev)) { status = _ringbuffer_bootstrap_ucode(rb, 1); if (status != 0) return status; } else { /* load the CP jump tables using AHB writes */ status = adreno_ringbuffer_load_pm4_ucode(device, adreno_dev->pm4_jt_idx, adreno_dev->pm4_fw_size, adreno_dev->pm4_jt_addr); if (status != 0) return status; /* load the prefetch parser jump tables using AHB writes */ status = adreno_ringbuffer_load_pfp_ucode(device, adreno_dev->pfp_jt_idx, adreno_dev->pfp_fw_size, adreno_dev->pfp_jt_addr); if (status != 0) return status; } status = _ringbuffer_start_common(rb); return status; } /** * adreno_ringbuffer_cold_start() - Ringbuffer cold start * @rb: Pointer to adreno ringbuffer * * Start the ringbuffer from power collapse. */ int adreno_ringbuffer_cold_start(struct adreno_ringbuffer *rb) { int status; struct kgsl_device *device = rb->device; struct adreno_device *adreno_dev = ADRENO_DEVICE(device); if (rb->flags & KGSL_FLAGS_STARTED) return 0; _ringbuffer_setup_common(rb); /* If bootstrapping if supported to load ucode */ if (adreno_bootstrap_ucode(adreno_dev)) { /* * load first adreno_dev->pm4_bstrp_size + * adreno_dev->pfp_bstrp_size microcode dwords using AHB write, * this small microcode has dispatcher + booter, this initial * microcode enables CP to understand CP_BOOTSTRAP_UCODE packet * in function _ringbuffer_bootstrap_ucode. CP_BOOTSTRAP_UCODE * packet loads rest of the microcode. 
*/ status = adreno_ringbuffer_load_pm4_ucode(rb->device, 1, adreno_dev->pm4_bstrp_size+1, 0); if (status != 0) return status; status = adreno_ringbuffer_load_pfp_ucode(rb->device, 1, adreno_dev->pfp_bstrp_size+1, 0); if (status != 0) return status; /* Bootstrap rest of the ucode here */ status = _ringbuffer_bootstrap_ucode(rb, 0); if (status != 0) return status; } else { /* load the CP ucode using AHB writes */ status = adreno_ringbuffer_load_pm4_ucode(rb->device, 1, adreno_dev->pm4_fw_size, 0); if (status != 0) return status; /* load the prefetch parser ucode using AHB writes */ status = adreno_ringbuffer_load_pfp_ucode(rb->device, 1, adreno_dev->pfp_fw_size, 0); if (status != 0) return status; } status = _ringbuffer_start_common(rb); return status; } void adreno_ringbuffer_stop(struct adreno_ringbuffer *rb) { rb->flags &= ~KGSL_FLAGS_STARTED; } int adreno_ringbuffer_init(struct kgsl_device *device) { int status; struct adreno_device *adreno_dev = ADRENO_DEVICE(device); struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer; rb->device = device; /* * It is silly to convert this to words and then back to bytes * immediately below, but most of the rest of the code deals * in words, so we might as well only do the math once */ rb->sizedwords = KGSL_RB_SIZE >> 2; rb->buffer_desc.flags = KGSL_MEMFLAGS_GPUREADONLY; /* allocate memory for ringbuffer */ status = kgsl_allocate_contiguous(device, &rb->buffer_desc, (rb->sizedwords << 2)); if (status != 0) { adreno_ringbuffer_close(rb); return status; } rb->global_ts = 0; return 0; } void adreno_ringbuffer_close(struct adreno_ringbuffer *rb) { struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device); kgsl_sharedmem_free(&rb->buffer_desc); kfree(adreno_dev->pfp_fw); kfree(adreno_dev->pm4_fw); adreno_dev->pfp_fw = NULL; adreno_dev->pm4_fw = NULL; memset(rb, 0, sizeof(struct adreno_ringbuffer)); } static int adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb, struct adreno_context *drawctxt, unsigned int flags, unsigned 
int *cmds, int sizedwords, uint32_t timestamp) { struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device); unsigned int *ringcmds; unsigned int total_sizedwords = sizedwords; unsigned int i; unsigned int rcmd_gpu; unsigned int context_id; unsigned int gpuaddr = rb->device->memstore.gpuaddr; bool profile_ready; if (drawctxt != NULL && kgsl_context_detached(&drawctxt->base)) return -EINVAL; rb->global_ts++; /* If this is a internal IB, use the global timestamp for it */ if (!drawctxt || (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) { timestamp = rb->global_ts; context_id = KGSL_MEMSTORE_GLOBAL; } else { context_id = drawctxt->base.id; } /* * Note that we cannot safely take drawctxt->mutex here without * potential mutex inversion with device->mutex which is held * here. As a result, any other code that accesses this variable * must also use device->mutex. */ if (drawctxt) drawctxt->internal_timestamp = rb->global_ts; /* * If in stream ib profiling is enabled and there are counters * assigned, then space needs to be reserved for profiling. This * space in the ringbuffer is always consumed (might be filled with * NOPs in error case. profile_ready needs to be consistent through * the _addcmds call since it is allocating additional ringbuffer * command space. */ profile_ready = drawctxt && adreno_profile_assignments_ready(&adreno_dev->profile) && !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE); /* reserve space to temporarily turn off protected mode * error checking if needed */ total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0; /* 2 dwords to store the start of command sequence */ total_sizedwords += 2; /* internal ib command identifier for the ringbuffer */ total_sizedwords += (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) ? 2 : 0; /* Add two dwords for the CP_INTERRUPT */ total_sizedwords += (drawctxt || (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) ? 
2 : 0; /* context rollover */ if (adreno_is_a3xx(adreno_dev)) total_sizedwords += 3; /* For HLSQ updates below */ if (adreno_is_a4xx(adreno_dev) || adreno_is_a3xx(adreno_dev)) total_sizedwords += 4; total_sizedwords += 3; /* sop timestamp */ total_sizedwords += 4; /* eop timestamp */ if (drawctxt) { total_sizedwords += 3; /* global timestamp without cache * flush for non-zero context */ } if (flags & KGSL_CMD_FLAGS_WFI) total_sizedwords += 2; /* WFI */ if (profile_ready) total_sizedwords += 6; /* space for pre_ib and post_ib */ /* Add space for the power on shader fixup if we need it */ if (flags & KGSL_CMD_FLAGS_PWRON_FIXUP) total_sizedwords += 9; ringcmds = adreno_ringbuffer_allocspace(rb, drawctxt, total_sizedwords); if (IS_ERR(ringcmds)) return PTR_ERR(ringcmds); if (ringcmds == NULL) return -ENOSPC; rcmd_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-total_sizedwords); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, cp_nop_packet(1)); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER); if (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) { GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, cp_nop_packet(1)); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, KGSL_CMD_INTERNAL_IDENTIFIER); } if (flags & KGSL_CMD_FLAGS_PWRON_FIXUP) { /* Disable protected mode for the fixup */ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, cp_type3_packet(CP_SET_PROTECTED_MODE, 1)); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, cp_nop_packet(1)); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, KGSL_PWRON_FIXUP_IDENTIFIER); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, CP_HDR_INDIRECT_BUFFER_PFD); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, adreno_dev->pwron_fixup.gpuaddr); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, adreno_dev->pwron_fixup_dwords); /* Re-enable protected mode */ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, cp_type3_packet(CP_SET_PROTECTED_MODE, 1)); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 1); } /* Add any IB required 
for profiling if it is enabled */ if (profile_ready) adreno_profile_preib_processing(rb->device, drawctxt, &flags, &ringcmds, &rcmd_gpu); /* start-of-pipeline timestamp */ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, cp_type3_packet(CP_MEM_WRITE, 2)); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, (gpuaddr + KGSL_MEMSTORE_OFFSET(context_id, soptimestamp))); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, timestamp); if (flags & KGSL_CMD_FLAGS_PMODE) { /* disable protected mode error checking */ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, cp_type3_packet(CP_SET_PROTECTED_MODE, 1)); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0); } for (i = 0; i < sizedwords; i++) { GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, *cmds); cmds++; } if (flags & KGSL_CMD_FLAGS_PMODE) { /* re-enable protected mode error checking */ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, cp_type3_packet(CP_SET_PROTECTED_MODE, 1)); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 1); } /* * Flush HLSQ lazy updates to make sure there are no * resources pending for indirect loads after the timestamp */ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, cp_type3_packet(CP_EVENT_WRITE, 1)); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0x07); /* HLSQ_FLUSH */ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, cp_type3_packet(CP_WAIT_FOR_IDLE, 1)); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0x00); /* Add any postIB required for profiling if it is enabled and has assigned counters */ if (profile_ready) adreno_profile_postib_processing(rb->device, &flags, &ringcmds, &rcmd_gpu); /* * end-of-pipeline timestamp. If per context timestamps is not * enabled, then context_id will be KGSL_MEMSTORE_GLOBAL so all * eop timestamps will work out. 
*/ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, cp_type3_packet(CP_EVENT_WRITE, 3)); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, CACHE_FLUSH_TS); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, (gpuaddr + KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp))); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, timestamp); if (drawctxt) { GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, cp_type3_packet(CP_MEM_WRITE, 2)); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, (gpuaddr + KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, eoptimestamp))); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, rb->global_ts); } if (drawctxt || (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) { GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, cp_type3_packet(CP_INTERRUPT, 1)); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, CP_INTERRUPT_RB); } if (adreno_is_a3xx(adreno_dev)) { /* Dummy set-constant to trigger context rollover */ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, cp_type3_packet(CP_SET_CONSTANT, 2)); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, (0x4<<16)|(A3XX_HLSQ_CL_KERNEL_GROUP_X_REG - 0x2000)); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0); } if (flags & KGSL_CMD_FLAGS_WFI) { GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, cp_type3_packet(CP_WAIT_FOR_IDLE, 1)); GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0x00000000); } adreno_ringbuffer_submit(rb); return 0; } unsigned int adreno_ringbuffer_issuecmds(struct kgsl_device *device, struct adreno_context *drawctxt, unsigned int flags, unsigned int *cmds, int sizedwords) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer; flags |= KGSL_CMD_FLAGS_INTERNAL_ISSUE; return adreno_ringbuffer_addcmds(rb, drawctxt, flags, cmds, sizedwords, 0); } static bool _parse_ibs(struct kgsl_device_private *dev_priv, uint gpuaddr, int sizedwords); static bool _handle_type3(struct kgsl_device_private *dev_priv, uint *hostaddr) { unsigned int opcode = cp_type3_opcode(*hostaddr); switch (opcode) { case CP_INDIRECT_BUFFER_PFD: case 
CP_INDIRECT_BUFFER_PFE: case CP_COND_INDIRECT_BUFFER_PFE: case CP_COND_INDIRECT_BUFFER_PFD: return _parse_ibs(dev_priv, hostaddr[1], hostaddr[2]); case CP_NOP: case CP_WAIT_FOR_IDLE: case CP_WAIT_REG_MEM: case CP_WAIT_REG_EQ: case CP_WAT_REG_GTE: case CP_WAIT_UNTIL_READ: case CP_WAIT_IB_PFD_COMPLETE: case CP_REG_RMW: case CP_REG_TO_MEM: case CP_MEM_WRITE: case CP_MEM_WRITE_CNTR: case CP_COND_EXEC: case CP_COND_WRITE: case CP_EVENT_WRITE: case CP_EVENT_WRITE_SHD: case CP_EVENT_WRITE_CFL: case CP_EVENT_WRITE_ZPD: case CP_DRAW_INDX: case CP_DRAW_INDX_2: case CP_DRAW_INDX_BIN: case CP_DRAW_INDX_2_BIN: case CP_VIZ_QUERY: case CP_SET_STATE: case CP_SET_CONSTANT: case CP_IM_LOAD: case CP_IM_LOAD_IMMEDIATE: case CP_LOAD_CONSTANT_CONTEXT: case CP_INVALIDATE_STATE: case CP_SET_SHADER_BASES: case CP_SET_BIN_MASK: case CP_SET_BIN_SELECT: case CP_SET_BIN_BASE_OFFSET: case CP_SET_BIN_DATA: case CP_CONTEXT_UPDATE: case CP_INTERRUPT: case CP_IM_STORE: case CP_LOAD_STATE: break; /* these shouldn't come from userspace */ case CP_ME_INIT: case CP_SET_PROTECTED_MODE: default: KGSL_CMD_ERR(dev_priv->device, "bad CP opcode %0x\n", opcode); return false; break; } return true; } static bool _handle_type0(struct kgsl_device_private *dev_priv, uint *hostaddr) { unsigned int reg = type0_pkt_offset(*hostaddr); unsigned int cnt = type0_pkt_size(*hostaddr); if (reg < 0x0192 || (reg + cnt) >= 0x8000) { KGSL_CMD_ERR(dev_priv->device, "bad type0 reg: 0x%0x cnt: %d\n", reg, cnt); return false; } return true; } /* * Traverse IBs and dump them to test vector. 
Detect swap by inspecting * register writes, keeping note of the current state, and dump * framebuffer config to test vector */ static bool _parse_ibs(struct kgsl_device_private *dev_priv, uint gpuaddr, int sizedwords) { static uint level; /* recursion level */ bool ret = false; uint *hostaddr, *hoststart; int dwords_left = sizedwords; /* dwords left in the current command buffer */ struct kgsl_mem_entry *entry; entry = kgsl_sharedmem_find_region(dev_priv->process_priv, gpuaddr, sizedwords * sizeof(uint)); if (entry == NULL) { KGSL_CMD_ERR(dev_priv->device, "no mapping for gpuaddr: 0x%08x\n", gpuaddr); return false; } hostaddr = kgsl_gpuaddr_to_vaddr(&entry->memdesc, gpuaddr); if (hostaddr == NULL) { KGSL_CMD_ERR(dev_priv->device, "no mapping for gpuaddr: 0x%08x\n", gpuaddr); return false; } hoststart = hostaddr; level++; KGSL_CMD_INFO(dev_priv->device, "ib: gpuaddr:0x%08x, wc:%d, hptr:%p\n", gpuaddr, sizedwords, hostaddr); mb(); while (dwords_left > 0) { bool cur_ret = true; int count = 0; /* dword count including packet header */ switch (*hostaddr >> 30) { case 0x0: /* type-0 */ count = (*hostaddr >> 16)+2; cur_ret = _handle_type0(dev_priv, hostaddr); break; case 0x1: /* type-1 */ count = 2; break; case 0x3: /* type-3 */ count = ((*hostaddr >> 16) & 0x3fff) + 2; cur_ret = _handle_type3(dev_priv, hostaddr); break; default: KGSL_CMD_ERR(dev_priv->device, "unexpected type: " "type:%d, word:0x%08x @ 0x%p, gpu:0x%08x\n", *hostaddr >> 30, *hostaddr, hostaddr, gpuaddr+4*(sizedwords-dwords_left)); cur_ret = false; count = dwords_left; break; } if (!cur_ret) { KGSL_CMD_ERR(dev_priv->device, "bad sub-type: #:%d/%d, v:0x%08x" " @ 0x%p[gb:0x%08x], level:%d\n", sizedwords-dwords_left, sizedwords, *hostaddr, hostaddr, gpuaddr+4*(sizedwords-dwords_left), level); if (ADRENO_DEVICE(dev_priv->device)->ib_check_level >= 2) print_hex_dump(KERN_ERR, level == 1 ? 
"IB1:" : "IB2:", DUMP_PREFIX_OFFSET, 32, 4, hoststart, sizedwords*4, 0); goto done; } /* jump to next packet */ dwords_left -= count; hostaddr += count; if (dwords_left < 0) { KGSL_CMD_ERR(dev_priv->device, "bad count: c:%d, #:%d/%d, " "v:0x%08x @ 0x%p[gb:0x%08x], level:%d\n", count, sizedwords-(dwords_left+count), sizedwords, *(hostaddr-count), hostaddr-count, gpuaddr+4*(sizedwords-(dwords_left+count)), level); if (ADRENO_DEVICE(dev_priv->device)->ib_check_level >= 2) print_hex_dump(KERN_ERR, level == 1 ? "IB1:" : "IB2:", DUMP_PREFIX_OFFSET, 32, 4, hoststart, sizedwords*4, 0); goto done; } } ret = true; done: if (!ret) KGSL_DRV_ERR(dev_priv->device, "parsing failed: gpuaddr:0x%08x, " "host:0x%p, wc:%d\n", gpuaddr, hoststart, sizedwords); level--; return ret; } /** * _ringbuffer_verify_ib() - parse an IB and verify that it is correct * @dev_priv: Pointer to the process struct * @ibdesc: Pointer to the IB descriptor * * This function only gets called if debugging is enabled - it walks the IB and * does additional level parsing and verification above and beyond what KGSL * core does */ static inline bool _ringbuffer_verify_ib(struct kgsl_device_private *dev_priv, struct kgsl_ibdesc *ibdesc) { struct kgsl_device *device = dev_priv->device; struct adreno_device *adreno_dev = ADRENO_DEVICE(device); /* Check that the size of the IBs is under the allowable limit */ if (ibdesc->sizedwords == 0 || ibdesc->sizedwords > 0xFFFFF) { KGSL_DRV_ERR(device, "Invalid IB size 0x%zX\n", ibdesc->sizedwords); return false; } if (unlikely(adreno_dev->ib_check_level >= 1) && !_parse_ibs(dev_priv, ibdesc->gpuaddr, ibdesc->sizedwords)) { KGSL_DRV_ERR(device, "Could not verify the IBs\n"); return false; } return true; } int adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv, struct kgsl_context *context, struct kgsl_cmdbatch *cmdbatch, uint32_t *timestamp) { struct kgsl_device *device = dev_priv->device; struct adreno_device *adreno_dev = ADRENO_DEVICE(device); struct 
adreno_context *drawctxt = ADRENO_CONTEXT(context); int i, ret; if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) return -EDEADLK; /* Verify the IBs before they get queued */ for (i = 0; i < cmdbatch->ibcount; i++) { if (!_ringbuffer_verify_ib(dev_priv, &cmdbatch->ibdesc[i])) return -EINVAL; } /* wait for the suspend gate */ wait_for_completion(&device->cmdbatch_gate); /* * Clear the wake on touch bit to indicate an IB has been submitted * since the last time we set it */ device->flags &= ~KGSL_FLAG_WAKE_ON_TOUCH; /* Queue the command in the ringbuffer */ ret = adreno_dispatcher_queue_cmd(adreno_dev, drawctxt, cmdbatch, timestamp); /* * Return -EPROTO if the device has faulted since the last time we * checked - userspace uses this to perform post-fault activities */ if (!ret && test_and_clear_bit(ADRENO_CONTEXT_FAULT, &drawctxt->priv)) ret = -EPROTO; return ret; } unsigned int adreno_ringbuffer_get_constraint(struct kgsl_device *device, struct kgsl_context *context) { unsigned int pwrlevel = device->pwrctrl.active_pwrlevel; switch (context->pwr_constraint.type) { case KGSL_CONSTRAINT_PWRLEVEL: { switch (context->pwr_constraint.sub_type) { case KGSL_CONSTRAINT_PWR_MAX: pwrlevel = device->pwrctrl.max_pwrlevel; break; case KGSL_CONSTRAINT_PWR_MIN: pwrlevel = device->pwrctrl.min_pwrlevel; break; default: break; } } break; } return pwrlevel; } void adreno_ringbuffer_set_constraint(struct kgsl_device *device, struct kgsl_cmdbatch *cmdbatch) { unsigned int constraint; struct kgsl_context *context = cmdbatch->context; /* * Check if the context has a constraint and constraint flags are * set. 
*/ if (context->pwr_constraint.type && ((context->flags & KGSL_CONTEXT_PWR_CONSTRAINT) || (cmdbatch->flags & KGSL_CONTEXT_PWR_CONSTRAINT))) { constraint = adreno_ringbuffer_get_constraint(device, context); /* * If a constraint is already set, set a new * constraint only if it is faster */ if ((device->pwrctrl.constraint.type == KGSL_CONSTRAINT_NONE) || (constraint < device->pwrctrl.constraint.hint.pwrlevel.level)) { kgsl_pwrctrl_pwrlevel_change(device, constraint); device->pwrctrl.constraint.type = context->pwr_constraint.type; device->pwrctrl.constraint.hint. pwrlevel.level = constraint; } device->pwrctrl.constraint.expires = jiffies + device->pwrctrl.interval_timeout; } } /* adreno_rindbuffer_submitcmd - submit userspace IBs to the GPU */ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, struct kgsl_cmdbatch *cmdbatch) { struct kgsl_device *device = &adreno_dev->dev; struct kgsl_ibdesc *ibdesc; unsigned int numibs; unsigned int *link; unsigned int *cmds; unsigned int i; struct kgsl_context *context; struct adreno_context *drawctxt; unsigned int start_index = 0; int flags = KGSL_CMD_FLAGS_NONE; int ret; context = cmdbatch->context; drawctxt = ADRENO_CONTEXT(context); ibdesc = cmdbatch->ibdesc; numibs = cmdbatch->ibcount; /* process any profiling results that are available into the log_buf */ adreno_profile_process_results(device); /* * If SKIP CMD flag is set for current context * a) set SKIPCMD as fault_recovery for current commandbatch * b) store context's commandbatch fault_policy in current * commandbatch fault_policy and clear context's commandbatch * fault_policy * c) force preamble for commandbatch */ if (test_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->priv) && (!test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv))) { set_bit(KGSL_FT_SKIPCMD, &cmdbatch->fault_recovery); cmdbatch->fault_policy = drawctxt->fault_policy; set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv); /* if context is detached print fault recovery */ 
adreno_fault_skipcmd_detached(device, drawctxt, cmdbatch); /* clear the drawctxt flags */ clear_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->priv); drawctxt->fault_policy = 0; } /*When preamble is enabled, the preamble buffer with state restoration commands are stored in the first node of the IB chain. We can skip that if a context switch hasn't occured */ if ((drawctxt->base.flags & KGSL_CONTEXT_PREAMBLE) && !test_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv) && (adreno_dev->drawctxt_active == drawctxt)) start_index = 1; /* * In skip mode don't issue the draw IBs but keep all the other * accoutrements of a submision (including the interrupt) to keep * the accounting sane. Set start_index and numibs to 0 to just * generate the start and end markers and skip everything else */ if (test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv)) { start_index = 0; numibs = 0; } cmds = link = kzalloc(sizeof(unsigned int) * (numibs * 3 + 4), GFP_KERNEL); if (!link) { ret = -ENOMEM; goto done; } if (!start_index) { *cmds++ = cp_nop_packet(1); *cmds++ = KGSL_START_OF_IB_IDENTIFIER; } else { *cmds++ = cp_nop_packet(4); *cmds++ = KGSL_START_OF_IB_IDENTIFIER; *cmds++ = CP_HDR_INDIRECT_BUFFER_PFD; *cmds++ = ibdesc[0].gpuaddr; *cmds++ = ibdesc[0].sizedwords; } for (i = start_index; i < numibs; i++) { /* * Skip 0 sized IBs - these are presumed to have been removed * from consideration by the FT policy */ if (ibdesc[i].sizedwords == 0) *cmds++ = cp_nop_packet(2); else *cmds++ = CP_HDR_INDIRECT_BUFFER_PFD; *cmds++ = ibdesc[i].gpuaddr; *cmds++ = ibdesc[i].sizedwords; } *cmds++ = cp_nop_packet(1); *cmds++ = KGSL_END_OF_IB_IDENTIFIER; ret = kgsl_setstate(&device->mmu, context->id, kgsl_mmu_pt_get_flags(device->mmu.hwpagetable, device->id)); if (ret) goto done; ret = adreno_drawctxt_switch(adreno_dev, drawctxt, cmdbatch->flags); /* * In the unlikely event of an error in the drawctxt switch, * treat it like a hang */ if (ret) goto done; if (test_bit(CMDBATCH_FLAG_WFI, &cmdbatch->priv)) flags = 
KGSL_CMD_FLAGS_WFI; /* * For some targets, we need to execute a dummy shader operation after a * power collapse */ if (test_and_clear_bit(ADRENO_DEVICE_PWRON, &adreno_dev->priv) && test_bit(ADRENO_DEVICE_PWRON_FIXUP, &adreno_dev->priv)) flags |= KGSL_CMD_FLAGS_PWRON_FIXUP; /* Set the constraints before adding to ringbuffer */ adreno_ringbuffer_set_constraint(device, cmdbatch); /* CFF stuff executed only if CFF is enabled */ kgsl_cffdump_capture_ib_desc(device, context, ibdesc, numibs); ret = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer, drawctxt, flags, &link[0], (cmds - link), cmdbatch->timestamp); kgsl_cffdump_regpoll(device, adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS) << 2, 0x00000000, 0x80000000); done: trace_kgsl_issueibcmds(device, context->id, cmdbatch, cmdbatch->timestamp, cmdbatch->flags, ret, drawctxt->type); kfree(link); return ret; }
gpl-2.0
zarboz/Evita_UL_422-JB
arch/arm/mm/proc-syms.c
331
1029
/* * linux/arch/arm/mm/proc-syms.c * * Copyright (C) 2000-2002 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/mm.h> #include <asm/cacheflush.h> #include <asm/proc-fns.h> #include <asm/tlbflush.h> #include <asm/page.h> #ifndef MULTI_CPU EXPORT_SYMBOL(cpu_dcache_clean_area); EXPORT_SYMBOL(cpu_set_pte_ext); #else EXPORT_SYMBOL(processor); #endif #ifndef MULTI_CACHE EXPORT_SYMBOL(__cpuc_flush_kern_all); EXPORT_SYMBOL(__cpuc_flush_user_all); EXPORT_SYMBOL(__cpuc_flush_user_range); EXPORT_SYMBOL(__cpuc_coherent_kern_range); EXPORT_SYMBOL(__cpuc_flush_dcache_area); #else EXPORT_SYMBOL(cpu_cache); #endif #ifdef CONFIG_MMU #ifndef MULTI_USER EXPORT_SYMBOL(__cpu_clear_user_highpage); EXPORT_SYMBOL(__cpu_copy_user_highpage); #else EXPORT_SYMBOL(cpu_user); #endif #endif #ifdef MULTI_TLB EXPORT_SYMBOL(cpu_tlb); #endif
gpl-2.0
squllcx/Axon7
drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
331
14461
/* IEEE 802.11 SoftMAC layer * Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com> * * Mostly extracted from the rtl8180-sa2400 driver for the * in-kernel generic ieee802.11 stack. * * Some pieces of code might be stolen from ipw2100 driver * copyright of who own it's copyright ;-) * * PS wx handler mostly stolen from hostap, copyright who * own it's copyright ;-) * * released under the GPL */ #include <linux/etherdevice.h> #include "ieee80211.h" #include "dot11d.h" /* FIXME: add A freqs */ const long ieee80211_wlan_frequencies[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484 }; EXPORT_SYMBOL(ieee80211_wlan_frequencies); int ieee80211_wx_set_freq(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { int ret; struct iw_freq *fwrq = & wrqu->freq; down(&ieee->wx_sem); if(ieee->iw_mode == IW_MODE_INFRA){ ret = -EOPNOTSUPP; goto out; } /* if setting by freq convert to channel */ if (fwrq->e == 1) { if ((fwrq->m >= (int) 2.412e8 && fwrq->m <= (int) 2.487e8)) { int f = fwrq->m / 100000; int c = 0; while ((c < 14) && (f != ieee80211_wlan_frequencies[c])) c++; /* hack to fall through */ fwrq->e = 0; fwrq->m = c + 1; } } if (fwrq->e > 0 || fwrq->m > 14 || fwrq->m < 1 ){ ret = -EOPNOTSUPP; goto out; }else { /* Set the channel */ if (!(GET_DOT11D_INFO(ieee)->channel_map)[fwrq->m]) { ret = -EINVAL; goto out; } ieee->current_network.channel = fwrq->m; ieee->set_chan(ieee->dev, ieee->current_network.channel); if(ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER) if(ieee->state == IEEE80211_LINKED){ ieee80211_stop_send_beacons(ieee); ieee80211_start_send_beacons(ieee); } } ret = 0; out: up(&ieee->wx_sem); return ret; } EXPORT_SYMBOL(ieee80211_wx_set_freq); int ieee80211_wx_get_freq(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { struct iw_freq *fwrq = & wrqu->freq; if (ieee->current_network.channel == 0) return -1; //NM 0.7.0 will not 
accept channel any more. fwrq->m = ieee80211_wlan_frequencies[ieee->current_network.channel-1] * 100000; fwrq->e = 1; // fwrq->m = ieee->current_network.channel; // fwrq->e = 0; return 0; } EXPORT_SYMBOL(ieee80211_wx_get_freq); int ieee80211_wx_get_wap(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { unsigned long flags; wrqu->ap_addr.sa_family = ARPHRD_ETHER; if (ieee->iw_mode == IW_MODE_MONITOR) return -1; /* We want avoid to give to the user inconsistent infos*/ spin_lock_irqsave(&ieee->lock, flags); if (ieee->state != IEEE80211_LINKED && ieee->state != IEEE80211_LINKED_SCANNING && ieee->wap_set == 0) memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); else memcpy(wrqu->ap_addr.sa_data, ieee->current_network.bssid, ETH_ALEN); spin_unlock_irqrestore(&ieee->lock, flags); return 0; } EXPORT_SYMBOL(ieee80211_wx_get_wap); int ieee80211_wx_set_wap(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *awrq, char *extra) { int ret = 0; unsigned long flags; short ifup = ieee->proto_started;//dev->flags & IFF_UP; struct sockaddr *temp = (struct sockaddr *)awrq; ieee->sync_scan_hurryup = 1; down(&ieee->wx_sem); /* use ifconfig hw ether */ if (ieee->iw_mode == IW_MODE_MASTER){ ret = -1; goto out; } if (temp->sa_family != ARPHRD_ETHER){ ret = -EINVAL; goto out; } if (ifup) ieee80211_stop_protocol(ieee); /* just to avoid to give inconsistent infos in the * get wx method. 
not really needed otherwise */ spin_lock_irqsave(&ieee->lock, flags); memcpy(ieee->current_network.bssid, temp->sa_data, ETH_ALEN); ieee->wap_set = !is_zero_ether_addr(temp->sa_data); spin_unlock_irqrestore(&ieee->lock, flags); if (ifup) ieee80211_start_protocol(ieee); out: up(&ieee->wx_sem); return ret; } EXPORT_SYMBOL(ieee80211_wx_set_wap); int ieee80211_wx_get_essid(struct ieee80211_device *ieee, struct iw_request_info *a,union iwreq_data *wrqu,char *b) { int len, ret = 0; unsigned long flags; if (ieee->iw_mode == IW_MODE_MONITOR) return -1; /* We want avoid to give to the user inconsistent infos*/ spin_lock_irqsave(&ieee->lock, flags); if (ieee->current_network.ssid[0] == '\0' || ieee->current_network.ssid_len == 0){ ret = -1; goto out; } if (ieee->state != IEEE80211_LINKED && ieee->state != IEEE80211_LINKED_SCANNING && ieee->ssid_set == 0){ ret = -1; goto out; } len = ieee->current_network.ssid_len; wrqu->essid.length = len; strncpy(b, ieee->current_network.ssid, len); wrqu->essid.flags = 1; out: spin_unlock_irqrestore(&ieee->lock, flags); return ret; } EXPORT_SYMBOL(ieee80211_wx_get_essid); int ieee80211_wx_set_rate(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { u32 target_rate = wrqu->bitrate.value; ieee->rate = target_rate/100000; //FIXME: we might want to limit rate also in management protocols. 
return 0; } EXPORT_SYMBOL(ieee80211_wx_set_rate); int ieee80211_wx_get_rate(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { u32 tmp_rate; tmp_rate = TxCountToDataRate(ieee, ieee->softmac_stats.CurrentShowTxate); wrqu->bitrate.value = tmp_rate * 500000; return 0; } EXPORT_SYMBOL(ieee80211_wx_get_rate); int ieee80211_wx_set_rts(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { if (wrqu->rts.disabled || !wrqu->rts.fixed) ieee->rts = DEFAULT_RTS_THRESHOLD; else { if (wrqu->rts.value < MIN_RTS_THRESHOLD || wrqu->rts.value > MAX_RTS_THRESHOLD) return -EINVAL; ieee->rts = wrqu->rts.value; } return 0; } EXPORT_SYMBOL(ieee80211_wx_set_rts); int ieee80211_wx_get_rts(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { wrqu->rts.value = ieee->rts; wrqu->rts.fixed = 0; /* no auto select */ wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); return 0; } EXPORT_SYMBOL(ieee80211_wx_get_rts); int ieee80211_wx_set_mode(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { ieee->sync_scan_hurryup = 1; down(&ieee->wx_sem); if (wrqu->mode == ieee->iw_mode) goto out; if (wrqu->mode == IW_MODE_MONITOR){ ieee->dev->type = ARPHRD_IEEE80211; }else{ ieee->dev->type = ARPHRD_ETHER; } if (!ieee->proto_started){ ieee->iw_mode = wrqu->mode; }else{ ieee80211_stop_protocol(ieee); ieee->iw_mode = wrqu->mode; ieee80211_start_protocol(ieee); } out: up(&ieee->wx_sem); return 0; } EXPORT_SYMBOL(ieee80211_wx_set_mode); void ieee80211_wx_sync_scan_wq(struct work_struct *work) { struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, wx_sync_scan_wq); short chan; HT_EXTCHNL_OFFSET chan_offset=0; HT_CHANNEL_WIDTH bandwidth=0; int b40M = 0; static int count; chan = ieee->current_network.channel; netif_carrier_off(ieee->dev); if (ieee->data_hard_stop) ieee->data_hard_stop(ieee->dev); 
ieee80211_stop_send_beacons(ieee); ieee->state = IEEE80211_LINKED_SCANNING; ieee->link_change(ieee->dev); ieee->InitialGainHandler(ieee->dev, IG_Backup); if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT && ieee->pHTInfo->bCurBW40MHz) { b40M = 1; chan_offset = ieee->pHTInfo->CurSTAExtChnlOffset; bandwidth = (HT_CHANNEL_WIDTH)ieee->pHTInfo->bCurBW40MHz; printk("Scan in 40M, force to 20M first:%d, %d\n", chan_offset, bandwidth); ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT); } ieee80211_start_scan_syncro(ieee); if (b40M) { printk("Scan in 20M, back to 40M\n"); if (chan_offset == HT_EXTCHNL_OFFSET_UPPER) ieee->set_chan(ieee->dev, chan + 2); else if (chan_offset == HT_EXTCHNL_OFFSET_LOWER) ieee->set_chan(ieee->dev, chan - 2); else ieee->set_chan(ieee->dev, chan); ieee->SetBWModeHandler(ieee->dev, bandwidth, chan_offset); } else { ieee->set_chan(ieee->dev, chan); } ieee->InitialGainHandler(ieee->dev, IG_Restore); ieee->state = IEEE80211_LINKED; ieee->link_change(ieee->dev); // To prevent the immediately calling watch_dog after scan. 
if(ieee->LinkDetectInfo.NumRecvBcnInPeriod==0||ieee->LinkDetectInfo.NumRecvDataInPeriod==0 ) { ieee->LinkDetectInfo.NumRecvBcnInPeriod = 1; ieee->LinkDetectInfo.NumRecvDataInPeriod= 1; } if (ieee->data_hard_resume) ieee->data_hard_resume(ieee->dev); if(ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER) ieee80211_start_send_beacons(ieee); netif_carrier_on(ieee->dev); count = 0; up(&ieee->wx_sem); } int ieee80211_wx_set_scan(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { int ret = 0; down(&ieee->wx_sem); if (ieee->iw_mode == IW_MODE_MONITOR || !(ieee->proto_started)){ ret = -1; goto out; } if ( ieee->state == IEEE80211_LINKED){ queue_work(ieee->wq, &ieee->wx_sync_scan_wq); /* intentionally forget to up sem */ return 0; } out: up(&ieee->wx_sem); return ret; } EXPORT_SYMBOL(ieee80211_wx_set_scan); int ieee80211_wx_set_essid(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *extra) { int ret=0,len; short proto_started; unsigned long flags; ieee->sync_scan_hurryup = 1; down(&ieee->wx_sem); proto_started = ieee->proto_started; if (wrqu->essid.length > IW_ESSID_MAX_SIZE){ ret= -E2BIG; goto out; } if (ieee->iw_mode == IW_MODE_MONITOR){ ret= -1; goto out; } if(proto_started) ieee80211_stop_protocol(ieee); /* this is just to be sure that the GET wx callback * has consisten infos. not needed otherwise */ spin_lock_irqsave(&ieee->lock, flags); if (wrqu->essid.flags && wrqu->essid.length) { //first flush current network.ssid len = ((wrqu->essid.length-1) < IW_ESSID_MAX_SIZE) ? 
(wrqu->essid.length-1) : IW_ESSID_MAX_SIZE; strncpy(ieee->current_network.ssid, extra, len+1); ieee->current_network.ssid_len = len+1; ieee->ssid_set = 1; } else{ ieee->ssid_set = 0; ieee->current_network.ssid[0] = '\0'; ieee->current_network.ssid_len = 0; } spin_unlock_irqrestore(&ieee->lock, flags); if (proto_started) ieee80211_start_protocol(ieee); out: up(&ieee->wx_sem); return ret; } EXPORT_SYMBOL(ieee80211_wx_set_essid); int ieee80211_wx_get_mode(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { wrqu->mode = ieee->iw_mode; return 0; } EXPORT_SYMBOL(ieee80211_wx_get_mode); int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int *parms = (int *)extra; int enable = (parms[0] > 0); short prev = ieee->raw_tx; down(&ieee->wx_sem); if(enable) ieee->raw_tx = 1; else ieee->raw_tx = 0; printk(KERN_INFO"raw TX is %s\n", ieee->raw_tx ? "enabled" : "disabled"); if(ieee->iw_mode == IW_MODE_MONITOR) { if(prev == 0 && ieee->raw_tx){ if (ieee->data_hard_resume) ieee->data_hard_resume(ieee->dev); netif_carrier_on(ieee->dev); } if(prev && ieee->raw_tx == 1) netif_carrier_off(ieee->dev); } up(&ieee->wx_sem); return 0; } EXPORT_SYMBOL(ieee80211_wx_set_rawtx); int ieee80211_wx_get_name(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { strlcpy(wrqu->name, "802.11", IFNAMSIZ); if (ieee->modulation & IEEE80211_CCK_MODULATION) { strlcat(wrqu->name, "b", IFNAMSIZ); if (ieee->modulation & IEEE80211_OFDM_MODULATION) strlcat(wrqu->name, "/g", IFNAMSIZ); } else if (ieee->modulation & IEEE80211_OFDM_MODULATION) { strlcat(wrqu->name, "g", IFNAMSIZ); } if (ieee->mode & (IEEE_N_24G | IEEE_N_5G)) strlcat(wrqu->name, "/n", IFNAMSIZ); if ((ieee->state == IEEE80211_LINKED) || (ieee->state == IEEE80211_LINKED_SCANNING)) strlcat(wrqu->name, " linked", IFNAMSIZ); else if (ieee->state != IEEE80211_NOLINK) strlcat(wrqu->name, " link..", 
IFNAMSIZ); return 0; } EXPORT_SYMBOL(ieee80211_wx_get_name); /* this is mostly stolen from hostap */ int ieee80211_wx_set_power(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret = 0; down(&ieee->wx_sem); if (wrqu->power.disabled){ ieee->ps = IEEE80211_PS_DISABLED; goto exit; } if (wrqu->power.flags & IW_POWER_TIMEOUT) { //ieee->ps_period = wrqu->power.value / 1000; ieee->ps_timeout = wrqu->power.value / 1000; } if (wrqu->power.flags & IW_POWER_PERIOD) { //ieee->ps_timeout = wrqu->power.value / 1000; ieee->ps_period = wrqu->power.value / 1000; //wrq->value / 1024; } switch (wrqu->power.flags & IW_POWER_MODE) { case IW_POWER_UNICAST_R: ieee->ps = IEEE80211_PS_UNICAST; break; case IW_POWER_MULTICAST_R: ieee->ps = IEEE80211_PS_MBCAST; break; case IW_POWER_ALL_R: ieee->ps = IEEE80211_PS_UNICAST | IEEE80211_PS_MBCAST; break; case IW_POWER_ON: // ieee->ps = IEEE80211_PS_DISABLED; break; default: ret = -EINVAL; goto exit; } exit: up(&ieee->wx_sem); return ret; } EXPORT_SYMBOL(ieee80211_wx_set_power); /* this is stolen from hostap */ int ieee80211_wx_get_power(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { down(&ieee->wx_sem); if(ieee->ps == IEEE80211_PS_DISABLED){ wrqu->power.disabled = 1; goto exit; } wrqu->power.disabled = 0; if ((wrqu->power.flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) { wrqu->power.flags = IW_POWER_TIMEOUT; wrqu->power.value = ieee->ps_timeout * 1000; } else { // ret = -EOPNOTSUPP; // goto exit; wrqu->power.flags = IW_POWER_PERIOD; wrqu->power.value = ieee->ps_period * 1000; //ieee->current_network.dtim_period * ieee->current_network.beacon_interval * 1024; } if ((ieee->ps & (IEEE80211_PS_MBCAST | IEEE80211_PS_UNICAST)) == (IEEE80211_PS_MBCAST | IEEE80211_PS_UNICAST)) wrqu->power.flags |= IW_POWER_ALL_R; else if (ieee->ps & IEEE80211_PS_MBCAST) wrqu->power.flags |= IW_POWER_MULTICAST_R; else wrqu->power.flags |= IW_POWER_UNICAST_R; exit: 
up(&ieee->wx_sem); return 0; } EXPORT_SYMBOL(ieee80211_wx_get_power);
gpl-2.0
UnicronNL/vyos-kernel-utilite
drivers/extcon/extcon-gpio.c
331
5077
/* * drivers/extcon/extcon_gpio.c * * Single-state GPIO extcon driver based on extcon class * * Copyright (C) 2008 Google, Inc. * Author: Mike Lockwood <lockwood@android.com> * * Modified by MyungJoo Ham <myungjoo.ham@samsung.com> to support extcon * (originally switch class is supported) * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/extcon.h> #include <linux/extcon/extcon-gpio.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/workqueue.h> struct gpio_extcon_data { struct extcon_dev *edev; unsigned gpio; bool gpio_active_low; const char *state_on; const char *state_off; int irq; struct delayed_work work; unsigned long debounce_jiffies; bool check_on_resume; }; static void gpio_extcon_work(struct work_struct *work) { int state; struct gpio_extcon_data *data = container_of(to_delayed_work(work), struct gpio_extcon_data, work); state = gpio_get_value(data->gpio); if (data->gpio_active_low) state = !state; extcon_set_state(data->edev, state); } static irqreturn_t gpio_irq_handler(int irq, void *dev_id) { struct gpio_extcon_data *extcon_data = dev_id; queue_delayed_work(system_power_efficient_wq, &extcon_data->work, extcon_data->debounce_jiffies); return IRQ_HANDLED; } static ssize_t extcon_gpio_print_state(struct extcon_dev *edev, char *buf) { struct device *dev = edev->dev.parent; struct gpio_extcon_data *extcon_data = dev_get_drvdata(dev); const char *state; if (extcon_get_state(edev)) state = 
extcon_data->state_on; else state = extcon_data->state_off; if (state) return sprintf(buf, "%s\n", state); return -EINVAL; } static int gpio_extcon_probe(struct platform_device *pdev) { struct gpio_extcon_platform_data *pdata = dev_get_platdata(&pdev->dev); struct gpio_extcon_data *extcon_data; int ret; if (!pdata) return -EBUSY; if (!pdata->irq_flags) { dev_err(&pdev->dev, "IRQ flag is not specified.\n"); return -EINVAL; } extcon_data = devm_kzalloc(&pdev->dev, sizeof(struct gpio_extcon_data), GFP_KERNEL); if (!extcon_data) return -ENOMEM; extcon_data->edev = devm_extcon_dev_allocate(&pdev->dev, NULL); if (IS_ERR(extcon_data->edev)) { dev_err(&pdev->dev, "failed to allocate extcon device\n"); return -ENOMEM; } extcon_data->edev->name = pdata->name; extcon_data->gpio = pdata->gpio; extcon_data->gpio_active_low = pdata->gpio_active_low; extcon_data->state_on = pdata->state_on; extcon_data->state_off = pdata->state_off; extcon_data->check_on_resume = pdata->check_on_resume; if (pdata->state_on && pdata->state_off) extcon_data->edev->print_state = extcon_gpio_print_state; ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN, pdev->name); if (ret < 0) return ret; if (pdata->debounce) { ret = gpio_set_debounce(extcon_data->gpio, pdata->debounce * 1000); if (ret < 0) extcon_data->debounce_jiffies = msecs_to_jiffies(pdata->debounce); } ret = devm_extcon_dev_register(&pdev->dev, extcon_data->edev); if (ret < 0) return ret; INIT_DELAYED_WORK(&extcon_data->work, gpio_extcon_work); extcon_data->irq = gpio_to_irq(extcon_data->gpio); if (extcon_data->irq < 0) return extcon_data->irq; ret = request_any_context_irq(extcon_data->irq, gpio_irq_handler, pdata->irq_flags, pdev->name, extcon_data); if (ret < 0) return ret; platform_set_drvdata(pdev, extcon_data); /* Perform initial detection */ gpio_extcon_work(&extcon_data->work.work); return 0; } static int gpio_extcon_remove(struct platform_device *pdev) { struct gpio_extcon_data *extcon_data = 
platform_get_drvdata(pdev); cancel_delayed_work_sync(&extcon_data->work); free_irq(extcon_data->irq, extcon_data); return 0; } #ifdef CONFIG_PM_SLEEP static int gpio_extcon_resume(struct device *dev) { struct gpio_extcon_data *extcon_data; extcon_data = dev_get_drvdata(dev); if (extcon_data->check_on_resume) queue_delayed_work(system_power_efficient_wq, &extcon_data->work, extcon_data->debounce_jiffies); return 0; } #endif static SIMPLE_DEV_PM_OPS(gpio_extcon_pm_ops, NULL, gpio_extcon_resume); static struct platform_driver gpio_extcon_driver = { .probe = gpio_extcon_probe, .remove = gpio_extcon_remove, .driver = { .name = "extcon-gpio", .owner = THIS_MODULE, .pm = &gpio_extcon_pm_ops, }, }; module_platform_driver(gpio_extcon_driver); MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>"); MODULE_DESCRIPTION("GPIO extcon driver"); MODULE_LICENSE("GPL");
gpl-2.0
lg-devs/android_kernel_lge_msm8952
drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
331
21366
/* * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. * * Copyright (c) 2012, Intel Corporation. * * Author: Zach Brown <zab@zabbo.net> * Author: Peter J. Braam <braam@clusterfs.com> * Author: Phil Schwan <phil@clusterfs.com> * Author: Eric Barton <eric@bartonsoftware.com> * * This file is part of Portals, http://www.sf.net/projects/sandiaportals/ * * Portals is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * Portals is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Portals; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "socklnd.h" /* * Protocol entries : * pro_send_hello : send hello message * pro_recv_hello : receive hello message * pro_pack : pack message header * pro_unpack : unpack message header * pro_queue_tx_zcack() : Called holding BH lock: kss_lock * return 1 if ACK is piggybacked, otherwise return 0 * pro_queue_tx_msg() : Called holding BH lock: kss_lock * return the ACK that piggybacked by my message, or NULL * pro_handle_zcreq() : handler of incoming ZC-REQ * pro_handle_zcack() : handler of incoming ZC-ACK * pro_match_tx() : Called holding glock */ static ksock_tx_t * ksocknal_queue_tx_msg_v1(ksock_conn_t *conn, ksock_tx_t *tx_msg) { /* V1.x, just enqueue it */ list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue); return NULL; } void ksocknal_next_tx_carrier(ksock_conn_t *conn) { ksock_tx_t *tx = conn->ksnc_tx_carrier; /* Called holding BH lock: conn->ksnc_scheduler->kss_lock */ LASSERT (!list_empty(&conn->ksnc_tx_queue)); LASSERT (tx != NULL); /* Next TX that can carry ZC-ACK or LNet 
message */ if (tx->tx_list.next == &conn->ksnc_tx_queue) { /* no more packets queued */ conn->ksnc_tx_carrier = NULL; } else { conn->ksnc_tx_carrier = list_entry(tx->tx_list.next, ksock_tx_t, tx_list); LASSERT (conn->ksnc_tx_carrier->tx_msg.ksm_type == tx->tx_msg.ksm_type); } } static int ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn, ksock_tx_t *tx_ack, __u64 cookie) { ksock_tx_t *tx = conn->ksnc_tx_carrier; LASSERT (tx_ack == NULL || tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP); /* * Enqueue or piggyback tx_ack / cookie * . no tx can piggyback cookie of tx_ack (or cookie), just * enqueue the tx_ack (if tx_ack != NUL) and return NULL. * . There is tx can piggyback cookie of tx_ack (or cookie), * piggyback the cookie and return the tx. */ if (tx == NULL) { if (tx_ack != NULL) { list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue); conn->ksnc_tx_carrier = tx_ack; } return 0; } if (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP) { /* tx is noop zc-ack, can't piggyback zc-ack cookie */ if (tx_ack != NULL) list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue); return 0; } LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_LNET); LASSERT(tx->tx_msg.ksm_zc_cookies[1] == 0); if (tx_ack != NULL) cookie = tx_ack->tx_msg.ksm_zc_cookies[1]; /* piggyback the zc-ack cookie */ tx->tx_msg.ksm_zc_cookies[1] = cookie; /* move on to the next TX which can carry cookie */ ksocknal_next_tx_carrier(conn); return 1; } static ksock_tx_t * ksocknal_queue_tx_msg_v2(ksock_conn_t *conn, ksock_tx_t *tx_msg) { ksock_tx_t *tx = conn->ksnc_tx_carrier; /* * Enqueue tx_msg: * . If there is no NOOP on the connection, just enqueue * tx_msg and return NULL * . If there is NOOP on the connection, piggyback the cookie * and replace the NOOP tx, and return the NOOP tx. 
*/ if (tx == NULL) { /* nothing on queue */ list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue); conn->ksnc_tx_carrier = tx_msg; return NULL; } if (tx->tx_msg.ksm_type == KSOCK_MSG_LNET) { /* nothing to carry */ list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue); return NULL; } LASSERT (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP); /* There is a noop zc-ack can be piggybacked */ tx_msg->tx_msg.ksm_zc_cookies[1] = tx->tx_msg.ksm_zc_cookies[1]; ksocknal_next_tx_carrier(conn); /* use new_tx to replace the noop zc-ack packet */ list_add(&tx_msg->tx_list, &tx->tx_list); list_del(&tx->tx_list); return tx; } static int ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn, ksock_tx_t *tx_ack, __u64 cookie) { ksock_tx_t *tx; if (conn->ksnc_type != SOCKLND_CONN_ACK) return ksocknal_queue_tx_zcack_v2(conn, tx_ack, cookie); /* non-blocking ZC-ACK (to router) */ LASSERT (tx_ack == NULL || tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP); tx = conn->ksnc_tx_carrier; if (tx == NULL) { if (tx_ack != NULL) { list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue); conn->ksnc_tx_carrier = tx_ack; } return 0; } /* conn->ksnc_tx_carrier != NULL */ if (tx_ack != NULL) cookie = tx_ack->tx_msg.ksm_zc_cookies[1]; if (cookie == SOCKNAL_KEEPALIVE_PING) /* ignore keepalive PING */ return 1; if (tx->tx_msg.ksm_zc_cookies[1] == SOCKNAL_KEEPALIVE_PING) { /* replace the keepalive PING with a real ACK */ LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0); tx->tx_msg.ksm_zc_cookies[1] = cookie; return 1; } if (cookie == tx->tx_msg.ksm_zc_cookies[0] || cookie == tx->tx_msg.ksm_zc_cookies[1]) { CWARN("%s: duplicated ZC cookie: %llu\n", libcfs_id2str(conn->ksnc_peer->ksnp_id), cookie); return 1; /* XXX return error in the future */ } if (tx->tx_msg.ksm_zc_cookies[0] == 0) { /* NOOP tx has only one ZC-ACK cookie, can carry at least one more */ if (tx->tx_msg.ksm_zc_cookies[1] > cookie) { tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1]; tx->tx_msg.ksm_zc_cookies[1] = cookie; } else { tx->tx_msg.ksm_zc_cookies[0] 
= cookie; } if (tx->tx_msg.ksm_zc_cookies[0] - tx->tx_msg.ksm_zc_cookies[1] > 2) { /* not likely to carry more ACKs, skip it to simplify logic */ ksocknal_next_tx_carrier(conn); } return 1; } /* takes two or more cookies already */ if (tx->tx_msg.ksm_zc_cookies[0] > tx->tx_msg.ksm_zc_cookies[1]) { __u64 tmp = 0; /* two separated cookies: (a+2, a) or (a+1, a) */ LASSERT (tx->tx_msg.ksm_zc_cookies[0] - tx->tx_msg.ksm_zc_cookies[1] <= 2); if (tx->tx_msg.ksm_zc_cookies[0] - tx->tx_msg.ksm_zc_cookies[1] == 2) { if (cookie == tx->tx_msg.ksm_zc_cookies[1] + 1) tmp = cookie; } else if (cookie == tx->tx_msg.ksm_zc_cookies[1] - 1) { tmp = tx->tx_msg.ksm_zc_cookies[1]; } else if (cookie == tx->tx_msg.ksm_zc_cookies[0] + 1) { tmp = tx->tx_msg.ksm_zc_cookies[0]; } if (tmp != 0) { /* range of cookies */ tx->tx_msg.ksm_zc_cookies[0] = tmp - 1; tx->tx_msg.ksm_zc_cookies[1] = tmp + 1; return 1; } } else { /* ksm_zc_cookies[0] < ksm_zc_cookies[1], it is range of cookies */ if (cookie >= tx->tx_msg.ksm_zc_cookies[0] && cookie <= tx->tx_msg.ksm_zc_cookies[1]) { CWARN("%s: duplicated ZC cookie: %llu\n", libcfs_id2str(conn->ksnc_peer->ksnp_id), cookie); return 1; /* XXX: return error in the future */ } if (cookie == tx->tx_msg.ksm_zc_cookies[1] + 1) { tx->tx_msg.ksm_zc_cookies[1] = cookie; return 1; } if (cookie == tx->tx_msg.ksm_zc_cookies[0] - 1) { tx->tx_msg.ksm_zc_cookies[0] = cookie; return 1; } } /* failed to piggyback ZC-ACK */ if (tx_ack != NULL) { list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue); /* the next tx can piggyback at least 1 ACK */ ksocknal_next_tx_carrier(conn); } return 0; } static int ksocknal_match_tx(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk) { int nob; #if SOCKNAL_VERSION_DEBUG if (!*ksocknal_tunables.ksnd_typed_conns) return SOCKNAL_MATCH_YES; #endif if (tx == NULL || tx->tx_lnetmsg == NULL) { /* noop packet */ nob = offsetof(ksock_msg_t, ksm_u); } else { nob = tx->tx_lnetmsg->msg_len + ((conn->ksnc_proto == &ksocknal_protocol_v1x) ? 
sizeof(lnet_hdr_t) : sizeof(ksock_msg_t)); } /* default checking for typed connection */ switch (conn->ksnc_type) { default: CERROR("ksnc_type bad: %u\n", conn->ksnc_type); LBUG(); case SOCKLND_CONN_ANY: return SOCKNAL_MATCH_YES; case SOCKLND_CONN_BULK_IN: return SOCKNAL_MATCH_MAY; case SOCKLND_CONN_BULK_OUT: if (nob < *ksocknal_tunables.ksnd_min_bulk) return SOCKNAL_MATCH_MAY; else return SOCKNAL_MATCH_YES; case SOCKLND_CONN_CONTROL: if (nob >= *ksocknal_tunables.ksnd_min_bulk) return SOCKNAL_MATCH_MAY; else return SOCKNAL_MATCH_YES; } } static int ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk) { int nob; if (tx == NULL || tx->tx_lnetmsg == NULL) nob = offsetof(ksock_msg_t, ksm_u); else nob = tx->tx_lnetmsg->msg_len + sizeof(ksock_msg_t); switch (conn->ksnc_type) { default: CERROR("ksnc_type bad: %u\n", conn->ksnc_type); LBUG(); case SOCKLND_CONN_ANY: return SOCKNAL_MATCH_NO; case SOCKLND_CONN_ACK: if (nonblk) return SOCKNAL_MATCH_YES; else if (tx == NULL || tx->tx_lnetmsg == NULL) return SOCKNAL_MATCH_MAY; else return SOCKNAL_MATCH_NO; case SOCKLND_CONN_BULK_OUT: if (nonblk) return SOCKNAL_MATCH_NO; else if (nob < *ksocknal_tunables.ksnd_min_bulk) return SOCKNAL_MATCH_MAY; else return SOCKNAL_MATCH_YES; case SOCKLND_CONN_CONTROL: if (nonblk) return SOCKNAL_MATCH_NO; else if (nob >= *ksocknal_tunables.ksnd_min_bulk) return SOCKNAL_MATCH_MAY; else return SOCKNAL_MATCH_YES; } } /* (Sink) handle incoming ZC request from sender */ static int ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote) { ksock_peer_t *peer = c->ksnc_peer; ksock_conn_t *conn; ksock_tx_t *tx; int rc; read_lock(&ksocknal_data.ksnd_global_lock); conn = ksocknal_find_conn_locked(peer, NULL, !!remote); if (conn != NULL) { ksock_sched_t *sched = conn->ksnc_scheduler; LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL); spin_lock_bh(&sched->kss_lock); rc = conn->ksnc_proto->pro_queue_tx_zcack(conn, NULL, cookie); spin_unlock_bh(&sched->kss_lock); if (rc) { /* 
piggybacked */ read_unlock(&ksocknal_data.ksnd_global_lock); return 0; } } read_unlock(&ksocknal_data.ksnd_global_lock); /* ACK connection is not ready, or can't piggyback the ACK */ tx = ksocknal_alloc_tx_noop(cookie, !!remote); if (tx == NULL) return -ENOMEM; rc = ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id); if (rc == 0) return 0; ksocknal_free_tx(tx); return rc; } /* (Sender) handle ZC_ACK from sink */ static int ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2) { ksock_peer_t *peer = conn->ksnc_peer; ksock_tx_t *tx; ksock_tx_t *tmp; LIST_HEAD (zlist); int count; if (cookie1 == 0) cookie1 = cookie2; count = (cookie1 > cookie2) ? 2 : (cookie2 - cookie1 + 1); if (cookie2 == SOCKNAL_KEEPALIVE_PING && conn->ksnc_proto == &ksocknal_protocol_v3x) { /* keepalive PING for V3.x, just ignore it */ return count == 1 ? 0 : -EPROTO; } spin_lock(&peer->ksnp_lock); list_for_each_entry_safe(tx, tmp, &peer->ksnp_zc_req_list, tx_zc_list) { __u64 c = tx->tx_msg.ksm_zc_cookies[0]; if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) { tx->tx_msg.ksm_zc_cookies[0] = 0; list_del(&tx->tx_zc_list); list_add(&tx->tx_zc_list, &zlist); if (--count == 0) break; } } spin_unlock(&peer->ksnp_lock); while (!list_empty(&zlist)) { tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list); list_del(&tx->tx_zc_list); ksocknal_tx_decref(tx); } return count == 0 ? 
0 : -EPROTO; } static int ksocknal_send_hello_v1 (ksock_conn_t *conn, ksock_hello_msg_t *hello) { struct socket *sock = conn->ksnc_sock; lnet_hdr_t *hdr; lnet_magicversion_t *hmv; int rc; int i; CLASSERT(sizeof(lnet_magicversion_t) == offsetof(lnet_hdr_t, src_nid)); LIBCFS_ALLOC(hdr, sizeof(*hdr)); if (hdr == NULL) { CERROR("Can't allocate lnet_hdr_t\n"); return -ENOMEM; } hmv = (lnet_magicversion_t *)&hdr->dest_nid; /* Re-organize V2.x message header to V1.x (lnet_hdr_t) * header and send out */ hmv->magic = cpu_to_le32 (LNET_PROTO_TCP_MAGIC); hmv->version_major = cpu_to_le16 (KSOCK_PROTO_V1_MAJOR); hmv->version_minor = cpu_to_le16 (KSOCK_PROTO_V1_MINOR); if (the_lnet.ln_testprotocompat != 0) { /* single-shot proto check */ LNET_LOCK(); if ((the_lnet.ln_testprotocompat & 1) != 0) { hmv->version_major++; /* just different! */ the_lnet.ln_testprotocompat &= ~1; } if ((the_lnet.ln_testprotocompat & 2) != 0) { hmv->magic = LNET_PROTO_MAGIC; the_lnet.ln_testprotocompat &= ~2; } LNET_UNLOCK(); } hdr->src_nid = cpu_to_le64 (hello->kshm_src_nid); hdr->src_pid = cpu_to_le32 (hello->kshm_src_pid); hdr->type = cpu_to_le32 (LNET_MSG_HELLO); hdr->payload_length = cpu_to_le32 (hello->kshm_nips * sizeof(__u32)); hdr->msg.hello.type = cpu_to_le32 (hello->kshm_ctype); hdr->msg.hello.incarnation = cpu_to_le64 (hello->kshm_src_incarnation); rc = libcfs_sock_write(sock, hdr, sizeof(*hdr), lnet_acceptor_timeout()); if (rc != 0) { CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n", rc, &conn->ksnc_ipaddr, conn->ksnc_port); goto out; } if (hello->kshm_nips == 0) goto out; for (i = 0; i < (int) hello->kshm_nips; i++) { hello->kshm_ips[i] = __cpu_to_le32 (hello->kshm_ips[i]); } rc = libcfs_sock_write(sock, hello->kshm_ips, hello->kshm_nips * sizeof(__u32), lnet_acceptor_timeout()); if (rc != 0) { CNETERR("Error %d sending HELLO payload (%d)" " to %pI4h/%d\n", rc, hello->kshm_nips, &conn->ksnc_ipaddr, conn->ksnc_port); } out: LIBCFS_FREE(hdr, sizeof(*hdr)); return rc; } static int 
/*
 * ksocklnd HELLO handshake send/receive and message pack/unpack helpers,
 * plus the per-version protocol dispatch tables.
 *
 * NOTE(review): the "static int" return-type line of
 * ksocknal_send_hello_v2() is in the preceding part of this file.
 */
ksocknal_send_hello_v2 (ksock_conn_t *conn, ksock_hello_msg_t *hello)
{
	/* Send a V2/V3 HELLO: the fixed-size header first, then the
	 * optional list of interface IPs.  Returns 0 or -ve errno. */
	struct socket *sock = conn->ksnc_sock;
	int rc;

	hello->kshm_magic = LNET_PROTO_MAGIC;
	hello->kshm_version = conn->ksnc_proto->pro_version;

	if (the_lnet.ln_testprotocompat != 0) {
		/* single-shot proto check */
		LNET_LOCK();
		if ((the_lnet.ln_testprotocompat & 1) != 0) {
			hello->kshm_version++;   /* just different! */
			the_lnet.ln_testprotocompat &= ~1;
		}
		LNET_UNLOCK();
	}

	/* everything up to kshm_ips is the fixed part of the HELLO */
	rc = libcfs_sock_write(sock, hello,
			       offsetof(ksock_hello_msg_t, kshm_ips),
			       lnet_acceptor_timeout());
	if (rc != 0) {
		CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n",
			rc, &conn->ksnc_ipaddr, conn->ksnc_port);
		return rc;
	}

	if (hello->kshm_nips == 0)
		return 0;

	rc = libcfs_sock_write(sock, hello->kshm_ips,
			       hello->kshm_nips * sizeof(__u32),
			       lnet_acceptor_timeout());
	if (rc != 0) {
		CNETERR("Error %d sending HELLO payload (%d)"
			" to %pI4h/%d\n", rc, hello->kshm_nips,
			&conn->ksnc_ipaddr, conn->ksnc_port);
	}

	return rc;
}

/*
 * Receive the remainder of a V1 HELLO, wire-formatted as a little-endian
 * lnet_hdr_t (the leading type/magic was consumed by the caller), and
 * translate it into the in-memory ksock_hello_msg_t.
 */
static int
ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,
		       int timeout)
{
	struct socket *sock = conn->ksnc_sock;
	lnet_hdr_t *hdr;
	int rc;
	int i;

	LIBCFS_ALLOC(hdr, sizeof(*hdr));
	if (hdr == NULL) {
		CERROR("Can't allocate lnet_hdr_t\n");
		return -ENOMEM;
	}

	rc = libcfs_sock_read(sock, &hdr->src_nid,
			      sizeof (*hdr) - offsetof (lnet_hdr_t, src_nid),
			      timeout);
	if (rc != 0) {
		CERROR("Error %d reading rest of HELLO hdr from %pI4h\n",
			rc, &conn->ksnc_ipaddr);
		LASSERT (rc < 0 && rc != -EALREADY);
		goto out;
	}

	/* ...and check we got what we expected */
	if (hdr->type != cpu_to_le32 (LNET_MSG_HELLO)) {
		CERROR("Expecting a HELLO hdr,"
		       " but got type %d from %pI4h\n",
		       le32_to_cpu (hdr->type), &conn->ksnc_ipaddr);
		rc = -EPROTO;
		goto out;
	}

	/* V1 wire format is always little-endian */
	hello->kshm_src_nid = le64_to_cpu (hdr->src_nid);
	hello->kshm_src_pid = le32_to_cpu (hdr->src_pid);
	hello->kshm_src_incarnation = le64_to_cpu (hdr->msg.hello.incarnation);
	hello->kshm_ctype = le32_to_cpu (hdr->msg.hello.type);
	/* payload is an array of __u32 interface addresses */
	hello->kshm_nips = le32_to_cpu (hdr->payload_length) /
			   sizeof (__u32);

	if (hello->kshm_nips > LNET_MAX_INTERFACES) {
		CERROR("Bad nips %d from ip %pI4h\n",
		       hello->kshm_nips, &conn->ksnc_ipaddr);
		rc = -EPROTO;
		goto out;
	}

	if (hello->kshm_nips == 0)
		goto out;

	rc = libcfs_sock_read(sock, hello->kshm_ips,
			      hello->kshm_nips * sizeof(__u32), timeout);
	if (rc != 0) {
		CERROR("Error %d reading IPs from ip %pI4h\n",
			rc, &conn->ksnc_ipaddr);
		LASSERT(rc < 0 && rc != -EALREADY);
		goto out;
	}

	for (i = 0; i < (int) hello->kshm_nips; i++) {
		hello->kshm_ips[i] = __le32_to_cpu(hello->kshm_ips[i]);

		if (hello->kshm_ips[i] == 0) {
			CERROR("Zero IP[%d] from ip %pI4h\n",
			       i, &conn->ksnc_ipaddr);
			rc = -EPROTO;
			break;
		}
	}
out:
	LIBCFS_FREE(hdr, sizeof(*hdr));
	return rc;
}

/*
 * Receive the remainder of a V2/V3 HELLO.  The peer's byte order is
 * inferred from the magic the caller already read; ksnc_flip records
 * whether every subsequent field must be byte-swapped.
 */
static int
ksocknal_recv_hello_v2 (ksock_conn_t *conn, ksock_hello_msg_t *hello,
			int timeout)
{
	struct socket *sock = conn->ksnc_sock;
	int rc;
	int i;

	if (hello->kshm_magic == LNET_PROTO_MAGIC)
		conn->ksnc_flip = 0;
	else
		conn->ksnc_flip = 1;

	/* read the rest of the fixed-size part of the HELLO */
	rc = libcfs_sock_read(sock, &hello->kshm_src_nid,
			      offsetof(ksock_hello_msg_t, kshm_ips) -
			      offsetof(ksock_hello_msg_t, kshm_src_nid),
			      timeout);
	if (rc != 0) {
		CERROR("Error %d reading HELLO from %pI4h\n",
			rc, &conn->ksnc_ipaddr);
		LASSERT(rc < 0 && rc != -EALREADY);
		return rc;
	}

	if (conn->ksnc_flip) {
		__swab32s(&hello->kshm_src_pid);
		__swab64s(&hello->kshm_src_nid);
		__swab32s(&hello->kshm_dst_pid);
		__swab64s(&hello->kshm_dst_nid);
		__swab64s(&hello->kshm_src_incarnation);
		__swab64s(&hello->kshm_dst_incarnation);
		__swab32s(&hello->kshm_ctype);
		__swab32s(&hello->kshm_nips);
	}

	if (hello->kshm_nips > LNET_MAX_INTERFACES) {
		CERROR("Bad nips %d from ip %pI4h\n",
		       hello->kshm_nips, &conn->ksnc_ipaddr);
		return -EPROTO;
	}

	if (hello->kshm_nips == 0)
		return 0;

	rc = libcfs_sock_read(sock, hello->kshm_ips,
			      hello->kshm_nips * sizeof(__u32), timeout);
	if (rc != 0) {
		CERROR("Error %d reading IPs from ip %pI4h\n",
			rc, &conn->ksnc_ipaddr);
		LASSERT(rc < 0 && rc != -EALREADY);
		return rc;
	}

	for (i = 0; i < (int) hello->kshm_nips; i++) {
		if (conn->ksnc_flip)
			__swab32s(&hello->kshm_ips[i]);

		if (hello->kshm_ips[i] == 0) {
			CERROR("Zero IP[%d] from ip %pI4h\n",
			       i, &conn->ksnc_ipaddr);
			return -EPROTO;
		}
	}

	return 0;
}

/* Prepare a V1 tx: the lnet header is sent bare, with no ksock_msg_t
 * wrapper; total size is header plus LNET payload. */
static void
ksocknal_pack_msg_v1(ksock_tx_t *tx)
{
	/* V1.x has no KSOCK_MSG_NOOP */
	LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
	LASSERT(tx->tx_lnetmsg != NULL);

	tx->tx_iov[0].iov_base = (void *)&tx->tx_lnetmsg->msg_hdr;
	tx->tx_iov[0].iov_len = sizeof(lnet_hdr_t);

	tx->tx_resid = tx->tx_nob = tx->tx_lnetmsg->msg_len +
				    sizeof(lnet_hdr_t);
}

/* Prepare a V2+ tx: full ksock_msg_t wrapper for LNET messages, a
 * truncated one (up to ksnm_hdr) for NOOP/ZC-ACK messages. */
static void
ksocknal_pack_msg_v2(ksock_tx_t *tx)
{
	tx->tx_iov[0].iov_base = (void *)&tx->tx_msg;

	if (tx->tx_lnetmsg != NULL) {
		LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);

		tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = tx->tx_lnetmsg->msg_hdr;
		tx->tx_iov[0].iov_len = sizeof(ksock_msg_t);
		tx->tx_resid = tx->tx_nob = sizeof(ksock_msg_t) +
					    tx->tx_lnetmsg->msg_len;
	} else {
		LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP);

		tx->tx_iov[0].iov_len =
			offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
		tx->tx_resid = tx->tx_nob =
			offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
	}
	/* Don't checksum before start sending, because packet can be
	 * piggybacked with ACK */
}

/* A V1 message carries only the LNET payload: synthesize the fields
 * that exist only in the V2+ wrapper. */
static void
ksocknal_unpack_msg_v1(ksock_msg_t *msg)
{
	msg->ksm_csum = 0;
	msg->ksm_type = KSOCK_MSG_LNET;
	msg->ksm_zc_cookies[0] = msg->ksm_zc_cookies[1] = 0;
}

/* V2 messages arrive fully formed; nothing to fix up. */
static void
ksocknal_unpack_msg_v2(ksock_msg_t *msg)
{
	return;  /* Do nothing */
}

/* Protocol V1: legacy peers; no NOOPs and no zero-copy support. */
ksock_proto_t ksocknal_protocol_v1x =
{
	.pro_version = KSOCK_PROTO_V1,
	.pro_send_hello = ksocknal_send_hello_v1,
	.pro_recv_hello = ksocknal_recv_hello_v1,
	.pro_pack = ksocknal_pack_msg_v1,
	.pro_unpack = ksocknal_unpack_msg_v1,
	.pro_queue_tx_msg = ksocknal_queue_tx_msg_v1,
	.pro_handle_zcreq = NULL,
	.pro_handle_zcack = NULL,
	.pro_queue_tx_zcack = NULL,
	.pro_match_tx = ksocknal_match_tx
};

/* Protocol V2: adds the ksock_msg_t wrapper and zero-copy requests. */
ksock_proto_t ksocknal_protocol_v2x =
{
	.pro_version = KSOCK_PROTO_V2,
	.pro_send_hello = ksocknal_send_hello_v2,
	.pro_recv_hello = ksocknal_recv_hello_v2,
	.pro_pack = ksocknal_pack_msg_v2,
	.pro_unpack = ksocknal_unpack_msg_v2,
	.pro_queue_tx_msg = ksocknal_queue_tx_msg_v2,
	.pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v2,
	.pro_handle_zcreq = ksocknal_handle_zcreq,
	.pro_handle_zcack = ksocknal_handle_zcack,
	.pro_match_tx = ksocknal_match_tx
};

/* Protocol V3: same wire HELLO/messages as V2 but different ZC-ACK
 * queueing and tx matching. */
ksock_proto_t ksocknal_protocol_v3x =
{
	.pro_version = KSOCK_PROTO_V3,
	.pro_send_hello = ksocknal_send_hello_v2,
	.pro_recv_hello = ksocknal_recv_hello_v2,
	.pro_pack = ksocknal_pack_msg_v2,
	.pro_unpack = ksocknal_unpack_msg_v2,
	.pro_queue_tx_msg = ksocknal_queue_tx_msg_v2,
	.pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v3,
	.pro_handle_zcreq = ksocknal_handle_zcreq,
	.pro_handle_zcack = ksocknal_handle_zcack,
	.pro_match_tx = ksocknal_match_tx_v3
};
gpl-2.0
darchstar/kernel-heroc-2.6.32
drivers/edac/x38_edac.c
587
12142
/*
 * Intel X38 Memory Controller kernel module
 * Copyright (C) 2008 Cluster Computing, Inc.
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * This file is based on i3200_edac.c
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include "edac_core.h"

#define X38_REVISION "1.1"

#define EDAC_MOD_STR "x38_edac"

#define PCI_DEVICE_ID_INTEL_X38_HB 0x29e0

/* up to 4 ranks on each of 2 channels */
#define X38_RANKS 8
#define X38_RANKS_PER_CHANNEL 4
#define X38_CHANNELS 2

/* Intel X38 register addresses - device 0 function 0 - DRAM Controller */

#define X38_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */
#define X38_MCHBAR_HIGH 0x4c
#define X38_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */
#define X38_MMR_WINDOW_SIZE 16384

#define X38_TOM 0xa0	/* Top of Memory (16b)
			 *
			 * 15:10 reserved
			 *  9:0  total populated physical memory
			 */
#define X38_TOM_MASK 0x3ff	/* bits 9:0 */
#define X38_TOM_SHIFT 26	/* 64MiB grain */

#define X38_ERRSTS 0xc8	/* Error Status Register (16b)
			 *
			 * 15    reserved
			 * 14    Isochronous TBWRR Run Behind FIFO Full
			 *       (ITCV)
			 * 13    Isochronous TBWRR Run Behind FIFO Put
			 *       (ITSTV)
			 * 12    reserved
			 * 11    MCH Thermal Sensor Event
			 *       for SMI/SCI/SERR (GTSE)
			 * 10    reserved
			 *  9    LOCK to non-DRAM Memory Flag (LCKF)
			 *  8    reserved
			 *  7    DRAM Throttle Flag (DTF)
			 *  6:2  reserved
			 *  1    Multi-bit DRAM ECC Error Flag (DMERR)
			 *  0    Single-bit DRAM ECC Error Flag (DSERR)
			 */
#define X38_ERRSTS_UE 0x0002
#define X38_ERRSTS_CE 0x0001
#define X38_ERRSTS_BITS (X38_ERRSTS_UE | X38_ERRSTS_CE)

/* Intel MMIO register space - device 0 function 0 - MMR space */

#define X38_C0DRB 0x200	/* Channel 0 DRAM Rank Boundary (16b x 4)
			 *
			 * 15:10 reserved
			 *  9:0  Channel 0 DRAM Rank Boundary Address
			 */
#define X38_C1DRB 0x600	/* Channel 1 DRAM Rank Boundary (16b x 4) */
#define X38_DRB_MASK 0x3ff	/* bits 9:0 */
#define X38_DRB_SHIFT 26	/* 64MiB grain */

#define X38_C0ECCERRLOG 0x280	/* Channel 0 ECC Error Log (64b)
				 *
				 * 63:48 Error Column Address (ERRCOL)
				 * 47:32 Error Row Address (ERRROW)
				 * 31:29 Error Bank Address (ERRBANK)
				 * 28:27 Error Rank Address (ERRRANK)
				 * 26:24 reserved
				 * 23:16 Error Syndrome (ERRSYND)
				 * 15: 2 reserved
				 *    1  Multiple Bit Error Status (MERRSTS)
				 *    0  Correctable Error Status (CERRSTS)
				 */
#define X38_C1ECCERRLOG 0x680	/* Channel 1 ECC Error Log (64b) */
#define X38_ECCERRLOG_CE 0x1
#define X38_ECCERRLOG_UE 0x2
#define X38_ECCERRLOG_RANK_BITS 0x18000000
#define X38_ECCERRLOG_SYNDROME_BITS 0xff0000

#define X38_CAPID0 0xe0	/* see P.94 of spec for details */

/* cached channel count (1 or 2), set once by how_many_channel() */
static int x38_channel_num;

/* Read CAPID0 byte 8 to determine single/dual channel mode; caches the
 * result in x38_channel_num and returns it. */
static int how_many_channel(struct pci_dev *pdev)
{
	unsigned char capid0_8b; /* 8th byte of CAPID0 */

	pci_read_config_byte(pdev, X38_CAPID0 + 8, &capid0_8b);
	if (capid0_8b & 0x20) {	/* check DCD: Dual Channel Disable */
		debugf0("In single channel mode.\n");
		x38_channel_num = 1;
	} else {
		debugf0("In dual channel mode.\n");
		x38_channel_num = 2;
	}

	return x38_channel_num;
}

/* Extract the 8-bit ECC syndrome (ERRSYND, bits 23:16) from an ECCERRLOG. */
static unsigned long eccerrlog_syndrome(u64 log)
{
	return (log & X38_ECCERRLOG_SYNDROME_BITS) >> 16;
}

/* Map an ECCERRLOG rank field (ERRRANK, bits 28:27) plus channel to a
 * global csrow index. */
static int eccerrlog_row(int channel, u64 log)
{
	return ((log & X38_ECCERRLOG_RANK_BITS) >> 27) |
		(channel * X38_RANKS_PER_CHANNEL);
}

enum x38_chips {
	X38 = 0,
};

struct x38_dev_info {
	const char *ctl_name;
};

/* Double-read snapshot of the error registers; see
 * x38_get_and_clear_error_info() for why two ERRSTS reads are kept. */
struct x38_error_info {
	u16 errsts;
	u16 errsts2;
	u64 eccerrlog[X38_CHANNELS];
};

static const struct x38_dev_info x38_devs[] = {
	[X38] = {
		.ctl_name = "x38"},
};

static struct pci_dev *mci_pdev;
/* nonzero when the device was probed via the PCI core rather than the
 * manual fallback in x38_init() */
static int x38_registered = 1;

static void x38_clear_error_info(struct mem_ctl_info *mci)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(mci->dev);

	/*
	 * Clear any error bits.
	 * (Yes, we really clear bits by writing 1 to them.)
	 */
	pci_write_bits16(pdev, X38_ERRSTS, X38_ERRSTS_BITS,
			 X38_ERRSTS_BITS);
}

/* The ECCERRLOG registers are 64-bit but the MMIO window is read as two
 * 32-bit halves (low word first). */
static u64 x38_readq(const void __iomem *addr)
{
	return readl(addr) | (((u64)readl(addr + 4)) << 32);
}

static void x38_get_and_clear_error_info(struct mem_ctl_info *mci,
					 struct x38_error_info *info)
{
	struct pci_dev *pdev;
	void __iomem *window = mci->pvt_info;

	pdev = to_pci_dev(mci->dev);

	/*
	 * This is a mess because there is no atomic way to read all the
	 * registers at once and the registers can transition from CE being
	 * overwritten by UE.
	 */
	pci_read_config_word(pdev, X38_ERRSTS, &info->errsts);
	if (!(info->errsts & X38_ERRSTS_BITS))
		return;

	info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
	if (x38_channel_num == 2)
		info->eccerrlog[1] = x38_readq(window + X38_C1ECCERRLOG);

	pci_read_config_word(pdev, X38_ERRSTS, &info->errsts2);

	/*
	 * If the error is the same for both reads then the first set
	 * of reads is valid.  If there is a change then there is a CE
	 * with no info and the second set of reads is valid and
	 * should be UE info.
	 */
	if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
		info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
		if (x38_channel_num == 2)
			info->eccerrlog[1] =
				x38_readq(window + X38_C1ECCERRLOG);
	}

	x38_clear_error_info(mci);
}

/* Report the snapshotted errors to the EDAC core (UE takes priority
 * over CE per channel). */
static void x38_process_error_info(struct mem_ctl_info *mci,
				   struct x38_error_info *info)
{
	int channel;
	u64 log;

	if (!(info->errsts & X38_ERRSTS_BITS))
		return;

	if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
		/* a CE was overwritten between the two reads; its details
		 * are lost, so log it without row/syndrome info */
		edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
		info->errsts = info->errsts2;
	}

	for (channel = 0; channel < x38_channel_num; channel++) {
		log = info->eccerrlog[channel];
		if (log & X38_ECCERRLOG_UE) {
			edac_mc_handle_ue(mci, 0, 0,
				eccerrlog_row(channel, log), "x38 UE");
		} else if (log & X38_ECCERRLOG_CE) {
			edac_mc_handle_ce(mci, 0, 0,
				eccerrlog_syndrome(log),
				eccerrlog_row(channel, log), 0, "x38 CE");
		}
	}
}

/* Periodic poll callback installed as mci->edac_check. */
static void x38_check(struct mem_ctl_info *mci)
{
	struct x38_error_info info;

	debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
	x38_get_and_clear_error_info(mci, &info);
	x38_process_error_info(mci, &info);
}

/* Enable the MCHBAR MMIO window and ioremap it; returns NULL on failure. */
void __iomem *x38_map_mchbar(struct pci_dev *pdev)
{
	union {
		u64 mchbar;
		struct {
			u32 mchbar_low;
			u32 mchbar_high;
		};
	} u;
	void __iomem *window;

	pci_read_config_dword(pdev, X38_MCHBAR_LOW, &u.mchbar_low);
	/* bit 0 is the MCHBAR enable */
	pci_write_config_dword(pdev, X38_MCHBAR_LOW, u.mchbar_low | 0x1);
	pci_read_config_dword(pdev, X38_MCHBAR_HIGH, &u.mchbar_high);
	u.mchbar &= X38_MCHBAR_MASK;

	/* reject a BAR that doesn't fit in resource_size_t on this arch */
	if (u.mchbar != (resource_size_t)u.mchbar) {
		printk(KERN_ERR
			"x38: mmio space beyond accessible range (0x%llx)\n",
			(unsigned long long)u.mchbar);
		return NULL;
	}

	window = ioremap_nocache(u.mchbar, X38_MMR_WINDOW_SIZE);
	if (!window)
		printk(KERN_ERR "x38: cannot map mmio space at 0x%llx\n",
			(unsigned long long)u.mchbar);

	return window;
}

/* Read all DRAM Rank Boundary registers for both channels. */
static void x38_get_drbs(void __iomem *window,
			u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
{
	int i;

	for (i = 0; i < X38_RANKS_PER_CHANNEL; i++) {
		drbs[0][i] = readw(window + X38_C0DRB + 2*i) & X38_DRB_MASK;
		drbs[1][i] = readw(window + X38_C1DRB + 2*i) & X38_DRB_MASK;
	}
}

/* "Stacked" mode: the last DRB equals Top-of-Memory, i.e. channel 1 is
 * mapped above channel 0 rather than interleaved. */
static bool x38_is_stacked(struct pci_dev *pdev,
			u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
{
	u16 tom;

	pci_read_config_word(pdev, X38_TOM, &tom);
	tom &= X38_TOM_MASK;

	return drbs[X38_CHANNELS - 1][X38_RANKS_PER_CHANNEL - 1] == tom;
}

/* Convert cumulative DRB values to the page count of a single rank. */
static unsigned long drb_to_nr_pages(
	u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL],
	bool stacked, int channel, int rank)
{
	int n;

	n = drbs[channel][rank];
	if (rank > 0)
		n -= drbs[channel][rank - 1];
	/* in stacked mode channel 1 boundaries include all of channel 0 */
	if (stacked && (channel == 1) && drbs[channel][rank] ==
				drbs[channel][X38_RANKS_PER_CHANNEL - 1]) {
		n -= drbs[0][X38_RANKS_PER_CHANNEL - 1];
	}

	/* DRB grain is 64MiB; convert to pages */
	n <<= (X38_DRB_SHIFT - PAGE_SHIFT);
	return n;
}

/* Probe one controller: map MCHBAR, size the csrows from the DRBs, and
 * register the mem_ctl_info with the EDAC core. */
static int x38_probe1(struct pci_dev *pdev, int dev_idx)
{
	int rc;
	int i;
	struct mem_ctl_info *mci = NULL;
	unsigned long last_page;
	u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL];
	bool stacked;
	void __iomem *window;

	debugf0("MC: %s()\n", __func__);

	window = x38_map_mchbar(pdev);
	if (!window)
		return -ENODEV;

	x38_get_drbs(window, drbs);

	how_many_channel(pdev);

	/* FIXME: unconventional pvt_info usage */
	mci = edac_mc_alloc(0, X38_RANKS, x38_channel_num, 0);
	if (!mci)
		return -ENOMEM;

	debugf3("MC: %s(): init mci\n", __func__);

	mci->dev = &pdev->dev;
	mci->mtype_cap = MEM_FLAG_DDR2;

	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;

	mci->mod_name = EDAC_MOD_STR;
	mci->mod_ver = X38_REVISION;
	mci->ctl_name = x38_devs[dev_idx].ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->edac_check = x38_check;
	mci->ctl_page_to_phys = NULL;
	mci->pvt_info = window;

	stacked = x38_is_stacked(pdev, drbs);

	/*
	 * The dram rank boundary (DRB) reg values are boundary addresses
	 * for each DRAM rank with a granularity of 64MB.  DRB regs are
	 * cumulative; the last one will contain the total memory
	 * contained in all ranks.
	 */
	last_page = -1UL;
	for (i = 0; i < mci->nr_csrows; i++) {
		unsigned long nr_pages;
		struct csrow_info *csrow = &mci->csrows[i];

		nr_pages = drb_to_nr_pages(drbs, stacked,
			i / X38_RANKS_PER_CHANNEL,
			i % X38_RANKS_PER_CHANNEL);

		if (nr_pages == 0) {
			csrow->mtype = MEM_EMPTY;
			continue;
		}

		csrow->first_page = last_page + 1;
		last_page += nr_pages;
		csrow->last_page = last_page;
		csrow->nr_pages = nr_pages;

		csrow->grain = nr_pages << PAGE_SHIFT;
		csrow->mtype = MEM_DDR2;
		csrow->dtype = DEV_UNKNOWN;
		csrow->edac_mode = EDAC_UNKNOWN;
	}

	/* discard any stale error state before going live */
	x38_clear_error_info(mci);

	rc = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
		goto fail;
	}

	/* get this far and it's successful */
	debugf3("MC: %s(): success\n", __func__);
	return 0;

fail:
	iounmap(window);
	if (mci)
		edac_mc_free(mci);

	return rc;
}

static int __devinit x38_init_one(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	int rc;

	debugf0("MC: %s()\n", __func__);

	if (pci_enable_device(pdev) < 0)
		return -EIO;

	rc = x38_probe1(pdev, ent->driver_data);
	/* remember the device for the module-exit fallback path */
	if (!mci_pdev)
		mci_pdev = pci_dev_get(pdev);

	return rc;
}

static void __devexit x38_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;

	debugf0("%s()\n", __func__);

	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	/* pvt_info holds the ioremapped MCHBAR window */
	iounmap(mci->pvt_info);

	edac_mc_free(mci);
}

static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
	{
	 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 X38},
	{
	 0,
	 }			/* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, x38_pci_tbl);

static struct pci_driver x38_driver = {
	.name = EDAC_MOD_STR,
	.probe = x38_init_one,
	.remove = __devexit_p(x38_remove_one),
	.id_table = x38_pci_tbl,
};

static int __init x38_init(void)
{
	int pci_rc;

	debugf3("MC: %s()\n", __func__);

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	pci_rc = pci_register_driver(&x38_driver);
	if (pci_rc < 0)
		goto fail0;

	if (!mci_pdev) {
		/* PCI core did not bind us (e.g. device claimed elsewhere):
		 * locate the host bridge and probe it by hand */
		x38_registered = 0;
		mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
					PCI_DEVICE_ID_INTEL_X38_HB, NULL);
		if (!mci_pdev) {
			debugf0("x38 pci_get_device fail\n");
			pci_rc = -ENODEV;
			goto fail1;
		}

		pci_rc = x38_init_one(mci_pdev, x38_pci_tbl);
		if (pci_rc < 0) {
			debugf0("x38 init fail\n");
			pci_rc = -ENODEV;
			goto fail1;
		}
	}

	return 0;

fail1:
	pci_unregister_driver(&x38_driver);

fail0:
	if (mci_pdev)
		pci_dev_put(mci_pdev);

	return pci_rc;
}

static void __exit x38_exit(void)
{
	debugf3("MC: %s()\n", __func__);

	pci_unregister_driver(&x38_driver);
	/* if we probed manually, tear down manually too */
	if (!x38_registered) {
		x38_remove_one(mci_pdev);
		pci_dev_put(mci_pdev);
	}
}

module_init(x38_init);
module_exit(x38_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cluster Computing, Inc. Hitoshi Mitake");
MODULE_DESCRIPTION("MC support for Intel X38 memory hub controllers");

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
gpl-2.0
agayev/linux
fs/omfs/dir.c
843
9854
/* * OMFS (as used by RIO Karma) directory operations. * Copyright (C) 2005 Bob Copeland <me@bobcopeland.com> * Released under GPL v2. */ #include <linux/fs.h> #include <linux/ctype.h> #include <linux/buffer_head.h> #include "omfs.h" static int omfs_hash(const char *name, int namelen, int mod) { int i, hash = 0; for (i = 0; i < namelen; i++) hash ^= tolower(name[i]) << (i % 24); return hash % mod; } /* * Finds the bucket for a given name and reads the containing block; * *ofs is set to the offset of the first list entry. */ static struct buffer_head *omfs_get_bucket(struct inode *dir, const char *name, int namelen, int *ofs) { int nbuckets = (dir->i_size - OMFS_DIR_START)/8; int bucket = omfs_hash(name, namelen, nbuckets); *ofs = OMFS_DIR_START + bucket * 8; return omfs_bread(dir->i_sb, dir->i_ino); } static struct buffer_head *omfs_scan_list(struct inode *dir, u64 block, const char *name, int namelen, u64 *prev_block) { struct buffer_head *bh; struct omfs_inode *oi; int err = -ENOENT; *prev_block = ~0; while (block != ~0) { bh = omfs_bread(dir->i_sb, block); if (!bh) { err = -EIO; goto err; } oi = (struct omfs_inode *) bh->b_data; if (omfs_is_bad(OMFS_SB(dir->i_sb), &oi->i_head, block)) { brelse(bh); goto err; } if (strncmp(oi->i_name, name, namelen) == 0) return bh; *prev_block = block; block = be64_to_cpu(oi->i_sibling); brelse(bh); } err: return ERR_PTR(err); } static struct buffer_head *omfs_find_entry(struct inode *dir, const char *name, int namelen) { struct buffer_head *bh; int ofs; u64 block, dummy; bh = omfs_get_bucket(dir, name, namelen, &ofs); if (!bh) return ERR_PTR(-EIO); block = be64_to_cpu(*((__be64 *) &bh->b_data[ofs])); brelse(bh); return omfs_scan_list(dir, block, name, namelen, &dummy); } int omfs_make_empty(struct inode *inode, struct super_block *sb) { struct omfs_sb_info *sbi = OMFS_SB(sb); struct buffer_head *bh; struct omfs_inode *oi; bh = omfs_bread(sb, inode->i_ino); if (!bh) return -ENOMEM; memset(bh->b_data, 0, sizeof(struct 
omfs_inode)); if (S_ISDIR(inode->i_mode)) { memset(&bh->b_data[OMFS_DIR_START], 0xff, sbi->s_sys_blocksize - OMFS_DIR_START); } else omfs_make_empty_table(bh, OMFS_EXTENT_START); oi = (struct omfs_inode *) bh->b_data; oi->i_head.h_self = cpu_to_be64(inode->i_ino); oi->i_sibling = ~cpu_to_be64(0ULL); mark_buffer_dirty(bh); brelse(bh); return 0; } static int omfs_add_link(struct dentry *dentry, struct inode *inode) { struct inode *dir = d_inode(dentry->d_parent); const char *name = dentry->d_name.name; int namelen = dentry->d_name.len; struct omfs_inode *oi; struct buffer_head *bh; u64 block; __be64 *entry; int ofs; /* just prepend to head of queue in proper bucket */ bh = omfs_get_bucket(dir, name, namelen, &ofs); if (!bh) goto out; entry = (__be64 *) &bh->b_data[ofs]; block = be64_to_cpu(*entry); *entry = cpu_to_be64(inode->i_ino); mark_buffer_dirty(bh); brelse(bh); /* now set the sibling and parent pointers on the new inode */ bh = omfs_bread(dir->i_sb, inode->i_ino); if (!bh) goto out; oi = (struct omfs_inode *) bh->b_data; memcpy(oi->i_name, name, namelen); memset(oi->i_name + namelen, 0, OMFS_NAMELEN - namelen); oi->i_sibling = cpu_to_be64(block); oi->i_parent = cpu_to_be64(dir->i_ino); mark_buffer_dirty(bh); brelse(bh); dir->i_ctime = CURRENT_TIME_SEC; /* mark affected inodes dirty to rebuild checksums */ mark_inode_dirty(dir); mark_inode_dirty(inode); return 0; out: return -ENOMEM; } static int omfs_delete_entry(struct dentry *dentry) { struct inode *dir = d_inode(dentry->d_parent); struct inode *dirty; const char *name = dentry->d_name.name; int namelen = dentry->d_name.len; struct omfs_inode *oi; struct buffer_head *bh, *bh2; __be64 *entry, next; u64 block, prev; int ofs; int err = -ENOMEM; /* delete the proper node in the bucket's linked list */ bh = omfs_get_bucket(dir, name, namelen, &ofs); if (!bh) goto out; entry = (__be64 *) &bh->b_data[ofs]; block = be64_to_cpu(*entry); bh2 = omfs_scan_list(dir, block, name, namelen, &prev); if (IS_ERR(bh2)) { err = 
PTR_ERR(bh2); goto out_free_bh; } oi = (struct omfs_inode *) bh2->b_data; next = oi->i_sibling; brelse(bh2); if (prev != ~0) { /* found in middle of list, get list ptr */ brelse(bh); bh = omfs_bread(dir->i_sb, prev); if (!bh) goto out; oi = (struct omfs_inode *) bh->b_data; entry = &oi->i_sibling; } *entry = next; mark_buffer_dirty(bh); if (prev != ~0) { dirty = omfs_iget(dir->i_sb, prev); if (!IS_ERR(dirty)) { mark_inode_dirty(dirty); iput(dirty); } } err = 0; out_free_bh: brelse(bh); out: return err; } static int omfs_dir_is_empty(struct inode *inode) { int nbuckets = (inode->i_size - OMFS_DIR_START) / 8; struct buffer_head *bh; u64 *ptr; int i; bh = omfs_bread(inode->i_sb, inode->i_ino); if (!bh) return 0; ptr = (u64 *) &bh->b_data[OMFS_DIR_START]; for (i = 0; i < nbuckets; i++, ptr++) if (*ptr != ~0) break; brelse(bh); return *ptr != ~0; } static int omfs_remove(struct inode *dir, struct dentry *dentry) { struct inode *inode = d_inode(dentry); int ret; if (S_ISDIR(inode->i_mode) && !omfs_dir_is_empty(inode)) return -ENOTEMPTY; ret = omfs_delete_entry(dentry); if (ret) return ret; clear_nlink(inode); mark_inode_dirty(inode); mark_inode_dirty(dir); return 0; } static int omfs_add_node(struct inode *dir, struct dentry *dentry, umode_t mode) { int err; struct inode *inode = omfs_new_inode(dir, mode); if (IS_ERR(inode)) return PTR_ERR(inode); err = omfs_make_empty(inode, dir->i_sb); if (err) goto out_free_inode; err = omfs_add_link(dentry, inode); if (err) goto out_free_inode; d_instantiate(dentry, inode); return 0; out_free_inode: iput(inode); return err; } static int omfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { return omfs_add_node(dir, dentry, mode | S_IFDIR); } static int omfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { return omfs_add_node(dir, dentry, mode | S_IFREG); } static struct dentry *omfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct buffer_head *bh; struct inode 
*inode = NULL; if (dentry->d_name.len > OMFS_NAMELEN) return ERR_PTR(-ENAMETOOLONG); bh = omfs_find_entry(dir, dentry->d_name.name, dentry->d_name.len); if (!IS_ERR(bh)) { struct omfs_inode *oi = (struct omfs_inode *)bh->b_data; ino_t ino = be64_to_cpu(oi->i_head.h_self); brelse(bh); inode = omfs_iget(dir->i_sb, ino); if (IS_ERR(inode)) return ERR_CAST(inode); } d_add(dentry, inode); return NULL; } /* sanity check block's self pointer */ int omfs_is_bad(struct omfs_sb_info *sbi, struct omfs_header *header, u64 fsblock) { int is_bad; u64 ino = be64_to_cpu(header->h_self); is_bad = ((ino != fsblock) || (ino < sbi->s_root_ino) || (ino > sbi->s_num_blocks)); if (is_bad) printk(KERN_WARNING "omfs: bad hash chain detected\n"); return is_bad; } static bool omfs_fill_chain(struct inode *dir, struct dir_context *ctx, u64 fsblock, int hindex) { /* follow chain in this bucket */ while (fsblock != ~0) { struct buffer_head *bh = omfs_bread(dir->i_sb, fsblock); struct omfs_inode *oi; u64 self; unsigned char d_type; if (!bh) return true; oi = (struct omfs_inode *) bh->b_data; if (omfs_is_bad(OMFS_SB(dir->i_sb), &oi->i_head, fsblock)) { brelse(bh); return true; } self = fsblock; fsblock = be64_to_cpu(oi->i_sibling); /* skip visited nodes */ if (hindex) { hindex--; brelse(bh); continue; } d_type = (oi->i_type == OMFS_DIR) ? 
DT_DIR : DT_REG; if (!dir_emit(ctx, oi->i_name, strnlen(oi->i_name, OMFS_NAMELEN), self, d_type)) { brelse(bh); return false; } brelse(bh); ctx->pos++; } return true; } static int omfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct inode *new_inode = d_inode(new_dentry); struct inode *old_inode = d_inode(old_dentry); int err; if (new_inode) { /* overwriting existing file/dir */ err = omfs_remove(new_dir, new_dentry); if (err) goto out; } /* since omfs locates files by name, we need to unlink _before_ * adding the new link or we won't find the old one */ err = omfs_delete_entry(old_dentry); if (err) goto out; mark_inode_dirty(old_dir); err = omfs_add_link(new_dentry, old_inode); if (err) goto out; old_inode->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(old_inode); out: return err; } static int omfs_readdir(struct file *file, struct dir_context *ctx) { struct inode *dir = file_inode(file); struct buffer_head *bh; __be64 *p; unsigned int hchain, hindex; int nbuckets; if (ctx->pos >> 32) return -EINVAL; if (ctx->pos < 1 << 20) { if (!dir_emit_dots(file, ctx)) return 0; ctx->pos = 1 << 20; } nbuckets = (dir->i_size - OMFS_DIR_START) / 8; /* high 12 bits store bucket + 1 and low 20 bits store hash index */ hchain = (ctx->pos >> 20) - 1; hindex = ctx->pos & 0xfffff; bh = omfs_bread(dir->i_sb, dir->i_ino); if (!bh) return -EINVAL; p = (__be64 *)(bh->b_data + OMFS_DIR_START) + hchain; for (; hchain < nbuckets; hchain++) { __u64 fsblock = be64_to_cpu(*p++); if (!omfs_fill_chain(dir, ctx, fsblock, hindex)) break; hindex = 0; ctx->pos = (hchain+2) << 20; } brelse(bh); return 0; } const struct inode_operations omfs_dir_inops = { .lookup = omfs_lookup, .mkdir = omfs_mkdir, .rename = omfs_rename, .create = omfs_create, .unlink = omfs_remove, .rmdir = omfs_remove, }; const struct file_operations omfs_dir_operations = { .read = generic_read_dir, .iterate = omfs_readdir, .llseek = generic_file_llseek, };
gpl-2.0
pro4tlzz/P9000-Kernel
drivers/input/keyboard/lpc32xx-keys.c
843
10510
/* * NXP LPC32xx SoC Key Scan Interface * * Authors: * Kevin Wells <kevin.wells@nxp.com> * Roland Stigge <stigge@antcom.de> * * Copyright (C) 2010 NXP Semiconductors * Copyright (C) 2012 Roland Stigge * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * * This controller supports square key matrices from 1x1 up to 8x8 */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/irq.h> #include <linux/pm.h> #include <linux/platform_device.h> #include <linux/input.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/of.h> #include <linux/input/matrix_keypad.h> #define DRV_NAME "lpc32xx_keys" /* * Key scanner register offsets */ #define LPC32XX_KS_DEB(x) ((x) + 0x00) #define LPC32XX_KS_STATE_COND(x) ((x) + 0x04) #define LPC32XX_KS_IRQ(x) ((x) + 0x08) #define LPC32XX_KS_SCAN_CTL(x) ((x) + 0x0C) #define LPC32XX_KS_FAST_TST(x) ((x) + 0x10) #define LPC32XX_KS_MATRIX_DIM(x) ((x) + 0x14) /* 1..8 */ #define LPC32XX_KS_DATA(x, y) ((x) + 0x40 + ((y) << 2)) #define LPC32XX_KSCAN_DEB_NUM_DEB_PASS(n) ((n) & 0xFF) #define LPC32XX_KSCAN_SCOND_IN_IDLE 0x0 #define LPC32XX_KSCAN_SCOND_IN_SCANONCE 0x1 #define LPC32XX_KSCAN_SCOND_IN_IRQGEN 0x2 #define LPC32XX_KSCAN_SCOND_IN_SCAN_MATRIX 0x3 #define LPC32XX_KSCAN_IRQ_PENDING_CLR 0x1 #define LPC32XX_KSCAN_SCTRL_SCAN_DELAY(n) ((n) & 0xFF) #define LPC32XX_KSCAN_FTST_FORCESCANONCE 0x1 #define LPC32XX_KSCAN_FTST_USE32K_CLK 0x2 #define LPC32XX_KSCAN_MSEL_SELECT(n) ((n) & 0xF) struct lpc32xx_kscan_drv { struct input_dev *input; struct clk *clk; struct resource 
*iores; void __iomem *kscan_base; unsigned int irq; u32 matrix_sz; /* Size of matrix in XxY, ie. 3 = 3x3 */ u32 deb_clks; /* Debounce clocks (based on 32KHz clock) */ u32 scan_delay; /* Scan delay (based on 32KHz clock) */ unsigned short *keymap; /* Pointer to key map for the scan matrix */ unsigned int row_shift; u8 lastkeystates[8]; }; static void lpc32xx_mod_states(struct lpc32xx_kscan_drv *kscandat, int col) { struct input_dev *input = kscandat->input; unsigned row, changed, scancode, keycode; u8 key; key = readl(LPC32XX_KS_DATA(kscandat->kscan_base, col)); changed = key ^ kscandat->lastkeystates[col]; kscandat->lastkeystates[col] = key; for (row = 0; changed; row++, changed >>= 1) { if (changed & 1) { /* Key state changed, signal an event */ scancode = MATRIX_SCAN_CODE(row, col, kscandat->row_shift); keycode = kscandat->keymap[scancode]; input_event(input, EV_MSC, MSC_SCAN, scancode); input_report_key(input, keycode, key & (1 << row)); } } } static irqreturn_t lpc32xx_kscan_irq(int irq, void *dev_id) { struct lpc32xx_kscan_drv *kscandat = dev_id; int i; for (i = 0; i < kscandat->matrix_sz; i++) lpc32xx_mod_states(kscandat, i); writel(1, LPC32XX_KS_IRQ(kscandat->kscan_base)); input_sync(kscandat->input); return IRQ_HANDLED; } static int lpc32xx_kscan_open(struct input_dev *dev) { struct lpc32xx_kscan_drv *kscandat = input_get_drvdata(dev); int error; error = clk_prepare_enable(kscandat->clk); if (error) return error; writel(1, LPC32XX_KS_IRQ(kscandat->kscan_base)); return 0; } static void lpc32xx_kscan_close(struct input_dev *dev) { struct lpc32xx_kscan_drv *kscandat = input_get_drvdata(dev); writel(1, LPC32XX_KS_IRQ(kscandat->kscan_base)); clk_disable_unprepare(kscandat->clk); } static int lpc32xx_parse_dt(struct device *dev, struct lpc32xx_kscan_drv *kscandat) { struct device_node *np = dev->of_node; u32 rows = 0, columns = 0; int err; err = matrix_keypad_parse_of_params(dev, &rows, &columns); if (err) return err; if (rows != columns) { dev_err(dev, "rows and 
columns must be equal!\n"); return -EINVAL; } kscandat->matrix_sz = rows; kscandat->row_shift = get_count_order(columns); of_property_read_u32(np, "nxp,debounce-delay-ms", &kscandat->deb_clks); of_property_read_u32(np, "nxp,scan-delay-ms", &kscandat->scan_delay); if (!kscandat->deb_clks || !kscandat->scan_delay) { dev_err(dev, "debounce or scan delay not specified\n"); return -EINVAL; } return 0; } static int lpc32xx_kscan_probe(struct platform_device *pdev) { struct lpc32xx_kscan_drv *kscandat; struct input_dev *input; struct resource *res; size_t keymap_size; int error; int irq; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "failed to get platform I/O memory\n"); return -EINVAL; } irq = platform_get_irq(pdev, 0); if (irq < 0 || irq >= NR_IRQS) { dev_err(&pdev->dev, "failed to get platform irq\n"); return -EINVAL; } kscandat = kzalloc(sizeof(struct lpc32xx_kscan_drv), GFP_KERNEL); if (!kscandat) { dev_err(&pdev->dev, "failed to allocate memory\n"); return -ENOMEM; } error = lpc32xx_parse_dt(&pdev->dev, kscandat); if (error) { dev_err(&pdev->dev, "failed to parse device tree\n"); goto err_free_mem; } keymap_size = sizeof(kscandat->keymap[0]) * (kscandat->matrix_sz << kscandat->row_shift); kscandat->keymap = kzalloc(keymap_size, GFP_KERNEL); if (!kscandat->keymap) { dev_err(&pdev->dev, "could not allocate memory for keymap\n"); error = -ENOMEM; goto err_free_mem; } kscandat->input = input = input_allocate_device(); if (!input) { dev_err(&pdev->dev, "failed to allocate input device\n"); error = -ENOMEM; goto err_free_keymap; } /* Setup key input */ input->name = pdev->name; input->phys = "lpc32xx/input0"; input->id.vendor = 0x0001; input->id.product = 0x0001; input->id.version = 0x0100; input->open = lpc32xx_kscan_open; input->close = lpc32xx_kscan_close; input->dev.parent = &pdev->dev; input_set_capability(input, EV_MSC, MSC_SCAN); error = matrix_keypad_build_keymap(NULL, NULL, kscandat->matrix_sz, kscandat->matrix_sz, 
kscandat->keymap, kscandat->input); if (error) { dev_err(&pdev->dev, "failed to build keymap\n"); goto err_free_input; } input_set_drvdata(kscandat->input, kscandat); kscandat->iores = request_mem_region(res->start, resource_size(res), pdev->name); if (!kscandat->iores) { dev_err(&pdev->dev, "failed to request I/O memory\n"); error = -EBUSY; goto err_free_input; } kscandat->kscan_base = ioremap(kscandat->iores->start, resource_size(kscandat->iores)); if (!kscandat->kscan_base) { dev_err(&pdev->dev, "failed to remap I/O memory\n"); error = -EBUSY; goto err_release_memregion; } /* Get the key scanner clock */ kscandat->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(kscandat->clk)) { dev_err(&pdev->dev, "failed to get clock\n"); error = PTR_ERR(kscandat->clk); goto err_unmap; } /* Configure the key scanner */ error = clk_prepare_enable(kscandat->clk); if (error) goto err_clk_put; writel(kscandat->deb_clks, LPC32XX_KS_DEB(kscandat->kscan_base)); writel(kscandat->scan_delay, LPC32XX_KS_SCAN_CTL(kscandat->kscan_base)); writel(LPC32XX_KSCAN_FTST_USE32K_CLK, LPC32XX_KS_FAST_TST(kscandat->kscan_base)); writel(kscandat->matrix_sz, LPC32XX_KS_MATRIX_DIM(kscandat->kscan_base)); writel(1, LPC32XX_KS_IRQ(kscandat->kscan_base)); clk_disable_unprepare(kscandat->clk); error = request_irq(irq, lpc32xx_kscan_irq, 0, pdev->name, kscandat); if (error) { dev_err(&pdev->dev, "failed to request irq\n"); goto err_clk_put; } error = input_register_device(kscandat->input); if (error) { dev_err(&pdev->dev, "failed to register input device\n"); goto err_free_irq; } platform_set_drvdata(pdev, kscandat); return 0; err_free_irq: free_irq(irq, kscandat); err_clk_put: clk_put(kscandat->clk); err_unmap: iounmap(kscandat->kscan_base); err_release_memregion: release_mem_region(kscandat->iores->start, resource_size(kscandat->iores)); err_free_input: input_free_device(kscandat->input); err_free_keymap: kfree(kscandat->keymap); err_free_mem: kfree(kscandat); return error; } static int 
lpc32xx_kscan_remove(struct platform_device *pdev)
{
	struct lpc32xx_kscan_drv *kscandat = platform_get_drvdata(pdev);

	/*
	 * Tear down in roughly the reverse order of probe: the IRQ is
	 * released first so the handler can no longer run while the
	 * resources below are freed.
	 */
	free_irq(platform_get_irq(pdev, 0), kscandat);
	clk_put(kscandat->clk);
	iounmap(kscandat->kscan_base);
	release_mem_region(kscandat->iores->start,
			   resource_size(kscandat->iores));
	input_unregister_device(kscandat->input);
	kfree(kscandat->keymap);
	kfree(kscandat);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
/*
 * System suspend: if the input device is open (clock running, IRQ armed
 * by the open() callback), ack any pending key-scanner interrupt and gate
 * the clock.  If nobody has the device open there is nothing to do.
 * input->mutex serializes against open()/close().
 */
static int lpc32xx_kscan_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct lpc32xx_kscan_drv *kscandat = platform_get_drvdata(pdev);
	struct input_dev *input = kscandat->input;

	mutex_lock(&input->mutex);

	if (input->users) {
		/* Clear IRQ and disable clock */
		writel(1, LPC32XX_KS_IRQ(kscandat->kscan_base));
		clk_disable_unprepare(kscandat->clk);
	}

	mutex_unlock(&input->mutex);
	return 0;
}

/*
 * System resume: mirror of suspend — re-enable the clock and ack any
 * interrupt that may have latched while suspended.  Returns the
 * clk_prepare_enable() error, if any, so a failed resume is reported.
 */
static int lpc32xx_kscan_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct lpc32xx_kscan_drv *kscandat = platform_get_drvdata(pdev);
	struct input_dev *input = kscandat->input;
	int retval = 0;

	mutex_lock(&input->mutex);

	if (input->users) {
		/* Enable clock and clear IRQ */
		retval = clk_prepare_enable(kscandat->clk);
		if (retval == 0)
			writel(1, LPC32XX_KS_IRQ(kscandat->kscan_base));
	}

	mutex_unlock(&input->mutex);
	return retval;
}
#endif

static SIMPLE_DEV_PM_OPS(lpc32xx_kscan_pm_ops, lpc32xx_kscan_suspend,
			 lpc32xx_kscan_resume);

/* Device-tree match: bound via the "nxp,lpc3220-key" compatible string. */
static const struct of_device_id lpc32xx_kscan_match[] = {
	{ .compatible = "nxp,lpc3220-key" },
	{},
};
MODULE_DEVICE_TABLE(of, lpc32xx_kscan_match);

static struct platform_driver lpc32xx_kscan_driver = {
	.probe		= lpc32xx_kscan_probe,
	.remove		= lpc32xx_kscan_remove,
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
		.pm	= &lpc32xx_kscan_pm_ops,
		.of_match_table = lpc32xx_kscan_match,
	}
};

module_platform_driver(lpc32xx_kscan_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("Key scanner driver for LPC32XX devices");
gpl-2.0
LorDClockaN/Ace-2.6.35
arch/sparc/kernel/pcic.c
843
24623
/* * pcic.c: MicroSPARC-IIep PCI controller support * * Copyright (C) 1998 V. Roganov and G. Raiko * * Code is derived from Ultra/PCI PSYCHO controller support, see that * for author info. * * Support for diverse IIep based platforms by Pete Zaitcev. * CP-1200 by Eric Brower. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <asm/swift.h> /* for cache flushing. */ #include <asm/io.h> #include <linux/ctype.h> #include <linux/pci.h> #include <linux/time.h> #include <linux/timex.h> #include <linux/interrupt.h> #include <asm/irq.h> #include <asm/oplib.h> #include <asm/prom.h> #include <asm/pcic.h> #include <asm/timex.h> #include <asm/timer.h> #include <asm/uaccess.h> #include <asm/irq_regs.h> #include "irq.h" /* * I studied different documents and many live PROMs both from 2.30 * family and 3.xx versions. I came to the amazing conclusion: there is * absolutely no way to route interrupts in IIep systems relying on * information which PROM presents. We must hardcode interrupt routing * schematics. And this actually sucks. -- zaitcev 1999/05/12 * * To find irq for a device we determine which routing map * is in effect or, in other words, on which machine we are running. * We use PROM name for this although other techniques may be used * in special cases (Gleb reports a PROMless IIep based system). * Once we know the map we take device configuration address and * find PCIC pin number where INT line goes. Then we may either program * preferred irq into the PCIC or supply the preexisting irq to the device. 
*/

/*
 * One hardcoded routing entry: maps a PCI configuration address
 * (bus/devfn) to the PCIC interrupt pin it is wired to, plus the IRQ we
 * would like to see on that pin.
 */
struct pcic_ca2irq {
	unsigned char busno;		/* PCI bus number */
	unsigned char devfn;		/* Configuration address */
	unsigned char pin;		/* PCIC external interrupt pin */
	unsigned char irq;		/* Preferred IRQ (mappable in PCIC) */
	unsigned int force;		/* Enforce preferred IRQ */
};

/* Associates a PROM system name with its interrupt routing table. */
struct pcic_sn2list {
	char *sysname;
	struct pcic_ca2irq *intmap;	/* routing table for this machine */
	int mapdim;			/* number of entries in intmap */
};

/*
 * JavaEngine-1 apparently has different versions.
 *
 * According to communications with Sun folks, for P2 build 501-4628-03:
 *   pin 0 - parallel, audio;
 *   pin 1 - Ethernet;
 *   pin 2 - su;
 *   pin 3 - PS/2 kbd and mouse.
 *
 * OEM manual (805-1486):
 *   pin 0: Ethernet
 *   pin 1: All EBus
 *   pin 2: IGA (unused)
 *   pin 3: Not connected
 * OEM manual says that 501-4628 & 501-4811 are the same thing,
 * only the latter has NAND flash in place.
 *
 * So far unofficial Sun wins over the OEM manual. Poor OEMs...
 */
static struct pcic_ca2irq pcic_i_je1a[] = {	/* 501-4811-03 */
	{ 0, 0x00, 2, 12, 0 },		/* EBus: hogs all */
	{ 0, 0x01, 1,  6, 1 },		/* Happy Meal */
	{ 0, 0x80, 0,  7, 0 },		/* IGA (unused) */
};

/* XXX JS-E entry is incomplete - PCI Slot 2 address (pin 7)? */
static struct pcic_ca2irq pcic_i_jse[] = {
	{ 0, 0x00, 0, 13, 0 },		/* Ebus - serial and keyboard */
	{ 0, 0x01, 1,  6, 0 },		/* hme */
	{ 0, 0x08, 2,  9, 0 },		/* VGA - we hope not used :) */
	{ 0, 0x10, 6,  8, 0 },		/* PCI INTA# in Slot 1 */
	{ 0, 0x18, 7, 12, 0 },		/* PCI INTA# in Slot 2, shared w. RTC */
	{ 0, 0x38, 4,  9, 0 },		/* All ISA devices. Read 8259. */
	{ 0, 0x80, 5, 11, 0 },		/* EIDE */
	/* {0,0x88, 0,0,0} - unknown device... PMU? Probably no interrupt. */
	{ 0, 0xA0, 4,  9, 0 },		/* USB */
	/*
	 * Some pins belong to non-PCI devices, we hardcode them in drivers.
	 * sun4m timers - irq 10, 14
	 * PC style RTC - pin 7, irq 4 ?
	 * Smart card, Parallel - pin 4 shared with USB, ISA
	 * audio - pin 3, irq 5 ?
	 */
};

/* SPARCengine-6 was the original release name of CP1200.
 * The documentation differs between the two versions
 */
static struct pcic_ca2irq pcic_i_se6[] = {
	{ 0, 0x08, 0,  2, 0 },		/* SCSI */
	{ 0, 0x01, 1,  6, 0 },		/* HME */
	{ 0, 0x00, 3, 13, 0 },		/* EBus */
};

/*
 * Krups (courtesy of Varol Kaptan)
 * No documentation available, but it was easy to guess
 * because it was very similar to Espresso.
 *
 * pin 0 - kbd, mouse, serial;
 * pin 1 - Ethernet;
 * pin 2 - igs (we do not use it);
 * pin 3 - audio;
 * pin 4,5,6 - unused;
 * pin 7 - RTC (from P2 onwards as David B. says).
 */
static struct pcic_ca2irq pcic_i_jk[] = {
	{ 0, 0x00, 0, 13, 0 },		/* Ebus - serial and keyboard */
	{ 0, 0x01, 1,  6, 0 },		/* hme */
};

/*
 * Several entries in this list may point to the same routing map
 * as several PROMs may be installed on the same physical board.
 */
#define SN2L_INIT(name, map)	\
	{ name, map, ARRAY_SIZE(map) }

static struct pcic_sn2list pcic_known_sysnames[] = {
	SN2L_INIT("SUNW,JavaEngine1", pcic_i_je1a),	/* JE1, PROM 2.32 */
	SN2L_INIT("SUNW,JS-E", pcic_i_jse),	/* PROLL JavaStation-E */
	SN2L_INIT("SUNW,SPARCengine-6", pcic_i_se6), /* SPARCengine-6/CP-1200 */
	SN2L_INIT("SUNW,JS-NC", pcic_i_jk),	/* PROLL JavaStation-NC */
	SN2L_INIT("SUNW,JSIIep", pcic_i_jk),	/* OBP JavaStation-NC */
	{ NULL, NULL, 0 }			/* list terminator */
};

/*
 * Only one PCIC per IIep,
 * and since we have no SMP IIep, only one per system.
*/ static int pcic0_up; static struct linux_pcic pcic0; void __iomem *pcic_regs; volatile int pcic_speculative; volatile int pcic_trapped; #define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (((unsigned int)bus) << 16) | (((unsigned int)device_fn) << 8) | (where & ~3)) static int pcic_read_config_dword(unsigned int busno, unsigned int devfn, int where, u32 *value) { struct linux_pcic *pcic; unsigned long flags; pcic = &pcic0; local_irq_save(flags); #if 0 /* does not fail here */ pcic_speculative = 1; pcic_trapped = 0; #endif writel(CONFIG_CMD(busno, devfn, where), pcic->pcic_config_space_addr); #if 0 /* does not fail here */ nop(); if (pcic_trapped) { local_irq_restore(flags); *value = ~0; return 0; } #endif pcic_speculative = 2; pcic_trapped = 0; *value = readl(pcic->pcic_config_space_data + (where&4)); nop(); if (pcic_trapped) { pcic_speculative = 0; local_irq_restore(flags); *value = ~0; return 0; } pcic_speculative = 0; local_irq_restore(flags); return 0; } static int pcic_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { unsigned int v; if (bus->number != 0) return -EINVAL; switch (size) { case 1: pcic_read_config_dword(bus->number, devfn, where&~3, &v); *val = 0xff & (v >> (8*(where & 3))); return 0; case 2: if (where&1) return -EINVAL; pcic_read_config_dword(bus->number, devfn, where&~3, &v); *val = 0xffff & (v >> (8*(where & 3))); return 0; case 4: if (where&3) return -EINVAL; pcic_read_config_dword(bus->number, devfn, where&~3, val); return 0; } return -EINVAL; } static int pcic_write_config_dword(unsigned int busno, unsigned int devfn, int where, u32 value) { struct linux_pcic *pcic; unsigned long flags; pcic = &pcic0; local_irq_save(flags); writel(CONFIG_CMD(busno, devfn, where), pcic->pcic_config_space_addr); writel(value, pcic->pcic_config_space_data + (where&4)); local_irq_restore(flags); return 0; } static int pcic_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { unsigned 
int v; if (bus->number != 0) return -EINVAL; switch (size) { case 1: pcic_read_config_dword(bus->number, devfn, where&~3, &v); v = (v & ~(0xff << (8*(where&3)))) | ((0xff&val) << (8*(where&3))); return pcic_write_config_dword(bus->number, devfn, where&~3, v); case 2: if (where&1) return -EINVAL; pcic_read_config_dword(bus->number, devfn, where&~3, &v); v = (v & ~(0xffff << (8*(where&3)))) | ((0xffff&val) << (8*(where&3))); return pcic_write_config_dword(bus->number, devfn, where&~3, v); case 4: if (where&3) return -EINVAL; return pcic_write_config_dword(bus->number, devfn, where, val); } return -EINVAL; } static struct pci_ops pcic_ops = { .read = pcic_read_config, .write = pcic_write_config, }; /* * On sparc64 pcibios_init() calls pci_controller_probe(). * We want PCIC probed little ahead so that interrupt controller * would be operational. */ int __init pcic_probe(void) { struct linux_pcic *pcic; struct linux_prom_registers regs[PROMREG_MAX]; struct linux_pbm_info* pbm; char namebuf[64]; int node; int err; if (pcic0_up) { prom_printf("PCIC: called twice!\n"); prom_halt(); } pcic = &pcic0; node = prom_getchild (prom_root_node); node = prom_searchsiblings (node, "pci"); if (node == 0) return -ENODEV; /* * Map in PCIC register set, config space, and IO base */ err = prom_getproperty(node, "reg", (char*)regs, sizeof(regs)); if (err == 0 || err == -1) { prom_printf("PCIC: Error, cannot get PCIC registers " "from PROM.\n"); prom_halt(); } pcic0_up = 1; pcic->pcic_res_regs.name = "pcic_registers"; pcic->pcic_regs = ioremap(regs[0].phys_addr, regs[0].reg_size); if (!pcic->pcic_regs) { prom_printf("PCIC: Error, cannot map PCIC registers.\n"); prom_halt(); } pcic->pcic_res_io.name = "pcic_io"; if ((pcic->pcic_io = (unsigned long) ioremap(regs[1].phys_addr, 0x10000)) == 0) { prom_printf("PCIC: Error, cannot map PCIC IO Base.\n"); prom_halt(); } pcic->pcic_res_cfg_addr.name = "pcic_cfg_addr"; if ((pcic->pcic_config_space_addr = ioremap(regs[2].phys_addr, regs[2].reg_size * 
2)) == 0) { prom_printf("PCIC: Error, cannot map " "PCI Configuration Space Address.\n"); prom_halt(); } /* * Docs say three least significant bits in address and data * must be the same. Thus, we need adjust size of data. */ pcic->pcic_res_cfg_data.name = "pcic_cfg_data"; if ((pcic->pcic_config_space_data = ioremap(regs[3].phys_addr, regs[3].reg_size * 2)) == 0) { prom_printf("PCIC: Error, cannot map " "PCI Configuration Space Data.\n"); prom_halt(); } pbm = &pcic->pbm; pbm->prom_node = node; prom_getstring(node, "name", namebuf, 63); namebuf[63] = 0; strcpy(pbm->prom_name, namebuf); { extern volatile int t_nmi[1]; extern int pcic_nmi_trap_patch[1]; t_nmi[0] = pcic_nmi_trap_patch[0]; t_nmi[1] = pcic_nmi_trap_patch[1]; t_nmi[2] = pcic_nmi_trap_patch[2]; t_nmi[3] = pcic_nmi_trap_patch[3]; swift_flush_dcache(); pcic_regs = pcic->pcic_regs; } prom_getstring(prom_root_node, "name", namebuf, 63); namebuf[63] = 0; { struct pcic_sn2list *p; for (p = pcic_known_sysnames; p->sysname != NULL; p++) { if (strcmp(namebuf, p->sysname) == 0) break; } pcic->pcic_imap = p->intmap; pcic->pcic_imdim = p->mapdim; } if (pcic->pcic_imap == NULL) { /* * We do not panic here for the sake of embedded systems. */ printk("PCIC: System %s is unknown, cannot route interrupts\n", namebuf); } return 0; } static void __init pcic_pbm_scan_bus(struct linux_pcic *pcic) { struct linux_pbm_info *pbm = &pcic->pbm; pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, &pcic_ops, pbm); #if 0 /* deadwood transplanted from sparc64 */ pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node); pci_record_assignments(pbm, pbm->pci_bus); pci_assign_unassigned(pbm, pbm->pci_bus); pci_fixup_irq(pbm, pbm->pci_bus); #endif } /* * Main entry point from the PCI subsystem. */ static int __init pcic_init(void) { struct linux_pcic *pcic; /* * PCIC should be initialized at start of the timer. * So, here we report the presence of PCIC and do some magic passes. 
*/ if(!pcic0_up) return 0; pcic = &pcic0; /* * Switch off IOTLB translation. */ writeb(PCI_DVMA_CONTROL_IOTLB_DISABLE, pcic->pcic_regs+PCI_DVMA_CONTROL); /* * Increase mapped size for PCI memory space (DMA access). * Should be done in that order (size first, address second). * Why we couldn't set up 4GB and forget about it? XXX */ writel(0xF0000000UL, pcic->pcic_regs+PCI_SIZE_0); writel(0+PCI_BASE_ADDRESS_SPACE_MEMORY, pcic->pcic_regs+PCI_BASE_ADDRESS_0); pcic_pbm_scan_bus(pcic); return 0; } int pcic_present(void) { return pcic0_up; } static int __devinit pdev_to_pnode(struct linux_pbm_info *pbm, struct pci_dev *pdev) { struct linux_prom_pci_registers regs[PROMREG_MAX]; int err; int node = prom_getchild(pbm->prom_node); while(node) { err = prom_getproperty(node, "reg", (char *)&regs[0], sizeof(regs)); if(err != 0 && err != -1) { unsigned long devfn = (regs[0].which_io >> 8) & 0xff; if(devfn == pdev->devfn) return node; } node = prom_getsibling(node); } return 0; } static inline struct pcidev_cookie *pci_devcookie_alloc(void) { return kmalloc(sizeof(struct pcidev_cookie), GFP_ATOMIC); } static void pcic_map_pci_device(struct linux_pcic *pcic, struct pci_dev *dev, int node) { char namebuf[64]; unsigned long address; unsigned long flags; int j; if (node == 0 || node == -1) { strcpy(namebuf, "???"); } else { prom_getstring(node, "name", namebuf, 63); namebuf[63] = 0; } for (j = 0; j < 6; j++) { address = dev->resource[j].start; if (address == 0) break; /* are sequential */ flags = dev->resource[j].flags; if ((flags & IORESOURCE_IO) != 0) { if (address < 0x10000) { /* * A device responds to I/O cycles on PCI. * We generate these cycles with memory * access into the fixed map (phys 0x30000000). * * Since a device driver does not want to * do ioremap() before accessing PC-style I/O, * we supply virtual, ready to access address. * * Note that request_region() * works for these devices. * * XXX Neat trick, but it's a *bad* idea * to shit into regions like that. 
* What if we want to allocate one more * PCI base address... */ dev->resource[j].start = pcic->pcic_io + address; dev->resource[j].end = 1; /* XXX */ dev->resource[j].flags = (flags & ~IORESOURCE_IO) | IORESOURCE_MEM; } else { /* * OOPS... PCI Spec allows this. Sun does * not have any devices getting above 64K * so it must be user with a weird I/O * board in a PCI slot. We must remap it * under 64K but it is not done yet. XXX */ printk("PCIC: Skipping I/O space at 0x%lx, " "this will Oops if a driver attaches " "device '%s' at %02x:%02x)\n", address, namebuf, dev->bus->number, dev->devfn); } } } } static void pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node) { struct pcic_ca2irq *p; int i, ivec; char namebuf[64]; if (node == 0 || node == -1) { strcpy(namebuf, "???"); } else { prom_getstring(node, "name", namebuf, sizeof(namebuf)); } if ((p = pcic->pcic_imap) == 0) { dev->irq = 0; return; } for (i = 0; i < pcic->pcic_imdim; i++) { if (p->busno == dev->bus->number && p->devfn == dev->devfn) break; p++; } if (i >= pcic->pcic_imdim) { printk("PCIC: device %s devfn %02x:%02x not found in %d\n", namebuf, dev->bus->number, dev->devfn, pcic->pcic_imdim); dev->irq = 0; return; } i = p->pin; if (i >= 0 && i < 4) { ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO); dev->irq = ivec >> (i << 2) & 0xF; } else if (i >= 4 && i < 8) { ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI); dev->irq = ivec >> ((i-4) << 2) & 0xF; } else { /* Corrupted map */ printk("PCIC: BAD PIN %d\n", i); for (;;) {} } /* P3 */ /* printk("PCIC: device %s pin %d ivec 0x%x irq %x\n", namebuf, i, ivec, dev->irq); */ /* * dev->irq=0 means PROM did not bother to program the upper * half of PCIC. This happens on JS-E with PROM 3.11, for instance. 
*/ if (dev->irq == 0 || p->force) { if (p->irq == 0 || p->irq >= 15) { /* Corrupted map */ printk("PCIC: BAD IRQ %d\n", p->irq); for (;;) {} } printk("PCIC: setting irq %d at pin %d for device %02x:%02x\n", p->irq, p->pin, dev->bus->number, dev->devfn); dev->irq = p->irq; i = p->pin; if (i >= 4) { ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI); ivec &= ~(0xF << ((i - 4) << 2)); ivec |= p->irq << ((i - 4) << 2); writew(ivec, pcic->pcic_regs+PCI_INT_SELECT_HI); } else { ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO); ivec &= ~(0xF << (i << 2)); ivec |= p->irq << (i << 2); writew(ivec, pcic->pcic_regs+PCI_INT_SELECT_LO); } } } /* * Normally called from {do_}pci_scan_bus... */ void __devinit pcibios_fixup_bus(struct pci_bus *bus) { struct pci_dev *dev; int i, has_io, has_mem; unsigned int cmd; struct linux_pcic *pcic; /* struct linux_pbm_info* pbm = &pcic->pbm; */ int node; struct pcidev_cookie *pcp; if (!pcic0_up) { printk("pcibios_fixup_bus: no PCIC\n"); return; } pcic = &pcic0; /* * Next crud is an equivalent of pbm = pcic_bus_to_pbm(bus); */ if (bus->number != 0) { printk("pcibios_fixup_bus: nonzero bus 0x%x\n", bus->number); return; } list_for_each_entry(dev, &bus->devices, bus_list) { /* * Comment from i386 branch: * There are buggy BIOSes that forget to enable I/O and memory * access to PCI devices. We try to fix this, but we need to * be sure that the BIOS didn't forget to assign an address * to the device. 
[mj] * OBP is a case of such BIOS :-) */ has_io = has_mem = 0; for(i=0; i<6; i++) { unsigned long f = dev->resource[i].flags; if (f & IORESOURCE_IO) { has_io = 1; } else if (f & IORESOURCE_MEM) has_mem = 1; } pcic_read_config(dev->bus, dev->devfn, PCI_COMMAND, 2, &cmd); if (has_io && !(cmd & PCI_COMMAND_IO)) { printk("PCIC: Enabling I/O for device %02x:%02x\n", dev->bus->number, dev->devfn); cmd |= PCI_COMMAND_IO; pcic_write_config(dev->bus, dev->devfn, PCI_COMMAND, 2, cmd); } if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) { printk("PCIC: Enabling memory for device %02x:%02x\n", dev->bus->number, dev->devfn); cmd |= PCI_COMMAND_MEMORY; pcic_write_config(dev->bus, dev->devfn, PCI_COMMAND, 2, cmd); } node = pdev_to_pnode(&pcic->pbm, dev); if(node == 0) node = -1; /* cookies */ pcp = pci_devcookie_alloc(); pcp->pbm = &pcic->pbm; pcp->prom_node = of_find_node_by_phandle(node); dev->sysdata = pcp; /* fixing I/O to look like memory */ if ((dev->class>>16) != PCI_BASE_CLASS_BRIDGE) pcic_map_pci_device(pcic, dev, node); pcic_fill_irq(pcic, dev, node); } } /* * pcic_pin_to_irq() is exported to bus probing code */ unsigned int pcic_pin_to_irq(unsigned int pin, const char *name) { struct linux_pcic *pcic = &pcic0; unsigned int irq; unsigned int ivec; if (pin < 4) { ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO); irq = ivec >> (pin << 2) & 0xF; } else if (pin < 8) { ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI); irq = ivec >> ((pin-4) << 2) & 0xF; } else { /* Corrupted map */ printk("PCIC: BAD PIN %d FOR %s\n", pin, name); for (;;) {} /* XXX Cannot panic properly in case of PROLL */ } /* P3 */ /* printk("PCIC: dev %s pin %d ivec 0x%x irq %x\n", name, pin, ivec, irq); */ return irq; } /* Makes compiler happy */ static volatile int pcic_timer_dummy; static void pcic_clear_clock_irq(void) { pcic_timer_dummy = readl(pcic0.pcic_regs+PCI_SYS_LIMIT); } static irqreturn_t pcic_timer_handler (int irq, void *h) { write_seqlock(&xtime_lock); /* Dummy, to show that we remember */ 
pcic_clear_clock_irq(); do_timer(1); write_sequnlock(&xtime_lock); #ifndef CONFIG_SMP update_process_times(user_mode(get_irq_regs())); #endif return IRQ_HANDLED; } #define USECS_PER_JIFFY 10000 /* We have 100HZ "standard" timer for sparc */ #define TICK_TIMER_LIMIT ((100*1000000/4)/100) u32 pci_gettimeoffset(void) { /* * We divide all by 100 * to have microsecond resolution and to avoid overflow */ unsigned long count = readl(pcic0.pcic_regs+PCI_SYS_COUNTER) & ~PCI_SYS_COUNTER_OVERFLOW; count = ((count/100)*USECS_PER_JIFFY) / (TICK_TIMER_LIMIT/100); return count * 1000; } void __init pci_time_init(void) { struct linux_pcic *pcic = &pcic0; unsigned long v; int timer_irq, irq; do_arch_gettimeoffset = pci_gettimeoffset; btfixup(); writel (TICK_TIMER_LIMIT, pcic->pcic_regs+PCI_SYS_LIMIT); /* PROM should set appropriate irq */ v = readb(pcic->pcic_regs+PCI_COUNTER_IRQ); timer_irq = PCI_COUNTER_IRQ_SYS(v); writel (PCI_COUNTER_IRQ_SET(timer_irq, 0), pcic->pcic_regs+PCI_COUNTER_IRQ); irq = request_irq(timer_irq, pcic_timer_handler, (IRQF_DISABLED | SA_STATIC_ALLOC), "timer", NULL); if (irq) { prom_printf("time_init: unable to attach IRQ%d\n", timer_irq); prom_halt(); } local_irq_enable(); } #if 0 static void watchdog_reset() { writeb(0, pcic->pcic_regs+PCI_SYS_STATUS); } #endif /* * Other archs parse arguments here. */ char * __devinit pcibios_setup(char *str) { return str; } resource_size_t pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t align) { return res->start; } int pcibios_enable_device(struct pci_dev *pdev, int mask) { return 0; } /* * NMI */ void pcic_nmi(unsigned int pend, struct pt_regs *regs) { pend = flip_dword(pend); if (!pcic_speculative || (pend & PCI_SYS_INT_PENDING_PIO) == 0) { /* * XXX On CP-1200 PCI #SERR may happen, we do not know * what to do about it yet. 
*/ printk("Aiee, NMI pend 0x%x pc 0x%x spec %d, hanging\n", pend, (int)regs->pc, pcic_speculative); for (;;) { } } pcic_speculative = 0; pcic_trapped = 1; regs->pc = regs->npc; regs->npc += 4; } static inline unsigned long get_irqmask(int irq_nr) { return 1 << irq_nr; } static void pcic_disable_irq(unsigned int irq_nr) { unsigned long mask, flags; mask = get_irqmask(irq_nr); local_irq_save(flags); writel(mask, pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_SET); local_irq_restore(flags); } static void pcic_enable_irq(unsigned int irq_nr) { unsigned long mask, flags; mask = get_irqmask(irq_nr); local_irq_save(flags); writel(mask, pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_CLEAR); local_irq_restore(flags); } static void pcic_load_profile_irq(int cpu, unsigned int limit) { printk("PCIC: unimplemented code: FILE=%s LINE=%d", __FILE__, __LINE__); } /* We assume the caller has disabled local interrupts when these are called, * or else very bizarre behavior will result. */ static void pcic_disable_pil_irq(unsigned int pil) { writel(get_irqmask(pil), pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_SET); } static void pcic_enable_pil_irq(unsigned int pil) { writel(get_irqmask(pil), pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_CLEAR); } void __init sun4m_pci_init_IRQ(void) { BTFIXUPSET_CALL(enable_irq, pcic_enable_irq, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(disable_irq, pcic_disable_irq, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(enable_pil_irq, pcic_enable_pil_irq, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(disable_pil_irq, pcic_disable_pil_irq, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(clear_clock_irq, pcic_clear_clock_irq, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(load_profile_irq, pcic_load_profile_irq, BTFIXUPCALL_NORM); } int pcibios_assign_resource(struct pci_dev *pdev, int resource) { return -ENXIO; } struct device_node *pci_device_to_OF_node(struct pci_dev *pdev) { struct pcidev_cookie *pc = pdev->sysdata; return pc->prom_node; } EXPORT_SYMBOL(pci_device_to_OF_node); /* * This probably belongs here rather than ioport.c 
because
 * we do not want this crud linked into SBus kernels.
 * Also, think for a moment about likes of floppy.c that
 * include architecture specific parts. They may want to redefine ins/outs.
 *
 * No macro tricks here: each routine needs a pointer that advances by
 * its own element size, so plain typed pointers are used instead.
 */

/* Write 'count' bytes from 'src' to the (fixed) I/O address 'addr'. */
void outsb(unsigned long addr, const void *src, unsigned long count)
{
	const char *p = src;

	for (; count != 0; count -= 1, p += 1)
		outb(*p, addr);
}
EXPORT_SYMBOL(outsb);

/* Write 'count' bytes (as 16-bit words) from 'src' to I/O address 'addr'. */
void outsw(unsigned long addr, const void *src, unsigned long count)
{
	const short *p = src;

	for (; count != 0; count -= 2, p += 1)
		outw(*p, addr);
}
EXPORT_SYMBOL(outsw);

/* Write 'count' bytes (as 32-bit words) from 'src' to I/O address 'addr'. */
void outsl(unsigned long addr, const void *src, unsigned long count)
{
	const long *p = src;

	for (; count != 0; count -= 4, p += 1)
		outl(*p, addr);
}
EXPORT_SYMBOL(outsl);

/* Read 'count' bytes from I/O address 'addr' into 'dst'. */
void insb(unsigned long addr, void *dst, unsigned long count)
{
	unsigned char *p = dst;

	for (; count != 0; count -= 1, p += 1)
		*p = inb(addr);
}
EXPORT_SYMBOL(insb);

/* Read 'count' bytes (as 16-bit words) from I/O address 'addr' into 'dst'. */
void insw(unsigned long addr, void *dst, unsigned long count)
{
	unsigned short *p = dst;

	for (; count != 0; count -= 2, p += 1)
		*p = inw(addr);
}
EXPORT_SYMBOL(insw);

/* Read 'count' bytes (as 32-bit words) from I/O address 'addr' into 'dst'. */
void insl(unsigned long addr, void *dst, unsigned long count)
{
	unsigned long *p = dst;

	for (; count != 0; count -= 4, p += 1) {
		/*
		 * XXX An unaligned 'dst' will trap here, exactly as in
		 * the original word-sized store.
		 */
		*p = inl(addr);
	}
}
EXPORT_SYMBOL(insl);

subsys_initcall(pcic_init);
gpl-2.0
Fusion-Devices/android_kernel_moto_shamu_old
drivers/tty/serial/ar933x_uart.c
2123
17996
/* * Atheros AR933X SoC built-in UART driver * * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org> * * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include <linux/module.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/console.h> #include <linux/sysrq.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial_core.h> #include <linux/serial.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/irq.h> #include <asm/div64.h> #include <asm/mach-ath79/ar933x_uart.h> #include <asm/mach-ath79/ar933x_uart_platform.h> #define DRIVER_NAME "ar933x-uart" #define AR933X_UART_MAX_SCALE 0xff #define AR933X_UART_MAX_STEP 0xffff #define AR933X_UART_MIN_BAUD 300 #define AR933X_UART_MAX_BAUD 3000000 #define AR933X_DUMMY_STATUS_RD 0x01 static struct uart_driver ar933x_uart_driver; struct ar933x_uart_port { struct uart_port port; unsigned int ier; /* shadow Interrupt Enable Register */ unsigned int min_baud; unsigned int max_baud; }; static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up, int offset) { return readl(up->port.membase + offset); } static inline void ar933x_uart_write(struct ar933x_uart_port *up, int offset, unsigned int value) { writel(value, up->port.membase + offset); } static inline void ar933x_uart_rmw(struct ar933x_uart_port *up, unsigned int offset, unsigned int mask, unsigned int val) { unsigned int t; t = ar933x_uart_read(up, offset); t &= ~mask; t |= val; ar933x_uart_write(up, offset, t); } static inline void ar933x_uart_rmw_set(struct ar933x_uart_port *up, unsigned int offset, unsigned int val) { ar933x_uart_rmw(up, offset, 0, val); } static inline void ar933x_uart_rmw_clear(struct ar933x_uart_port *up, unsigned int offset, 
unsigned int val) { ar933x_uart_rmw(up, offset, val, 0); } static inline void ar933x_uart_start_tx_interrupt(struct ar933x_uart_port *up) { up->ier |= AR933X_UART_INT_TX_EMPTY; ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier); } static inline void ar933x_uart_stop_tx_interrupt(struct ar933x_uart_port *up) { up->ier &= ~AR933X_UART_INT_TX_EMPTY; ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier); } static inline void ar933x_uart_putc(struct ar933x_uart_port *up, int ch) { unsigned int rdata; rdata = ch & AR933X_UART_DATA_TX_RX_MASK; rdata |= AR933X_UART_DATA_TX_CSR; ar933x_uart_write(up, AR933X_UART_DATA_REG, rdata); } static unsigned int ar933x_uart_tx_empty(struct uart_port *port) { struct ar933x_uart_port *up = (struct ar933x_uart_port *) port; unsigned long flags; unsigned int rdata; spin_lock_irqsave(&up->port.lock, flags); rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG); spin_unlock_irqrestore(&up->port.lock, flags); return (rdata & AR933X_UART_DATA_TX_CSR) ? 0 : TIOCSER_TEMT; } static unsigned int ar933x_uart_get_mctrl(struct uart_port *port) { return TIOCM_CAR; } static void ar933x_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) { } static void ar933x_uart_start_tx(struct uart_port *port) { struct ar933x_uart_port *up = (struct ar933x_uart_port *) port; ar933x_uart_start_tx_interrupt(up); } static void ar933x_uart_stop_tx(struct uart_port *port) { struct ar933x_uart_port *up = (struct ar933x_uart_port *) port; ar933x_uart_stop_tx_interrupt(up); } static void ar933x_uart_stop_rx(struct uart_port *port) { struct ar933x_uart_port *up = (struct ar933x_uart_port *) port; up->ier &= ~AR933X_UART_INT_RX_VALID; ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier); } static void ar933x_uart_break_ctl(struct uart_port *port, int break_state) { struct ar933x_uart_port *up = (struct ar933x_uart_port *) port; unsigned long flags; spin_lock_irqsave(&up->port.lock, flags); if (break_state == -1) ar933x_uart_rmw_set(up, AR933X_UART_CS_REG, 
AR933X_UART_CS_TX_BREAK); else ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG, AR933X_UART_CS_TX_BREAK); spin_unlock_irqrestore(&up->port.lock, flags); } static void ar933x_uart_enable_ms(struct uart_port *port) { } /* * baudrate = (clk / (scale + 1)) * (step * (1 / 2^17)) */ static unsigned long ar933x_uart_get_baud(unsigned int clk, unsigned int scale, unsigned int step) { u64 t; u32 div; div = (2 << 16) * (scale + 1); t = clk; t *= step; t += (div / 2); do_div(t, div); return t; } static void ar933x_uart_get_scale_step(unsigned int clk, unsigned int baud, unsigned int *scale, unsigned int *step) { unsigned int tscale; long min_diff; *scale = 0; *step = 0; min_diff = baud; for (tscale = 0; tscale < AR933X_UART_MAX_SCALE; tscale++) { u64 tstep; int diff; tstep = baud * (tscale + 1); tstep *= (2 << 16); do_div(tstep, clk); if (tstep > AR933X_UART_MAX_STEP) break; diff = abs(ar933x_uart_get_baud(clk, tscale, tstep) - baud); if (diff < min_diff) { min_diff = diff; *scale = tscale; *step = tstep; } } } static void ar933x_uart_set_termios(struct uart_port *port, struct ktermios *new, struct ktermios *old) { struct ar933x_uart_port *up = (struct ar933x_uart_port *) port; unsigned int cs; unsigned long flags; unsigned int baud, scale, step; /* Only CS8 is supported */ new->c_cflag &= ~CSIZE; new->c_cflag |= CS8; /* Only one stop bit is supported */ new->c_cflag &= ~CSTOPB; cs = 0; if (new->c_cflag & PARENB) { if (!(new->c_cflag & PARODD)) cs |= AR933X_UART_CS_PARITY_EVEN; else cs |= AR933X_UART_CS_PARITY_ODD; } else { cs |= AR933X_UART_CS_PARITY_NONE; } /* Mark/space parity is not supported */ new->c_cflag &= ~CMSPAR; baud = uart_get_baud_rate(port, new, old, up->min_baud, up->max_baud); ar933x_uart_get_scale_step(port->uartclk, baud, &scale, &step); /* * Ok, we're now changing the port state. Do it with * interrupts disabled. 
*/ spin_lock_irqsave(&up->port.lock, flags); /* disable the UART */ ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG, AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S); /* Update the per-port timeout. */ uart_update_timeout(port, new->c_cflag, baud); up->port.ignore_status_mask = 0; /* ignore all characters if CREAD is not set */ if ((new->c_cflag & CREAD) == 0) up->port.ignore_status_mask |= AR933X_DUMMY_STATUS_RD; ar933x_uart_write(up, AR933X_UART_CLOCK_REG, scale << AR933X_UART_CLOCK_SCALE_S | step); /* setup configuration register */ ar933x_uart_rmw(up, AR933X_UART_CS_REG, AR933X_UART_CS_PARITY_M, cs); /* enable host interrupt */ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG, AR933X_UART_CS_HOST_INT_EN); /* reenable the UART */ ar933x_uart_rmw(up, AR933X_UART_CS_REG, AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S, AR933X_UART_CS_IF_MODE_DCE << AR933X_UART_CS_IF_MODE_S); spin_unlock_irqrestore(&up->port.lock, flags); if (tty_termios_baud_rate(new)) tty_termios_encode_baud_rate(new, baud, baud); } static void ar933x_uart_rx_chars(struct ar933x_uart_port *up) { struct tty_port *port = &up->port.state->port; int max_count = 256; do { unsigned int rdata; unsigned char ch; rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG); if ((rdata & AR933X_UART_DATA_RX_CSR) == 0) break; /* remove the character from the FIFO */ ar933x_uart_write(up, AR933X_UART_DATA_REG, AR933X_UART_DATA_RX_CSR); up->port.icount.rx++; ch = rdata & AR933X_UART_DATA_TX_RX_MASK; if (uart_handle_sysrq_char(&up->port, ch)) continue; if ((up->port.ignore_status_mask & AR933X_DUMMY_STATUS_RD) == 0) tty_insert_flip_char(port, ch, TTY_NORMAL); } while (max_count-- > 0); tty_flip_buffer_push(port); } static void ar933x_uart_tx_chars(struct ar933x_uart_port *up) { struct circ_buf *xmit = &up->port.state->xmit; int count; if (uart_tx_stopped(&up->port)) return; count = up->port.fifosize; do { unsigned int rdata; rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG); if ((rdata & AR933X_UART_DATA_TX_CSR) == 0) 
break; if (up->port.x_char) { ar933x_uart_putc(up, up->port.x_char); up->port.icount.tx++; up->port.x_char = 0; continue; } if (uart_circ_empty(xmit)) break; ar933x_uart_putc(up, xmit->buf[xmit->tail]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); up->port.icount.tx++; } while (--count > 0); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&up->port); if (!uart_circ_empty(xmit)) ar933x_uart_start_tx_interrupt(up); } static irqreturn_t ar933x_uart_interrupt(int irq, void *dev_id) { struct ar933x_uart_port *up = dev_id; unsigned int status; status = ar933x_uart_read(up, AR933X_UART_CS_REG); if ((status & AR933X_UART_CS_HOST_INT) == 0) return IRQ_NONE; spin_lock(&up->port.lock); status = ar933x_uart_read(up, AR933X_UART_INT_REG); status &= ar933x_uart_read(up, AR933X_UART_INT_EN_REG); if (status & AR933X_UART_INT_RX_VALID) { ar933x_uart_write(up, AR933X_UART_INT_REG, AR933X_UART_INT_RX_VALID); ar933x_uart_rx_chars(up); } if (status & AR933X_UART_INT_TX_EMPTY) { ar933x_uart_write(up, AR933X_UART_INT_REG, AR933X_UART_INT_TX_EMPTY); ar933x_uart_stop_tx_interrupt(up); ar933x_uart_tx_chars(up); } spin_unlock(&up->port.lock); return IRQ_HANDLED; } static int ar933x_uart_startup(struct uart_port *port) { struct ar933x_uart_port *up = (struct ar933x_uart_port *) port; unsigned long flags; int ret; ret = request_irq(up->port.irq, ar933x_uart_interrupt, up->port.irqflags, dev_name(up->port.dev), up); if (ret) return ret; spin_lock_irqsave(&up->port.lock, flags); /* Enable HOST interrupts */ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG, AR933X_UART_CS_HOST_INT_EN); /* Enable RX interrupts */ up->ier = AR933X_UART_INT_RX_VALID; ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier); spin_unlock_irqrestore(&up->port.lock, flags); return 0; } static void ar933x_uart_shutdown(struct uart_port *port) { struct ar933x_uart_port *up = (struct ar933x_uart_port *) port; /* Disable all interrupts */ up->ier = 0; ar933x_uart_write(up, AR933X_UART_INT_EN_REG, 
up->ier); /* Disable break condition */ ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG, AR933X_UART_CS_TX_BREAK); free_irq(up->port.irq, up); } static const char *ar933x_uart_type(struct uart_port *port) { return (port->type == PORT_AR933X) ? "AR933X UART" : NULL; } static void ar933x_uart_release_port(struct uart_port *port) { /* Nothing to release ... */ } static int ar933x_uart_request_port(struct uart_port *port) { /* UARTs always present */ return 0; } static void ar933x_uart_config_port(struct uart_port *port, int flags) { if (flags & UART_CONFIG_TYPE) port->type = PORT_AR933X; } static int ar933x_uart_verify_port(struct uart_port *port, struct serial_struct *ser) { struct ar933x_uart_port *up = (struct ar933x_uart_port *) port; if (ser->type != PORT_UNKNOWN && ser->type != PORT_AR933X) return -EINVAL; if (ser->irq < 0 || ser->irq >= NR_IRQS) return -EINVAL; if (ser->baud_base < up->min_baud || ser->baud_base > up->max_baud) return -EINVAL; return 0; } static struct uart_ops ar933x_uart_ops = { .tx_empty = ar933x_uart_tx_empty, .set_mctrl = ar933x_uart_set_mctrl, .get_mctrl = ar933x_uart_get_mctrl, .stop_tx = ar933x_uart_stop_tx, .start_tx = ar933x_uart_start_tx, .stop_rx = ar933x_uart_stop_rx, .enable_ms = ar933x_uart_enable_ms, .break_ctl = ar933x_uart_break_ctl, .startup = ar933x_uart_startup, .shutdown = ar933x_uart_shutdown, .set_termios = ar933x_uart_set_termios, .type = ar933x_uart_type, .release_port = ar933x_uart_release_port, .request_port = ar933x_uart_request_port, .config_port = ar933x_uart_config_port, .verify_port = ar933x_uart_verify_port, }; #ifdef CONFIG_SERIAL_AR933X_CONSOLE static struct ar933x_uart_port * ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS]; static void ar933x_uart_wait_xmitr(struct ar933x_uart_port *up) { unsigned int status; unsigned int timeout = 60000; /* Wait up to 60ms for the character(s) to be sent. 
*/ do { status = ar933x_uart_read(up, AR933X_UART_DATA_REG); if (--timeout == 0) break; udelay(1); } while ((status & AR933X_UART_DATA_TX_CSR) == 0); } static void ar933x_uart_console_putchar(struct uart_port *port, int ch) { struct ar933x_uart_port *up = (struct ar933x_uart_port *) port; ar933x_uart_wait_xmitr(up); ar933x_uart_putc(up, ch); } static void ar933x_uart_console_write(struct console *co, const char *s, unsigned int count) { struct ar933x_uart_port *up = ar933x_console_ports[co->index]; unsigned long flags; unsigned int int_en; int locked = 1; local_irq_save(flags); if (up->port.sysrq) locked = 0; else if (oops_in_progress) locked = spin_trylock(&up->port.lock); else spin_lock(&up->port.lock); /* * First save the IER then disable the interrupts */ int_en = ar933x_uart_read(up, AR933X_UART_INT_EN_REG); ar933x_uart_write(up, AR933X_UART_INT_EN_REG, 0); uart_console_write(&up->port, s, count, ar933x_uart_console_putchar); /* * Finally, wait for transmitter to become empty * and restore the IER */ ar933x_uart_wait_xmitr(up); ar933x_uart_write(up, AR933X_UART_INT_EN_REG, int_en); ar933x_uart_write(up, AR933X_UART_INT_REG, AR933X_UART_INT_ALLINTS); if (locked) spin_unlock(&up->port.lock); local_irq_restore(flags); } static int ar933x_uart_console_setup(struct console *co, char *options) { struct ar933x_uart_port *up; int baud = 115200; int bits = 8; int parity = 'n'; int flow = 'n'; if (co->index < 0 || co->index >= CONFIG_SERIAL_AR933X_NR_UARTS) return -EINVAL; up = ar933x_console_ports[co->index]; if (!up) return -ENODEV; if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); return uart_set_options(&up->port, co, baud, parity, bits, flow); } static struct console ar933x_uart_console = { .name = "ttyATH", .write = ar933x_uart_console_write, .device = uart_console_device, .setup = ar933x_uart_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &ar933x_uart_driver, }; static void ar933x_uart_add_console_port(struct ar933x_uart_port 
*up) { ar933x_console_ports[up->port.line] = up; } #define AR933X_SERIAL_CONSOLE (&ar933x_uart_console) #else static inline void ar933x_uart_add_console_port(struct ar933x_uart_port *up) {} #define AR933X_SERIAL_CONSOLE NULL #endif /* CONFIG_SERIAL_AR933X_CONSOLE */ static struct uart_driver ar933x_uart_driver = { .owner = THIS_MODULE, .driver_name = DRIVER_NAME, .dev_name = "ttyATH", .nr = CONFIG_SERIAL_AR933X_NR_UARTS, .cons = AR933X_SERIAL_CONSOLE, }; static int ar933x_uart_probe(struct platform_device *pdev) { struct ar933x_uart_platform_data *pdata; struct ar933x_uart_port *up; struct uart_port *port; struct resource *mem_res; struct resource *irq_res; unsigned int baud; int id; int ret; pdata = pdev->dev.platform_data; if (!pdata) return -EINVAL; id = pdev->id; if (id == -1) id = 0; if (id > CONFIG_SERIAL_AR933X_NR_UARTS) return -EINVAL; mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem_res) { dev_err(&pdev->dev, "no MEM resource\n"); return -EINVAL; } irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!irq_res) { dev_err(&pdev->dev, "no IRQ resource\n"); return -EINVAL; } up = kzalloc(sizeof(struct ar933x_uart_port), GFP_KERNEL); if (!up) return -ENOMEM; port = &up->port; port->mapbase = mem_res->start; port->membase = ioremap(mem_res->start, AR933X_UART_REGS_SIZE); if (!port->membase) { ret = -ENOMEM; goto err_free_up; } port->line = id; port->irq = irq_res->start; port->dev = &pdev->dev; port->type = PORT_AR933X; port->iotype = UPIO_MEM32; port->uartclk = pdata->uartclk; port->regshift = 2; port->fifosize = AR933X_UART_FIFO_SIZE; port->ops = &ar933x_uart_ops; baud = ar933x_uart_get_baud(port->uartclk, AR933X_UART_MAX_SCALE, 1); up->min_baud = max_t(unsigned int, baud, AR933X_UART_MIN_BAUD); baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP); up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD); ar933x_uart_add_console_port(up); ret = uart_add_one_port(&ar933x_uart_driver, &up->port); if (ret) goto 
err_unmap; platform_set_drvdata(pdev, up); return 0; err_unmap: iounmap(up->port.membase); err_free_up: kfree(up); return ret; } static int ar933x_uart_remove(struct platform_device *pdev) { struct ar933x_uart_port *up; up = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); if (up) { uart_remove_one_port(&ar933x_uart_driver, &up->port); iounmap(up->port.membase); kfree(up); } return 0; } static struct platform_driver ar933x_uart_platform_driver = { .probe = ar933x_uart_probe, .remove = ar933x_uart_remove, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, }; static int __init ar933x_uart_init(void) { int ret; ar933x_uart_driver.nr = CONFIG_SERIAL_AR933X_NR_UARTS; ret = uart_register_driver(&ar933x_uart_driver); if (ret) goto err_out; ret = platform_driver_register(&ar933x_uart_platform_driver); if (ret) goto err_unregister_uart_driver; return 0; err_unregister_uart_driver: uart_unregister_driver(&ar933x_uart_driver); err_out: return ret; } static void __exit ar933x_uart_exit(void) { platform_driver_unregister(&ar933x_uart_platform_driver); uart_unregister_driver(&ar933x_uart_driver); } module_init(ar933x_uart_init); module_exit(ar933x_uart_exit); MODULE_DESCRIPTION("Atheros AR933X UART driver"); MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRIVER_NAME);
gpl-2.0
kirananto/RaZorLettuce
drivers/tty/serial/sa1100.c
2123
23042
/* * Driver for SA11x0 serial ports * * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. * * Copyright (C) 2000 Deep Blue Solutions Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #if defined(CONFIG_SERIAL_SA1100_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/module.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/console.h> #include <linux/sysrq.h> #include <linux/platform_data/sa11x0-serial.h> #include <linux/platform_device.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial_core.h> #include <linux/serial.h> #include <linux/io.h> #include <asm/irq.h> #include <mach/hardware.h> #include <mach/irqs.h> /* We've been assigned a range on the "Low-density serial ports" major */ #define SERIAL_SA1100_MAJOR 204 #define MINOR_START 5 #define NR_PORTS 3 #define SA1100_ISR_PASS_LIMIT 256 /* * Convert from ignore_status_mask or read_status_mask to UTSR[01] */ #define SM_TO_UTSR0(x) ((x) & 0xff) #define SM_TO_UTSR1(x) ((x) >> 8) #define UTSR0_TO_SM(x) ((x)) #define UTSR1_TO_SM(x) ((x) << 8) #define UART_GET_UTCR0(sport) __raw_readl((sport)->port.membase + UTCR0) #define UART_GET_UTCR1(sport) __raw_readl((sport)->port.membase + UTCR1) #define UART_GET_UTCR2(sport) __raw_readl((sport)->port.membase + UTCR2) #define 
UART_GET_UTCR3(sport) __raw_readl((sport)->port.membase + UTCR3) #define UART_GET_UTSR0(sport) __raw_readl((sport)->port.membase + UTSR0) #define UART_GET_UTSR1(sport) __raw_readl((sport)->port.membase + UTSR1) #define UART_GET_CHAR(sport) __raw_readl((sport)->port.membase + UTDR) #define UART_PUT_UTCR0(sport,v) __raw_writel((v),(sport)->port.membase + UTCR0) #define UART_PUT_UTCR1(sport,v) __raw_writel((v),(sport)->port.membase + UTCR1) #define UART_PUT_UTCR2(sport,v) __raw_writel((v),(sport)->port.membase + UTCR2) #define UART_PUT_UTCR3(sport,v) __raw_writel((v),(sport)->port.membase + UTCR3) #define UART_PUT_UTSR0(sport,v) __raw_writel((v),(sport)->port.membase + UTSR0) #define UART_PUT_UTSR1(sport,v) __raw_writel((v),(sport)->port.membase + UTSR1) #define UART_PUT_CHAR(sport,v) __raw_writel((v),(sport)->port.membase + UTDR) /* * This is the size of our serial port register set. */ #define UART_PORT_SIZE 0x24 /* * This determines how often we check the modem status signals * for any change. They generally aren't connected to an IRQ * so we have to poll them. We also check immediately before * filling the TX fifo incase CTS has been dropped. */ #define MCTRL_TIMEOUT (250*HZ/1000) struct sa1100_port { struct uart_port port; struct timer_list timer; unsigned int old_status; }; /* * Handle any change of modem status signal since we were last called. 
*/ static void sa1100_mctrl_check(struct sa1100_port *sport) { unsigned int status, changed; status = sport->port.ops->get_mctrl(&sport->port); changed = status ^ sport->old_status; if (changed == 0) return; sport->old_status = status; if (changed & TIOCM_RI) sport->port.icount.rng++; if (changed & TIOCM_DSR) sport->port.icount.dsr++; if (changed & TIOCM_CAR) uart_handle_dcd_change(&sport->port, status & TIOCM_CAR); if (changed & TIOCM_CTS) uart_handle_cts_change(&sport->port, status & TIOCM_CTS); wake_up_interruptible(&sport->port.state->port.delta_msr_wait); } /* * This is our per-port timeout handler, for checking the * modem status signals. */ static void sa1100_timeout(unsigned long data) { struct sa1100_port *sport = (struct sa1100_port *)data; unsigned long flags; if (sport->port.state) { spin_lock_irqsave(&sport->port.lock, flags); sa1100_mctrl_check(sport); spin_unlock_irqrestore(&sport->port.lock, flags); mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT); } } /* * interrupts disabled on entry */ static void sa1100_stop_tx(struct uart_port *port) { struct sa1100_port *sport = (struct sa1100_port *)port; u32 utcr3; utcr3 = UART_GET_UTCR3(sport); UART_PUT_UTCR3(sport, utcr3 & ~UTCR3_TIE); sport->port.read_status_mask &= ~UTSR0_TO_SM(UTSR0_TFS); } /* * port locked and interrupts disabled */ static void sa1100_start_tx(struct uart_port *port) { struct sa1100_port *sport = (struct sa1100_port *)port; u32 utcr3; utcr3 = UART_GET_UTCR3(sport); sport->port.read_status_mask |= UTSR0_TO_SM(UTSR0_TFS); UART_PUT_UTCR3(sport, utcr3 | UTCR3_TIE); } /* * Interrupts enabled */ static void sa1100_stop_rx(struct uart_port *port) { struct sa1100_port *sport = (struct sa1100_port *)port; u32 utcr3; utcr3 = UART_GET_UTCR3(sport); UART_PUT_UTCR3(sport, utcr3 & ~UTCR3_RIE); } /* * Set the modem control timer to fire immediately. 
*/ static void sa1100_enable_ms(struct uart_port *port) { struct sa1100_port *sport = (struct sa1100_port *)port; mod_timer(&sport->timer, jiffies); } static void sa1100_rx_chars(struct sa1100_port *sport) { unsigned int status, ch, flg; status = UTSR1_TO_SM(UART_GET_UTSR1(sport)) | UTSR0_TO_SM(UART_GET_UTSR0(sport)); while (status & UTSR1_TO_SM(UTSR1_RNE)) { ch = UART_GET_CHAR(sport); sport->port.icount.rx++; flg = TTY_NORMAL; /* * note that the error handling code is * out of the main execution path */ if (status & UTSR1_TO_SM(UTSR1_PRE | UTSR1_FRE | UTSR1_ROR)) { if (status & UTSR1_TO_SM(UTSR1_PRE)) sport->port.icount.parity++; else if (status & UTSR1_TO_SM(UTSR1_FRE)) sport->port.icount.frame++; if (status & UTSR1_TO_SM(UTSR1_ROR)) sport->port.icount.overrun++; status &= sport->port.read_status_mask; if (status & UTSR1_TO_SM(UTSR1_PRE)) flg = TTY_PARITY; else if (status & UTSR1_TO_SM(UTSR1_FRE)) flg = TTY_FRAME; #ifdef SUPPORT_SYSRQ sport->port.sysrq = 0; #endif } if (uart_handle_sysrq_char(&sport->port, ch)) goto ignore_char; uart_insert_char(&sport->port, status, UTSR1_TO_SM(UTSR1_ROR), ch, flg); ignore_char: status = UTSR1_TO_SM(UART_GET_UTSR1(sport)) | UTSR0_TO_SM(UART_GET_UTSR0(sport)); } tty_flip_buffer_push(&sport->port.state->port); } static void sa1100_tx_chars(struct sa1100_port *sport) { struct circ_buf *xmit = &sport->port.state->xmit; if (sport->port.x_char) { UART_PUT_CHAR(sport, sport->port.x_char); sport->port.icount.tx++; sport->port.x_char = 0; return; } /* * Check the modem control lines before * transmitting anything. */ sa1100_mctrl_check(sport); if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) { sa1100_stop_tx(&sport->port); return; } /* * Tried using FIFO (not checking TNF) for fifo fill: * still had the '4 bytes repeated' problem. 
*/ while (UART_GET_UTSR1(sport) & UTSR1_TNF) { UART_PUT_CHAR(sport, xmit->buf[xmit->tail]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); sport->port.icount.tx++; if (uart_circ_empty(xmit)) break; } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&sport->port); if (uart_circ_empty(xmit)) sa1100_stop_tx(&sport->port); } static irqreturn_t sa1100_int(int irq, void *dev_id) { struct sa1100_port *sport = dev_id; unsigned int status, pass_counter = 0; spin_lock(&sport->port.lock); status = UART_GET_UTSR0(sport); status &= SM_TO_UTSR0(sport->port.read_status_mask) | ~UTSR0_TFS; do { if (status & (UTSR0_RFS | UTSR0_RID)) { /* Clear the receiver idle bit, if set */ if (status & UTSR0_RID) UART_PUT_UTSR0(sport, UTSR0_RID); sa1100_rx_chars(sport); } /* Clear the relevant break bits */ if (status & (UTSR0_RBB | UTSR0_REB)) UART_PUT_UTSR0(sport, status & (UTSR0_RBB | UTSR0_REB)); if (status & UTSR0_RBB) sport->port.icount.brk++; if (status & UTSR0_REB) uart_handle_break(&sport->port); if (status & UTSR0_TFS) sa1100_tx_chars(sport); if (pass_counter++ > SA1100_ISR_PASS_LIMIT) break; status = UART_GET_UTSR0(sport); status &= SM_TO_UTSR0(sport->port.read_status_mask) | ~UTSR0_TFS; } while (status & (UTSR0_TFS | UTSR0_RFS | UTSR0_RID)); spin_unlock(&sport->port.lock); return IRQ_HANDLED; } /* * Return TIOCSER_TEMT when transmitter is not busy. */ static unsigned int sa1100_tx_empty(struct uart_port *port) { struct sa1100_port *sport = (struct sa1100_port *)port; return UART_GET_UTSR1(sport) & UTSR1_TBY ? 0 : TIOCSER_TEMT; } static unsigned int sa1100_get_mctrl(struct uart_port *port) { return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; } static void sa1100_set_mctrl(struct uart_port *port, unsigned int mctrl) { } /* * Interrupts always disabled. 
*/ static void sa1100_break_ctl(struct uart_port *port, int break_state) { struct sa1100_port *sport = (struct sa1100_port *)port; unsigned long flags; unsigned int utcr3; spin_lock_irqsave(&sport->port.lock, flags); utcr3 = UART_GET_UTCR3(sport); if (break_state == -1) utcr3 |= UTCR3_BRK; else utcr3 &= ~UTCR3_BRK; UART_PUT_UTCR3(sport, utcr3); spin_unlock_irqrestore(&sport->port.lock, flags); } static int sa1100_startup(struct uart_port *port) { struct sa1100_port *sport = (struct sa1100_port *)port; int retval; /* * Allocate the IRQ */ retval = request_irq(sport->port.irq, sa1100_int, 0, "sa11x0-uart", sport); if (retval) return retval; /* * Finally, clear and enable interrupts */ UART_PUT_UTSR0(sport, -1); UART_PUT_UTCR3(sport, UTCR3_RXE | UTCR3_TXE | UTCR3_RIE); /* * Enable modem status interrupts */ spin_lock_irq(&sport->port.lock); sa1100_enable_ms(&sport->port); spin_unlock_irq(&sport->port.lock); return 0; } static void sa1100_shutdown(struct uart_port *port) { struct sa1100_port *sport = (struct sa1100_port *)port; /* * Stop our timer. */ del_timer_sync(&sport->timer); /* * Free the interrupt */ free_irq(sport->port.irq, sport); /* * Disable all interrupts, port and break condition. */ UART_PUT_UTCR3(sport, 0); } static void sa1100_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { struct sa1100_port *sport = (struct sa1100_port *)port; unsigned long flags; unsigned int utcr0, old_utcr3, baud, quot; unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8; /* * We only support CS7 and CS8. 
*/ while ((termios->c_cflag & CSIZE) != CS7 && (termios->c_cflag & CSIZE) != CS8) { termios->c_cflag &= ~CSIZE; termios->c_cflag |= old_csize; old_csize = CS8; } if ((termios->c_cflag & CSIZE) == CS8) utcr0 = UTCR0_DSS; else utcr0 = 0; if (termios->c_cflag & CSTOPB) utcr0 |= UTCR0_SBS; if (termios->c_cflag & PARENB) { utcr0 |= UTCR0_PE; if (!(termios->c_cflag & PARODD)) utcr0 |= UTCR0_OES; } /* * Ask the core to calculate the divisor for us. */ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); quot = uart_get_divisor(port, baud); spin_lock_irqsave(&sport->port.lock, flags); sport->port.read_status_mask &= UTSR0_TO_SM(UTSR0_TFS); sport->port.read_status_mask |= UTSR1_TO_SM(UTSR1_ROR); if (termios->c_iflag & INPCK) sport->port.read_status_mask |= UTSR1_TO_SM(UTSR1_FRE | UTSR1_PRE); if (termios->c_iflag & (BRKINT | PARMRK)) sport->port.read_status_mask |= UTSR0_TO_SM(UTSR0_RBB | UTSR0_REB); /* * Characters to ignore */ sport->port.ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) sport->port.ignore_status_mask |= UTSR1_TO_SM(UTSR1_FRE | UTSR1_PRE); if (termios->c_iflag & IGNBRK) { sport->port.ignore_status_mask |= UTSR0_TO_SM(UTSR0_RBB | UTSR0_REB); /* * If we're ignoring parity and break indicators, * ignore overruns too (for real raw support). */ if (termios->c_iflag & IGNPAR) sport->port.ignore_status_mask |= UTSR1_TO_SM(UTSR1_ROR); } del_timer_sync(&sport->timer); /* * Update the per-port timeout. 
*/ uart_update_timeout(port, termios->c_cflag, baud); /* * disable interrupts and drain transmitter */ old_utcr3 = UART_GET_UTCR3(sport); UART_PUT_UTCR3(sport, old_utcr3 & ~(UTCR3_RIE | UTCR3_TIE)); while (UART_GET_UTSR1(sport) & UTSR1_TBY) barrier(); /* then, disable everything */ UART_PUT_UTCR3(sport, 0); /* set the parity, stop bits and data size */ UART_PUT_UTCR0(sport, utcr0); /* set the baud rate */ quot -= 1; UART_PUT_UTCR1(sport, ((quot & 0xf00) >> 8)); UART_PUT_UTCR2(sport, (quot & 0xff)); UART_PUT_UTSR0(sport, -1); UART_PUT_UTCR3(sport, old_utcr3); if (UART_ENABLE_MS(&sport->port, termios->c_cflag)) sa1100_enable_ms(&sport->port); spin_unlock_irqrestore(&sport->port.lock, flags); } static const char *sa1100_type(struct uart_port *port) { struct sa1100_port *sport = (struct sa1100_port *)port; return sport->port.type == PORT_SA1100 ? "SA1100" : NULL; } /* * Release the memory region(s) being used by 'port'. */ static void sa1100_release_port(struct uart_port *port) { struct sa1100_port *sport = (struct sa1100_port *)port; release_mem_region(sport->port.mapbase, UART_PORT_SIZE); } /* * Request the memory region(s) being used by 'port'. */ static int sa1100_request_port(struct uart_port *port) { struct sa1100_port *sport = (struct sa1100_port *)port; return request_mem_region(sport->port.mapbase, UART_PORT_SIZE, "sa11x0-uart") != NULL ? 0 : -EBUSY; } /* * Configure/autoconfigure the port. */ static void sa1100_config_port(struct uart_port *port, int flags) { struct sa1100_port *sport = (struct sa1100_port *)port; if (flags & UART_CONFIG_TYPE && sa1100_request_port(&sport->port) == 0) sport->port.type = PORT_SA1100; } /* * Verify the new serial_struct (for TIOCSSERIAL). 
* The only change we allow are to the flags and type, and * even then only between PORT_SA1100 and PORT_UNKNOWN */ static int sa1100_verify_port(struct uart_port *port, struct serial_struct *ser) { struct sa1100_port *sport = (struct sa1100_port *)port; int ret = 0; if (ser->type != PORT_UNKNOWN && ser->type != PORT_SA1100) ret = -EINVAL; if (sport->port.irq != ser->irq) ret = -EINVAL; if (ser->io_type != SERIAL_IO_MEM) ret = -EINVAL; if (sport->port.uartclk / 16 != ser->baud_base) ret = -EINVAL; if ((void *)sport->port.mapbase != ser->iomem_base) ret = -EINVAL; if (sport->port.iobase != ser->port) ret = -EINVAL; if (ser->hub6 != 0) ret = -EINVAL; return ret; } static struct uart_ops sa1100_pops = { .tx_empty = sa1100_tx_empty, .set_mctrl = sa1100_set_mctrl, .get_mctrl = sa1100_get_mctrl, .stop_tx = sa1100_stop_tx, .start_tx = sa1100_start_tx, .stop_rx = sa1100_stop_rx, .enable_ms = sa1100_enable_ms, .break_ctl = sa1100_break_ctl, .startup = sa1100_startup, .shutdown = sa1100_shutdown, .set_termios = sa1100_set_termios, .type = sa1100_type, .release_port = sa1100_release_port, .request_port = sa1100_request_port, .config_port = sa1100_config_port, .verify_port = sa1100_verify_port, }; static struct sa1100_port sa1100_ports[NR_PORTS]; /* * Setup the SA1100 serial ports. Note that we don't include the IrDA * port here since we have our own SIR/FIR driver (see drivers/net/irda) * * Note also that we support "console=ttySAx" where "x" is either 0 or 1. * Which serial port this ends up being depends on the machine you're * running this kernel on. I'm not convinced that this is a good idea, * but that's the way it traditionally works. * * Note that NanoEngine UART3 becomes UART2, and UART2 is no longer * used here. 
*/ static void __init sa1100_init_ports(void) { static int first = 1; int i; if (!first) return; first = 0; for (i = 0; i < NR_PORTS; i++) { sa1100_ports[i].port.uartclk = 3686400; sa1100_ports[i].port.ops = &sa1100_pops; sa1100_ports[i].port.fifosize = 8; sa1100_ports[i].port.line = i; sa1100_ports[i].port.iotype = UPIO_MEM; init_timer(&sa1100_ports[i].timer); sa1100_ports[i].timer.function = sa1100_timeout; sa1100_ports[i].timer.data = (unsigned long)&sa1100_ports[i]; } /* * make transmit lines outputs, so that when the port * is closed, the output is in the MARK state. */ PPDR |= PPC_TXD1 | PPC_TXD3; PPSR |= PPC_TXD1 | PPC_TXD3; } void sa1100_register_uart_fns(struct sa1100_port_fns *fns) { if (fns->get_mctrl) sa1100_pops.get_mctrl = fns->get_mctrl; if (fns->set_mctrl) sa1100_pops.set_mctrl = fns->set_mctrl; sa1100_pops.pm = fns->pm; sa1100_pops.set_wake = fns->set_wake; } void __init sa1100_register_uart(int idx, int port) { if (idx >= NR_PORTS) { printk(KERN_ERR "%s: bad index number %d\n", __func__, idx); return; } switch (port) { case 1: sa1100_ports[idx].port.membase = (void __iomem *)&Ser1UTCR0; sa1100_ports[idx].port.mapbase = _Ser1UTCR0; sa1100_ports[idx].port.irq = IRQ_Ser1UART; sa1100_ports[idx].port.flags = UPF_BOOT_AUTOCONF; break; case 2: sa1100_ports[idx].port.membase = (void __iomem *)&Ser2UTCR0; sa1100_ports[idx].port.mapbase = _Ser2UTCR0; sa1100_ports[idx].port.irq = IRQ_Ser2ICP; sa1100_ports[idx].port.flags = UPF_BOOT_AUTOCONF; break; case 3: sa1100_ports[idx].port.membase = (void __iomem *)&Ser3UTCR0; sa1100_ports[idx].port.mapbase = _Ser3UTCR0; sa1100_ports[idx].port.irq = IRQ_Ser3UART; sa1100_ports[idx].port.flags = UPF_BOOT_AUTOCONF; break; default: printk(KERN_ERR "%s: bad port number %d\n", __func__, port); } } #ifdef CONFIG_SERIAL_SA1100_CONSOLE static void sa1100_console_putchar(struct uart_port *port, int ch) { struct sa1100_port *sport = (struct sa1100_port *)port; while (!(UART_GET_UTSR1(sport) & UTSR1_TNF)) barrier(); 
UART_PUT_CHAR(sport, ch); } /* * Interrupts are disabled on entering */ static void sa1100_console_write(struct console *co, const char *s, unsigned int count) { struct sa1100_port *sport = &sa1100_ports[co->index]; unsigned int old_utcr3, status; /* * First, save UTCR3 and then disable interrupts */ old_utcr3 = UART_GET_UTCR3(sport); UART_PUT_UTCR3(sport, (old_utcr3 & ~(UTCR3_RIE | UTCR3_TIE)) | UTCR3_TXE); uart_console_write(&sport->port, s, count, sa1100_console_putchar); /* * Finally, wait for transmitter to become empty * and restore UTCR3 */ do { status = UART_GET_UTSR1(sport); } while (status & UTSR1_TBY); UART_PUT_UTCR3(sport, old_utcr3); } /* * If the port was already initialised (eg, by a boot loader), * try to determine the current setup. */ static void __init sa1100_console_get_options(struct sa1100_port *sport, int *baud, int *parity, int *bits) { unsigned int utcr3; utcr3 = UART_GET_UTCR3(sport) & (UTCR3_RXE | UTCR3_TXE); if (utcr3 == (UTCR3_RXE | UTCR3_TXE)) { /* ok, the port was enabled */ unsigned int utcr0, quot; utcr0 = UART_GET_UTCR0(sport); *parity = 'n'; if (utcr0 & UTCR0_PE) { if (utcr0 & UTCR0_OES) *parity = 'e'; else *parity = 'o'; } if (utcr0 & UTCR0_DSS) *bits = 8; else *bits = 7; quot = UART_GET_UTCR2(sport) | UART_GET_UTCR1(sport) << 8; quot &= 0xfff; *baud = sport->port.uartclk / (16 * (quot + 1)); } } static int __init sa1100_console_setup(struct console *co, char *options) { struct sa1100_port *sport; int baud = 9600; int bits = 8; int parity = 'n'; int flow = 'n'; /* * Check whether an invalid uart number has been specified, and * if so, search for the first available port that does have * console support. 
*/ if (co->index == -1 || co->index >= NR_PORTS) co->index = 0; sport = &sa1100_ports[co->index]; if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); else sa1100_console_get_options(sport, &baud, &parity, &bits); return uart_set_options(&sport->port, co, baud, parity, bits, flow); } static struct uart_driver sa1100_reg; static struct console sa1100_console = { .name = "ttySA", .write = sa1100_console_write, .device = uart_console_device, .setup = sa1100_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &sa1100_reg, }; static int __init sa1100_rs_console_init(void) { sa1100_init_ports(); register_console(&sa1100_console); return 0; } console_initcall(sa1100_rs_console_init); #define SA1100_CONSOLE &sa1100_console #else #define SA1100_CONSOLE NULL #endif static struct uart_driver sa1100_reg = { .owner = THIS_MODULE, .driver_name = "ttySA", .dev_name = "ttySA", .major = SERIAL_SA1100_MAJOR, .minor = MINOR_START, .nr = NR_PORTS, .cons = SA1100_CONSOLE, }; static int sa1100_serial_suspend(struct platform_device *dev, pm_message_t state) { struct sa1100_port *sport = platform_get_drvdata(dev); if (sport) uart_suspend_port(&sa1100_reg, &sport->port); return 0; } static int sa1100_serial_resume(struct platform_device *dev) { struct sa1100_port *sport = platform_get_drvdata(dev); if (sport) uart_resume_port(&sa1100_reg, &sport->port); return 0; } static int sa1100_serial_probe(struct platform_device *dev) { struct resource *res = dev->resource; int i; for (i = 0; i < dev->num_resources; i++, res++) if (res->flags & IORESOURCE_MEM) break; if (i < dev->num_resources) { for (i = 0; i < NR_PORTS; i++) { if (sa1100_ports[i].port.mapbase != res->start) continue; sa1100_ports[i].port.dev = &dev->dev; uart_add_one_port(&sa1100_reg, &sa1100_ports[i].port); platform_set_drvdata(dev, &sa1100_ports[i]); break; } } return 0; } static int sa1100_serial_remove(struct platform_device *pdev) { struct sa1100_port *sport = platform_get_drvdata(pdev); 
platform_set_drvdata(pdev, NULL); if (sport) uart_remove_one_port(&sa1100_reg, &sport->port); return 0; } static struct platform_driver sa11x0_serial_driver = { .probe = sa1100_serial_probe, .remove = sa1100_serial_remove, .suspend = sa1100_serial_suspend, .resume = sa1100_serial_resume, .driver = { .name = "sa11x0-uart", .owner = THIS_MODULE, }, }; static int __init sa1100_serial_init(void) { int ret; printk(KERN_INFO "Serial: SA11x0 driver\n"); sa1100_init_ports(); ret = uart_register_driver(&sa1100_reg); if (ret == 0) { ret = platform_driver_register(&sa11x0_serial_driver); if (ret) uart_unregister_driver(&sa1100_reg); } return ret; } static void __exit sa1100_serial_exit(void) { platform_driver_unregister(&sa11x0_serial_driver); uart_unregister_driver(&sa1100_reg); } module_init(sa1100_serial_init); module_exit(sa1100_serial_exit); MODULE_AUTHOR("Deep Blue Solutions Ltd"); MODULE_DESCRIPTION("SA1100 generic serial port driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV_MAJOR(SERIAL_SA1100_MAJOR); MODULE_ALIAS("platform:sa11x0-uart");
gpl-2.0
DroidThug/kernel_delta_msm8916
net/iucv/iucv.c
2123
54974
/* * IUCV base infrastructure. * * Copyright IBM Corp. 2001, 2009 * * Author(s): * Original source: * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000 * Xenia Tkatschow (xenia@us.ibm.com) * 2Gb awareness and general cleanup: * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) * Rewritten for af_iucv: * Martin Schwidefsky <schwidefsky@de.ibm.com> * PM functions: * Ursula Braun (ursula.braun@de.ibm.com) * * Documentation used: * The original source * CP Programming Service, IBM document # SC24-5760 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define KMSG_COMPONENT "iucv" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/kernel_stat.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/spinlock.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/device.h> #include <linux/cpu.h> #include <linux/reboot.h> #include <net/iucv/iucv.h> #include <linux/atomic.h> #include <asm/ebcdic.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/smp.h> /* * FLAGS: * All flags are defined in the field IPFLAGS1 of each function * and can be found in CP Programming Services. * IPSRCCLS - Indicates you have specified a source class. * IPTRGCLS - Indicates you have specified a target class. 
* IPFGPID - Indicates you have specified a pathid. * IPFGMID - Indicates you have specified a message ID. * IPNORPY - Indicates a one-way message. No reply expected. * IPALL - Indicates that all paths are affected. */ #define IUCV_IPSRCCLS 0x01 #define IUCV_IPTRGCLS 0x01 #define IUCV_IPFGPID 0x02 #define IUCV_IPFGMID 0x04 #define IUCV_IPNORPY 0x10 #define IUCV_IPALL 0x80 static int iucv_bus_match(struct device *dev, struct device_driver *drv) { return 0; } enum iucv_pm_states { IUCV_PM_INITIAL = 0, IUCV_PM_FREEZING = 1, IUCV_PM_THAWING = 2, IUCV_PM_RESTORING = 3, }; static enum iucv_pm_states iucv_pm_state; static int iucv_pm_prepare(struct device *); static void iucv_pm_complete(struct device *); static int iucv_pm_freeze(struct device *); static int iucv_pm_thaw(struct device *); static int iucv_pm_restore(struct device *); static const struct dev_pm_ops iucv_pm_ops = { .prepare = iucv_pm_prepare, .complete = iucv_pm_complete, .freeze = iucv_pm_freeze, .thaw = iucv_pm_thaw, .restore = iucv_pm_restore, }; struct bus_type iucv_bus = { .name = "iucv", .match = iucv_bus_match, .pm = &iucv_pm_ops, }; EXPORT_SYMBOL(iucv_bus); struct device *iucv_root; EXPORT_SYMBOL(iucv_root); static int iucv_available; /* General IUCV interrupt structure */ struct iucv_irq_data { u16 ippathid; u8 ipflags1; u8 iptype; u32 res2[8]; }; struct iucv_irq_list { struct list_head list; struct iucv_irq_data data; }; static struct iucv_irq_data *iucv_irq_data[NR_CPUS]; static cpumask_t iucv_buffer_cpumask = { CPU_BITS_NONE }; static cpumask_t iucv_irq_cpumask = { CPU_BITS_NONE }; /* * Queue of interrupt buffers lock for delivery via the tasklet * (fast but can't call smp_call_function). */ static LIST_HEAD(iucv_task_queue); /* * The tasklet for fast delivery of iucv interrupts. */ static void iucv_tasklet_fn(unsigned long); static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_fn,0); /* * Queue of interrupt buffers for delivery via a work queue * (slower but can call smp_call_function). 
*/ static LIST_HEAD(iucv_work_queue); /* * The work element to deliver path pending interrupts. */ static void iucv_work_fn(struct work_struct *work); static DECLARE_WORK(iucv_work, iucv_work_fn); /* * Spinlock protecting task and work queue. */ static DEFINE_SPINLOCK(iucv_queue_lock); enum iucv_command_codes { IUCV_QUERY = 0, IUCV_RETRIEVE_BUFFER = 2, IUCV_SEND = 4, IUCV_RECEIVE = 5, IUCV_REPLY = 6, IUCV_REJECT = 8, IUCV_PURGE = 9, IUCV_ACCEPT = 10, IUCV_CONNECT = 11, IUCV_DECLARE_BUFFER = 12, IUCV_QUIESCE = 13, IUCV_RESUME = 14, IUCV_SEVER = 15, IUCV_SETMASK = 16, IUCV_SETCONTROLMASK = 17, }; /* * Error messages that are used with the iucv_sever function. They get * converted to EBCDIC. */ static char iucv_error_no_listener[16] = "NO LISTENER"; static char iucv_error_no_memory[16] = "NO MEMORY"; static char iucv_error_pathid[16] = "INVALID PATHID"; /* * iucv_handler_list: List of registered handlers. */ static LIST_HEAD(iucv_handler_list); /* * iucv_path_table: an array of iucv_path structures. */ static struct iucv_path **iucv_path_table; static unsigned long iucv_max_pathid; /* * iucv_lock: spinlock protecting iucv_handler_list and iucv_pathid_table */ static DEFINE_SPINLOCK(iucv_table_lock); /* * iucv_active_cpu: contains the number of the cpu executing the tasklet * or the work handler. Needed for iucv_path_sever called from tasklet. */ static int iucv_active_cpu = -1; /* * Mutex and wait queue for iucv_register/iucv_unregister. */ static DEFINE_MUTEX(iucv_register_mutex); /* * Counter for number of non-smp capable handlers. */ static int iucv_nonsmp_handler; /* * IUCV control data structure. Used by iucv_path_accept, iucv_path_connect, * iucv_path_quiesce and iucv_path_sever. */ struct iucv_cmd_control { u16 ippathid; u8 ipflags1; u8 iprcode; u16 ipmsglim; u16 res1; u8 ipvmid[8]; u8 ipuser[16]; u8 iptarget[8]; } __attribute__ ((packed,aligned(8))); /* * Data in parameter list iucv structure. 
Used by iucv_message_send, * iucv_message_send2way and iucv_message_reply. */ struct iucv_cmd_dpl { u16 ippathid; u8 ipflags1; u8 iprcode; u32 ipmsgid; u32 iptrgcls; u8 iprmmsg[8]; u32 ipsrccls; u32 ipmsgtag; u32 ipbfadr2; u32 ipbfln2f; u32 res; } __attribute__ ((packed,aligned(8))); /* * Data in buffer iucv structure. Used by iucv_message_receive, * iucv_message_reject, iucv_message_send, iucv_message_send2way * and iucv_declare_cpu. */ struct iucv_cmd_db { u16 ippathid; u8 ipflags1; u8 iprcode; u32 ipmsgid; u32 iptrgcls; u32 ipbfadr1; u32 ipbfln1f; u32 ipsrccls; u32 ipmsgtag; u32 ipbfadr2; u32 ipbfln2f; u32 res; } __attribute__ ((packed,aligned(8))); /* * Purge message iucv structure. Used by iucv_message_purge. */ struct iucv_cmd_purge { u16 ippathid; u8 ipflags1; u8 iprcode; u32 ipmsgid; u8 ipaudit[3]; u8 res1[5]; u32 res2; u32 ipsrccls; u32 ipmsgtag; u32 res3[3]; } __attribute__ ((packed,aligned(8))); /* * Set mask iucv structure. Used by iucv_enable_cpu. */ struct iucv_cmd_set_mask { u8 ipmask; u8 res1[2]; u8 iprcode; u32 res2[9]; } __attribute__ ((packed,aligned(8))); union iucv_param { struct iucv_cmd_control ctrl; struct iucv_cmd_dpl dpl; struct iucv_cmd_db db; struct iucv_cmd_purge purge; struct iucv_cmd_set_mask set_mask; }; /* * Anchor for per-cpu IUCV command parameter block. */ static union iucv_param *iucv_param[NR_CPUS]; static union iucv_param *iucv_param_irq[NR_CPUS]; /** * iucv_call_b2f0 * @code: identifier of IUCV call to CP. * @parm: pointer to a struct iucv_parm block * * Calls CP to execute IUCV commands. * * Returns the result of the CP IUCV call. */ static inline int iucv_call_b2f0(int command, union iucv_param *parm) { register unsigned long reg0 asm ("0"); register unsigned long reg1 asm ("1"); int ccode; reg0 = command; reg1 = virt_to_phys(parm); asm volatile( " .long 0xb2f01000\n" " ipm %0\n" " srl %0,28\n" : "=d" (ccode), "=m" (*parm), "+d" (reg0), "+a" (reg1) : "m" (*parm) : "cc"); return (ccode == 1) ? 
parm->ctrl.iprcode : ccode; } /** * iucv_query_maxconn * * Determines the maximum number of connections that may be established. * * Returns the maximum number of connections or -EPERM is IUCV is not * available. */ static int iucv_query_maxconn(void) { register unsigned long reg0 asm ("0"); register unsigned long reg1 asm ("1"); void *param; int ccode; param = kzalloc(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA); if (!param) return -ENOMEM; reg0 = IUCV_QUERY; reg1 = (unsigned long) param; asm volatile ( " .long 0xb2f01000\n" " ipm %0\n" " srl %0,28\n" : "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc"); if (ccode == 0) iucv_max_pathid = reg1; kfree(param); return ccode ? -EPERM : 0; } /** * iucv_allow_cpu * @data: unused * * Allow iucv interrupts on this cpu. */ static void iucv_allow_cpu(void *data) { int cpu = smp_processor_id(); union iucv_param *parm; /* * Enable all iucv interrupts. * ipmask contains bits for the different interrupts * 0x80 - Flag to allow nonpriority message pending interrupts * 0x40 - Flag to allow priority message pending interrupts * 0x20 - Flag to allow nonpriority message completion interrupts * 0x10 - Flag to allow priority message completion interrupts * 0x08 - Flag to allow IUCV control interrupts */ parm = iucv_param_irq[cpu]; memset(parm, 0, sizeof(union iucv_param)); parm->set_mask.ipmask = 0xf8; iucv_call_b2f0(IUCV_SETMASK, parm); /* * Enable all iucv control interrupts. * ipmask contains bits for the different interrupts * 0x80 - Flag to allow pending connections interrupts * 0x40 - Flag to allow connection complete interrupts * 0x20 - Flag to allow connection severed interrupts * 0x10 - Flag to allow connection quiesced interrupts * 0x08 - Flag to allow connection resumed interrupts */ memset(parm, 0, sizeof(union iucv_param)); parm->set_mask.ipmask = 0xf8; iucv_call_b2f0(IUCV_SETCONTROLMASK, parm); /* Set indication that iucv interrupts are allowed for this cpu. 
*/ cpumask_set_cpu(cpu, &iucv_irq_cpumask); } /** * iucv_block_cpu * @data: unused * * Block iucv interrupts on this cpu. */ static void iucv_block_cpu(void *data) { int cpu = smp_processor_id(); union iucv_param *parm; /* Disable all iucv interrupts. */ parm = iucv_param_irq[cpu]; memset(parm, 0, sizeof(union iucv_param)); iucv_call_b2f0(IUCV_SETMASK, parm); /* Clear indication that iucv interrupts are allowed for this cpu. */ cpumask_clear_cpu(cpu, &iucv_irq_cpumask); } /** * iucv_block_cpu_almost * @data: unused * * Allow connection-severed interrupts only on this cpu. */ static void iucv_block_cpu_almost(void *data) { int cpu = smp_processor_id(); union iucv_param *parm; /* Allow iucv control interrupts only */ parm = iucv_param_irq[cpu]; memset(parm, 0, sizeof(union iucv_param)); parm->set_mask.ipmask = 0x08; iucv_call_b2f0(IUCV_SETMASK, parm); /* Allow iucv-severed interrupt only */ memset(parm, 0, sizeof(union iucv_param)); parm->set_mask.ipmask = 0x20; iucv_call_b2f0(IUCV_SETCONTROLMASK, parm); /* Clear indication that iucv interrupts are allowed for this cpu. */ cpumask_clear_cpu(cpu, &iucv_irq_cpumask); } /** * iucv_declare_cpu * @data: unused * * Declare a interrupt buffer on this cpu. */ static void iucv_declare_cpu(void *data) { int cpu = smp_processor_id(); union iucv_param *parm; int rc; if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask)) return; /* Declare interrupt buffer. 
*/ parm = iucv_param_irq[cpu]; memset(parm, 0, sizeof(union iucv_param)); parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]); rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm); if (rc) { char *err = "Unknown"; switch (rc) { case 0x03: err = "Directory error"; break; case 0x0a: err = "Invalid length"; break; case 0x13: err = "Buffer already exists"; break; case 0x3e: err = "Buffer overlap"; break; case 0x5c: err = "Paging or storage error"; break; } pr_warning("Defining an interrupt buffer on CPU %i" " failed with 0x%02x (%s)\n", cpu, rc, err); return; } /* Set indication that an iucv buffer exists for this cpu. */ cpumask_set_cpu(cpu, &iucv_buffer_cpumask); if (iucv_nonsmp_handler == 0 || cpumask_empty(&iucv_irq_cpumask)) /* Enable iucv interrupts on this cpu. */ iucv_allow_cpu(NULL); else /* Disable iucv interrupts on this cpu. */ iucv_block_cpu(NULL); } /** * iucv_retrieve_cpu * @data: unused * * Retrieve interrupt buffer on this cpu. */ static void iucv_retrieve_cpu(void *data) { int cpu = smp_processor_id(); union iucv_param *parm; if (!cpumask_test_cpu(cpu, &iucv_buffer_cpumask)) return; /* Block iucv interrupts. */ iucv_block_cpu(NULL); /* Retrieve interrupt buffer. */ parm = iucv_param_irq[cpu]; iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm); /* Clear indication that an iucv buffer exists for this cpu. */ cpumask_clear_cpu(cpu, &iucv_buffer_cpumask); } /** * iucv_setmask_smp * * Allow iucv interrupts on all cpus. */ static void iucv_setmask_mp(void) { int cpu; get_online_cpus(); for_each_online_cpu(cpu) /* Enable all cpus with a declared buffer. */ if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask) && !cpumask_test_cpu(cpu, &iucv_irq_cpumask)) smp_call_function_single(cpu, iucv_allow_cpu, NULL, 1); put_online_cpus(); } /** * iucv_setmask_up * * Allow iucv interrupts on a single cpu. */ static void iucv_setmask_up(void) { cpumask_t cpumask; int cpu; /* Disable all cpu but the first in cpu_irq_cpumask. 
*/ cpumask_copy(&cpumask, &iucv_irq_cpumask); cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), &cpumask); for_each_cpu(cpu, &cpumask) smp_call_function_single(cpu, iucv_block_cpu, NULL, 1); } /** * iucv_enable * * This function makes iucv ready for use. It allocates the pathid * table, declares an iucv interrupt buffer and enables the iucv * interrupts. Called when the first user has registered an iucv * handler. */ static int iucv_enable(void) { size_t alloc_size; int cpu, rc; get_online_cpus(); rc = -ENOMEM; alloc_size = iucv_max_pathid * sizeof(struct iucv_path); iucv_path_table = kzalloc(alloc_size, GFP_KERNEL); if (!iucv_path_table) goto out; /* Declare per cpu buffers. */ rc = -EIO; for_each_online_cpu(cpu) smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); if (cpumask_empty(&iucv_buffer_cpumask)) /* No cpu could declare an iucv buffer. */ goto out; put_online_cpus(); return 0; out: kfree(iucv_path_table); iucv_path_table = NULL; put_online_cpus(); return rc; } /** * iucv_disable * * This function shuts down iucv. It disables iucv interrupts, retrieves * the iucv interrupt buffer and frees the pathid table. Called after the * last user unregister its iucv handler. 
*/ static void iucv_disable(void) { get_online_cpus(); on_each_cpu(iucv_retrieve_cpu, NULL, 1); kfree(iucv_path_table); iucv_path_table = NULL; put_online_cpus(); } static int __cpuinit iucv_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { cpumask_t cpumask; long cpu = (long) hcpu; switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data), GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); if (!iucv_irq_data[cpu]) return notifier_from_errno(-ENOMEM); iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); if (!iucv_param[cpu]) { kfree(iucv_irq_data[cpu]); iucv_irq_data[cpu] = NULL; return notifier_from_errno(-ENOMEM); } iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); if (!iucv_param_irq[cpu]) { kfree(iucv_param[cpu]); iucv_param[cpu] = NULL; kfree(iucv_irq_data[cpu]); iucv_irq_data[cpu] = NULL; return notifier_from_errno(-ENOMEM); } break; case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: case CPU_DEAD: case CPU_DEAD_FROZEN: kfree(iucv_param_irq[cpu]); iucv_param_irq[cpu] = NULL; kfree(iucv_param[cpu]); iucv_param[cpu] = NULL; kfree(iucv_irq_data[cpu]); iucv_irq_data[cpu] = NULL; break; case CPU_ONLINE: case CPU_ONLINE_FROZEN: case CPU_DOWN_FAILED: case CPU_DOWN_FAILED_FROZEN: if (!iucv_path_table) break; smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); break; case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: if (!iucv_path_table) break; cpumask_copy(&cpumask, &iucv_buffer_cpumask); cpumask_clear_cpu(cpu, &cpumask); if (cpumask_empty(&cpumask)) /* Can't offline last IUCV enabled cpu. 
*/ return notifier_from_errno(-EINVAL); smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1); if (cpumask_empty(&iucv_irq_cpumask)) smp_call_function_single( cpumask_first(&iucv_buffer_cpumask), iucv_allow_cpu, NULL, 1); break; } return NOTIFY_OK; } static struct notifier_block __refdata iucv_cpu_notifier = { .notifier_call = iucv_cpu_notify, }; /** * iucv_sever_pathid * @pathid: path identification number. * @userdata: 16-bytes of user data. * * Sever an iucv path to free up the pathid. Used internally. */ static int iucv_sever_pathid(u16 pathid, u8 userdata[16]) { union iucv_param *parm; parm = iucv_param_irq[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); if (userdata) memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); parm->ctrl.ippathid = pathid; return iucv_call_b2f0(IUCV_SEVER, parm); } /** * __iucv_cleanup_queue * @dummy: unused dummy argument * * Nop function called via smp_call_function to force work items from * pending external iucv interrupts to the work queue. */ static void __iucv_cleanup_queue(void *dummy) { } /** * iucv_cleanup_queue * * Function called after a path has been severed to find all remaining * work items for the now stale pathid. The caller needs to hold the * iucv_table_lock. */ static void iucv_cleanup_queue(void) { struct iucv_irq_list *p, *n; /* * When a path is severed, the pathid can be reused immediately * on a iucv connect or a connection pending interrupt. Remove * all entries from the task queue that refer to a stale pathid * (iucv_path_table[ix] == NULL). Only then do the iucv connect * or deliver the connection pending interrupt. To get all the * pending interrupts force them to the work queue by calling * an empty function on all cpus. */ smp_call_function(__iucv_cleanup_queue, NULL, 1); spin_lock_irq(&iucv_queue_lock); list_for_each_entry_safe(p, n, &iucv_task_queue, list) { /* Remove stale work items from the task queue. 
*/ if (iucv_path_table[p->data.ippathid] == NULL) { list_del(&p->list); kfree(p); } } spin_unlock_irq(&iucv_queue_lock); } /** * iucv_register: * @handler: address of iucv handler structure * @smp: != 0 indicates that the handler can deal with out of order messages * * Registers a driver with IUCV. * * Returns 0 on success, -ENOMEM if the memory allocation for the pathid * table failed, or -EIO if IUCV_DECLARE_BUFFER failed on all cpus. */ int iucv_register(struct iucv_handler *handler, int smp) { int rc; if (!iucv_available) return -ENOSYS; mutex_lock(&iucv_register_mutex); if (!smp) iucv_nonsmp_handler++; if (list_empty(&iucv_handler_list)) { rc = iucv_enable(); if (rc) goto out_mutex; } else if (!smp && iucv_nonsmp_handler == 1) iucv_setmask_up(); INIT_LIST_HEAD(&handler->paths); spin_lock_bh(&iucv_table_lock); list_add_tail(&handler->list, &iucv_handler_list); spin_unlock_bh(&iucv_table_lock); rc = 0; out_mutex: mutex_unlock(&iucv_register_mutex); return rc; } EXPORT_SYMBOL(iucv_register); /** * iucv_unregister * @handler: address of iucv handler structure * @smp: != 0 indicates that the handler can deal with out of order messages * * Unregister driver from IUCV. */ void iucv_unregister(struct iucv_handler *handler, int smp) { struct iucv_path *p, *n; mutex_lock(&iucv_register_mutex); spin_lock_bh(&iucv_table_lock); /* Remove handler from the iucv_handler_list. */ list_del_init(&handler->list); /* Sever all pathids still referring to the handler. 
*/ list_for_each_entry_safe(p, n, &handler->paths, list) { iucv_sever_pathid(p->pathid, NULL); iucv_path_table[p->pathid] = NULL; list_del(&p->list); iucv_path_free(p); } spin_unlock_bh(&iucv_table_lock); if (!smp) iucv_nonsmp_handler--; if (list_empty(&iucv_handler_list)) iucv_disable(); else if (!smp && iucv_nonsmp_handler == 0) iucv_setmask_mp(); mutex_unlock(&iucv_register_mutex); } EXPORT_SYMBOL(iucv_unregister); static int iucv_reboot_event(struct notifier_block *this, unsigned long event, void *ptr) { int i; if (cpumask_empty(&iucv_irq_cpumask)) return NOTIFY_DONE; get_online_cpus(); on_each_cpu_mask(&iucv_irq_cpumask, iucv_block_cpu, NULL, 1); preempt_disable(); for (i = 0; i < iucv_max_pathid; i++) { if (iucv_path_table[i]) iucv_sever_pathid(i, NULL); } preempt_enable(); put_online_cpus(); iucv_disable(); return NOTIFY_DONE; } static struct notifier_block iucv_reboot_notifier = { .notifier_call = iucv_reboot_event, }; /** * iucv_path_accept * @path: address of iucv path structure * @handler: address of iucv handler structure * @userdata: 16 bytes of data reflected to the communication partner * @private: private data passed to interrupt handlers for this path * * This function is issued after the user received a connection pending * external interrupt and now wishes to complete the IUCV communication path. * * Returns the result of the CP IUCV call. */ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler, u8 userdata[16], void *private) { union iucv_param *parm; int rc; local_bh_disable(); if (cpumask_empty(&iucv_buffer_cpumask)) { rc = -EIO; goto out; } /* Prepare parameter block. 
*/ parm = iucv_param[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); parm->ctrl.ippathid = path->pathid; parm->ctrl.ipmsglim = path->msglim; if (userdata) memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); parm->ctrl.ipflags1 = path->flags; rc = iucv_call_b2f0(IUCV_ACCEPT, parm); if (!rc) { path->private = private; path->msglim = parm->ctrl.ipmsglim; path->flags = parm->ctrl.ipflags1; } out: local_bh_enable(); return rc; } EXPORT_SYMBOL(iucv_path_accept); /** * iucv_path_connect * @path: address of iucv path structure * @handler: address of iucv handler structure * @userid: 8-byte user identification * @system: 8-byte target system identification * @userdata: 16 bytes of data reflected to the communication partner * @private: private data passed to interrupt handlers for this path * * This function establishes an IUCV path. Although the connect may complete * successfully, you are not able to use the path until you receive an IUCV * Connection Complete external interrupt. * * Returns the result of the CP IUCV call. 
*/ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler, u8 userid[8], u8 system[8], u8 userdata[16], void *private) { union iucv_param *parm; int rc; spin_lock_bh(&iucv_table_lock); iucv_cleanup_queue(); if (cpumask_empty(&iucv_buffer_cpumask)) { rc = -EIO; goto out; } parm = iucv_param[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); parm->ctrl.ipmsglim = path->msglim; parm->ctrl.ipflags1 = path->flags; if (userid) { memcpy(parm->ctrl.ipvmid, userid, sizeof(parm->ctrl.ipvmid)); ASCEBC(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid)); EBC_TOUPPER(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid)); } if (system) { memcpy(parm->ctrl.iptarget, system, sizeof(parm->ctrl.iptarget)); ASCEBC(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget)); EBC_TOUPPER(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget)); } if (userdata) memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); rc = iucv_call_b2f0(IUCV_CONNECT, parm); if (!rc) { if (parm->ctrl.ippathid < iucv_max_pathid) { path->pathid = parm->ctrl.ippathid; path->msglim = parm->ctrl.ipmsglim; path->flags = parm->ctrl.ipflags1; path->handler = handler; path->private = private; list_add_tail(&path->list, &handler->paths); iucv_path_table[path->pathid] = path; } else { iucv_sever_pathid(parm->ctrl.ippathid, iucv_error_pathid); rc = -EIO; } } out: spin_unlock_bh(&iucv_table_lock); return rc; } EXPORT_SYMBOL(iucv_path_connect); /** * iucv_path_quiesce: * @path: address of iucv path structure * @userdata: 16 bytes of data reflected to the communication partner * * This function temporarily suspends incoming messages on an IUCV path. * You can later reactivate the path by invoking the iucv_resume function. * * Returns the result from the CP IUCV call. 
*/ int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16]) { union iucv_param *parm; int rc; local_bh_disable(); if (cpumask_empty(&iucv_buffer_cpumask)) { rc = -EIO; goto out; } parm = iucv_param[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); if (userdata) memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); parm->ctrl.ippathid = path->pathid; rc = iucv_call_b2f0(IUCV_QUIESCE, parm); out: local_bh_enable(); return rc; } EXPORT_SYMBOL(iucv_path_quiesce); /** * iucv_path_resume: * @path: address of iucv path structure * @userdata: 16 bytes of data reflected to the communication partner * * This function resumes incoming messages on an IUCV path that has * been stopped with iucv_path_quiesce. * * Returns the result from the CP IUCV call. */ int iucv_path_resume(struct iucv_path *path, u8 userdata[16]) { union iucv_param *parm; int rc; local_bh_disable(); if (cpumask_empty(&iucv_buffer_cpumask)) { rc = -EIO; goto out; } parm = iucv_param[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); if (userdata) memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); parm->ctrl.ippathid = path->pathid; rc = iucv_call_b2f0(IUCV_RESUME, parm); out: local_bh_enable(); return rc; } /** * iucv_path_sever * @path: address of iucv path structure * @userdata: 16 bytes of data reflected to the communication partner * * This function terminates an IUCV path. * * Returns the result from the CP IUCV call. 
*/ int iucv_path_sever(struct iucv_path *path, u8 userdata[16]) { int rc; preempt_disable(); if (cpumask_empty(&iucv_buffer_cpumask)) { rc = -EIO; goto out; } if (iucv_active_cpu != smp_processor_id()) spin_lock_bh(&iucv_table_lock); rc = iucv_sever_pathid(path->pathid, userdata); iucv_path_table[path->pathid] = NULL; list_del_init(&path->list); if (iucv_active_cpu != smp_processor_id()) spin_unlock_bh(&iucv_table_lock); out: preempt_enable(); return rc; } EXPORT_SYMBOL(iucv_path_sever); /** * iucv_message_purge * @path: address of iucv path structure * @msg: address of iucv msg structure * @srccls: source class of message * * Cancels a message you have sent. * * Returns the result from the CP IUCV call. */ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg, u32 srccls) { union iucv_param *parm; int rc; local_bh_disable(); if (cpumask_empty(&iucv_buffer_cpumask)) { rc = -EIO; goto out; } parm = iucv_param[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); parm->purge.ippathid = path->pathid; parm->purge.ipmsgid = msg->id; parm->purge.ipsrccls = srccls; parm->purge.ipflags1 = IUCV_IPSRCCLS | IUCV_IPFGMID | IUCV_IPFGPID; rc = iucv_call_b2f0(IUCV_PURGE, parm); if (!rc) { msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8; msg->tag = parm->purge.ipmsgtag; } out: local_bh_enable(); return rc; } EXPORT_SYMBOL(iucv_message_purge); /** * iucv_message_receive_iprmdata * @path: address of iucv path structure * @msg: address of iucv msg structure * @flags: how the message is received (IUCV_IPBUFLST) * @buffer: address of data buffer or address of struct iucv_array * @size: length of data buffer * @residual: * * Internal function used by iucv_message_receive and __iucv_message_receive * to receive RMDATA data stored in struct iucv_message. 
*/ static int iucv_message_receive_iprmdata(struct iucv_path *path, struct iucv_message *msg, u8 flags, void *buffer, size_t size, size_t *residual) { struct iucv_array *array; u8 *rmmsg; size_t copy; /* * Message is 8 bytes long and has been stored to the * message descriptor itself. */ if (residual) *residual = abs(size - 8); rmmsg = msg->rmmsg; if (flags & IUCV_IPBUFLST) { /* Copy to struct iucv_array. */ size = (size < 8) ? size : 8; for (array = buffer; size > 0; array++) { copy = min_t(size_t, size, array->length); memcpy((u8 *)(addr_t) array->address, rmmsg, copy); rmmsg += copy; size -= copy; } } else { /* Copy to direct buffer. */ memcpy(buffer, rmmsg, min_t(size_t, size, 8)); } return 0; } /** * __iucv_message_receive * @path: address of iucv path structure * @msg: address of iucv msg structure * @flags: how the message is received (IUCV_IPBUFLST) * @buffer: address of data buffer or address of struct iucv_array * @size: length of data buffer * @residual: * * This function receives messages that are being sent to you over * established paths. This function will deal with RMDATA messages * embedded in struct iucv_message as well. * * Locking: no locking * * Returns the result from the CP IUCV call. 
*/ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, u8 flags, void *buffer, size_t size, size_t *residual) { union iucv_param *parm; int rc; if (msg->flags & IUCV_IPRMDATA) return iucv_message_receive_iprmdata(path, msg, flags, buffer, size, residual); if (cpumask_empty(&iucv_buffer_cpumask)) { rc = -EIO; goto out; } parm = iucv_param[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); parm->db.ipbfadr1 = (u32)(addr_t) buffer; parm->db.ipbfln1f = (u32) size; parm->db.ipmsgid = msg->id; parm->db.ippathid = path->pathid; parm->db.iptrgcls = msg->class; parm->db.ipflags1 = (flags | IUCV_IPFGPID | IUCV_IPFGMID | IUCV_IPTRGCLS); rc = iucv_call_b2f0(IUCV_RECEIVE, parm); if (!rc || rc == 5) { msg->flags = parm->db.ipflags1; if (residual) *residual = parm->db.ipbfln1f; } out: return rc; } EXPORT_SYMBOL(__iucv_message_receive); /** * iucv_message_receive * @path: address of iucv path structure * @msg: address of iucv msg structure * @flags: how the message is received (IUCV_IPBUFLST) * @buffer: address of data buffer or address of struct iucv_array * @size: length of data buffer * @residual: * * This function receives messages that are being sent to you over * established paths. This function will deal with RMDATA messages * embedded in struct iucv_message as well. * * Locking: local_bh_enable/local_bh_disable * * Returns the result from the CP IUCV call. */ int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, u8 flags, void *buffer, size_t size, size_t *residual) { int rc; if (msg->flags & IUCV_IPRMDATA) return iucv_message_receive_iprmdata(path, msg, flags, buffer, size, residual); local_bh_disable(); rc = __iucv_message_receive(path, msg, flags, buffer, size, residual); local_bh_enable(); return rc; } EXPORT_SYMBOL(iucv_message_receive); /** * iucv_message_reject * @path: address of iucv path structure * @msg: address of iucv msg structure * * The reject function refuses a specified message. 
Between the time you
 * are notified of a message and the time that you complete the message,
 * the message may be rejected.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	/* Fail fast if IUCV is not enabled on any CPU. */
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	/* Per-CPU parameter block; BHs are off, so we stay on this CPU. */
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ippathid = path->pathid;
	parm->db.ipmsgid = msg->id;
	parm->db.iptrgcls = msg->class;
	parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID);
	rc = iucv_call_b2f0(IUCV_REJECT, parm);
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_reject);

/**
 * iucv_message_reply
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
 * @reply: address of reply data buffer or address of struct iucv_array
 * @size: length of reply data buffer
 *
 * This function responds to the two-way messages that you receive. You
 * must identify completely the message to which you wish to reply. ie,
 * pathid, msgid, and trgcls. Prmmsg signifies the data is moved into
 * the parameter list.
 *
 * Returns the result from the CP IUCV call.
*/ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg, u8 flags, void *reply, size_t size) { union iucv_param *parm; int rc; local_bh_disable(); if (cpumask_empty(&iucv_buffer_cpumask)) { rc = -EIO; goto out; } parm = iucv_param[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); if (flags & IUCV_IPRMDATA) { parm->dpl.ippathid = path->pathid; parm->dpl.ipflags1 = flags; parm->dpl.ipmsgid = msg->id; parm->dpl.iptrgcls = msg->class; memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8)); } else { parm->db.ipbfadr1 = (u32)(addr_t) reply; parm->db.ipbfln1f = (u32) size; parm->db.ippathid = path->pathid; parm->db.ipflags1 = flags; parm->db.ipmsgid = msg->id; parm->db.iptrgcls = msg->class; } rc = iucv_call_b2f0(IUCV_REPLY, parm); out: local_bh_enable(); return rc; } EXPORT_SYMBOL(iucv_message_reply); /** * __iucv_message_send * @path: address of iucv path structure * @msg: address of iucv msg structure * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) * @srccls: source class of message * @buffer: address of send buffer or address of struct iucv_array * @size: length of send buffer * * This function transmits data to another application. Data to be * transmitted is in a buffer and this is a one-way message and the * receiver will not reply to the message. * * Locking: no locking * * Returns the result from the CP IUCV call. */ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg, u8 flags, u32 srccls, void *buffer, size_t size) { union iucv_param *parm; int rc; if (cpumask_empty(&iucv_buffer_cpumask)) { rc = -EIO; goto out; } parm = iucv_param[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); if (flags & IUCV_IPRMDATA) { /* Message of 8 bytes can be placed into the parameter list. 
*/ parm->dpl.ippathid = path->pathid; parm->dpl.ipflags1 = flags | IUCV_IPNORPY; parm->dpl.iptrgcls = msg->class; parm->dpl.ipsrccls = srccls; parm->dpl.ipmsgtag = msg->tag; memcpy(parm->dpl.iprmmsg, buffer, 8); } else { parm->db.ipbfadr1 = (u32)(addr_t) buffer; parm->db.ipbfln1f = (u32) size; parm->db.ippathid = path->pathid; parm->db.ipflags1 = flags | IUCV_IPNORPY; parm->db.iptrgcls = msg->class; parm->db.ipsrccls = srccls; parm->db.ipmsgtag = msg->tag; } rc = iucv_call_b2f0(IUCV_SEND, parm); if (!rc) msg->id = parm->db.ipmsgid; out: return rc; } EXPORT_SYMBOL(__iucv_message_send); /** * iucv_message_send * @path: address of iucv path structure * @msg: address of iucv msg structure * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) * @srccls: source class of message * @buffer: address of send buffer or address of struct iucv_array * @size: length of send buffer * * This function transmits data to another application. Data to be * transmitted is in a buffer and this is a one-way message and the * receiver will not reply to the message. * * Locking: local_bh_enable/local_bh_disable * * Returns the result from the CP IUCV call. */ int iucv_message_send(struct iucv_path *path, struct iucv_message *msg, u8 flags, u32 srccls, void *buffer, size_t size) { int rc; local_bh_disable(); rc = __iucv_message_send(path, msg, flags, srccls, buffer, size); local_bh_enable(); return rc; } EXPORT_SYMBOL(iucv_message_send); /** * iucv_message_send2way * @path: address of iucv path structure * @msg: address of iucv msg structure * @flags: how the message is sent and the reply is received * (IUCV_IPRMDATA, IUCV_IPBUFLST, IUCV_IPPRTY, IUCV_ANSLST) * @srccls: source class of message * @buffer: address of send buffer or address of struct iucv_array * @size: length of send buffer * @ansbuf: address of answer buffer or address of struct iucv_array * @asize: size of reply buffer * * This function transmits data to another application. 
Data to be * transmitted is in a buffer. The receiver of the send is expected to * reply to the message and a buffer is provided into which IUCV moves * the reply to this message. * * Returns the result from the CP IUCV call. */ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg, u8 flags, u32 srccls, void *buffer, size_t size, void *answer, size_t asize, size_t *residual) { union iucv_param *parm; int rc; local_bh_disable(); if (cpumask_empty(&iucv_buffer_cpumask)) { rc = -EIO; goto out; } parm = iucv_param[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); if (flags & IUCV_IPRMDATA) { parm->dpl.ippathid = path->pathid; parm->dpl.ipflags1 = path->flags; /* priority message */ parm->dpl.iptrgcls = msg->class; parm->dpl.ipsrccls = srccls; parm->dpl.ipmsgtag = msg->tag; parm->dpl.ipbfadr2 = (u32)(addr_t) answer; parm->dpl.ipbfln2f = (u32) asize; memcpy(parm->dpl.iprmmsg, buffer, 8); } else { parm->db.ippathid = path->pathid; parm->db.ipflags1 = path->flags; /* priority message */ parm->db.iptrgcls = msg->class; parm->db.ipsrccls = srccls; parm->db.ipmsgtag = msg->tag; parm->db.ipbfadr1 = (u32)(addr_t) buffer; parm->db.ipbfln1f = (u32) size; parm->db.ipbfadr2 = (u32)(addr_t) answer; parm->db.ipbfln2f = (u32) asize; } rc = iucv_call_b2f0(IUCV_SEND, parm); if (!rc) msg->id = parm->db.ipmsgid; out: local_bh_enable(); return rc; } EXPORT_SYMBOL(iucv_message_send2way); /** * iucv_path_pending * @data: Pointer to external interrupt buffer * * Process connection pending work item. Called from tasklet while holding * iucv_table_lock. */ struct iucv_path_pending { u16 ippathid; u8 ipflags1; u8 iptype; u16 ipmsglim; u16 res1; u8 ipvmid[8]; u8 ipuser[16]; u32 res3; u8 ippollfg; u8 res4[3]; } __packed; static void iucv_path_pending(struct iucv_irq_data *data) { struct iucv_path_pending *ipp = (void *) data; struct iucv_handler *handler; struct iucv_path *path; char *error; BUG_ON(iucv_path_table[ipp->ippathid]); /* New pathid, handler found. 
Create a new path struct. */ error = iucv_error_no_memory; path = iucv_path_alloc(ipp->ipmsglim, ipp->ipflags1, GFP_ATOMIC); if (!path) goto out_sever; path->pathid = ipp->ippathid; iucv_path_table[path->pathid] = path; EBCASC(ipp->ipvmid, 8); /* Call registered handler until one is found that wants the path. */ list_for_each_entry(handler, &iucv_handler_list, list) { if (!handler->path_pending) continue; /* * Add path to handler to allow a call to iucv_path_sever * inside the path_pending function. If the handler returns * an error remove the path from the handler again. */ list_add(&path->list, &handler->paths); path->handler = handler; if (!handler->path_pending(path, ipp->ipvmid, ipp->ipuser)) return; list_del(&path->list); path->handler = NULL; } /* No handler wanted the path. */ iucv_path_table[path->pathid] = NULL; iucv_path_free(path); error = iucv_error_no_listener; out_sever: iucv_sever_pathid(ipp->ippathid, error); } /** * iucv_path_complete * @data: Pointer to external interrupt buffer * * Process connection complete work item. Called from tasklet while holding * iucv_table_lock. */ struct iucv_path_complete { u16 ippathid; u8 ipflags1; u8 iptype; u16 ipmsglim; u16 res1; u8 res2[8]; u8 ipuser[16]; u32 res3; u8 ippollfg; u8 res4[3]; } __packed; static void iucv_path_complete(struct iucv_irq_data *data) { struct iucv_path_complete *ipc = (void *) data; struct iucv_path *path = iucv_path_table[ipc->ippathid]; if (path) path->flags = ipc->ipflags1; if (path && path->handler && path->handler->path_complete) path->handler->path_complete(path, ipc->ipuser); } /** * iucv_path_severed * @data: Pointer to external interrupt buffer * * Process connection severed work item. Called from tasklet while holding * iucv_table_lock. 
*/ struct iucv_path_severed { u16 ippathid; u8 res1; u8 iptype; u32 res2; u8 res3[8]; u8 ipuser[16]; u32 res4; u8 ippollfg; u8 res5[3]; } __packed; static void iucv_path_severed(struct iucv_irq_data *data) { struct iucv_path_severed *ips = (void *) data; struct iucv_path *path = iucv_path_table[ips->ippathid]; if (!path || !path->handler) /* Already severed */ return; if (path->handler->path_severed) path->handler->path_severed(path, ips->ipuser); else { iucv_sever_pathid(path->pathid, NULL); iucv_path_table[path->pathid] = NULL; list_del(&path->list); iucv_path_free(path); } } /** * iucv_path_quiesced * @data: Pointer to external interrupt buffer * * Process connection quiesced work item. Called from tasklet while holding * iucv_table_lock. */ struct iucv_path_quiesced { u16 ippathid; u8 res1; u8 iptype; u32 res2; u8 res3[8]; u8 ipuser[16]; u32 res4; u8 ippollfg; u8 res5[3]; } __packed; static void iucv_path_quiesced(struct iucv_irq_data *data) { struct iucv_path_quiesced *ipq = (void *) data; struct iucv_path *path = iucv_path_table[ipq->ippathid]; if (path && path->handler && path->handler->path_quiesced) path->handler->path_quiesced(path, ipq->ipuser); } /** * iucv_path_resumed * @data: Pointer to external interrupt buffer * * Process connection resumed work item. Called from tasklet while holding * iucv_table_lock. */ struct iucv_path_resumed { u16 ippathid; u8 res1; u8 iptype; u32 res2; u8 res3[8]; u8 ipuser[16]; u32 res4; u8 ippollfg; u8 res5[3]; } __packed; static void iucv_path_resumed(struct iucv_irq_data *data) { struct iucv_path_resumed *ipr = (void *) data; struct iucv_path *path = iucv_path_table[ipr->ippathid]; if (path && path->handler && path->handler->path_resumed) path->handler->path_resumed(path, ipr->ipuser); } /** * iucv_message_complete * @data: Pointer to external interrupt buffer * * Process message complete work item. Called from tasklet while holding * iucv_table_lock. 
*/ struct iucv_message_complete { u16 ippathid; u8 ipflags1; u8 iptype; u32 ipmsgid; u32 ipaudit; u8 iprmmsg[8]; u32 ipsrccls; u32 ipmsgtag; u32 res; u32 ipbfln2f; u8 ippollfg; u8 res2[3]; } __packed; static void iucv_message_complete(struct iucv_irq_data *data) { struct iucv_message_complete *imc = (void *) data; struct iucv_path *path = iucv_path_table[imc->ippathid]; struct iucv_message msg; if (path && path->handler && path->handler->message_complete) { msg.flags = imc->ipflags1; msg.id = imc->ipmsgid; msg.audit = imc->ipaudit; memcpy(msg.rmmsg, imc->iprmmsg, 8); msg.class = imc->ipsrccls; msg.tag = imc->ipmsgtag; msg.length = imc->ipbfln2f; path->handler->message_complete(path, &msg); } } /** * iucv_message_pending * @data: Pointer to external interrupt buffer * * Process message pending work item. Called from tasklet while holding * iucv_table_lock. */ struct iucv_message_pending { u16 ippathid; u8 ipflags1; u8 iptype; u32 ipmsgid; u32 iptrgcls; union { u32 iprmmsg1_u32; u8 iprmmsg1[4]; } ln1msg1; union { u32 ipbfln1f; u8 iprmmsg2[4]; } ln1msg2; u32 res1[3]; u32 ipbfln2f; u8 ippollfg; u8 res2[3]; } __packed; static void iucv_message_pending(struct iucv_irq_data *data) { struct iucv_message_pending *imp = (void *) data; struct iucv_path *path = iucv_path_table[imp->ippathid]; struct iucv_message msg; if (path && path->handler && path->handler->message_pending) { msg.flags = imp->ipflags1; msg.id = imp->ipmsgid; msg.class = imp->iptrgcls; if (imp->ipflags1 & IUCV_IPRMDATA) { memcpy(msg.rmmsg, imp->ln1msg1.iprmmsg1, 8); msg.length = 8; } else msg.length = imp->ln1msg2.ipbfln1f; msg.reply_size = imp->ipbfln2f; path->handler->message_pending(path, &msg); } } /** * iucv_tasklet_fn: * * This tasklet loops over the queue of irq buffers created by * iucv_external_interrupt, calls the appropriate action handler * and then frees the buffer. 
 */
static void iucv_tasklet_fn(unsigned long ignored)
{
	typedef void iucv_irq_fn(struct iucv_irq_data *);
	/* Dispatch table indexed by interrupt type (0x02 - 0x09). */
	static iucv_irq_fn *irq_fn[] = {
		[0x02] = iucv_path_complete,
		[0x03] = iucv_path_severed,
		[0x04] = iucv_path_quiesced,
		[0x05] = iucv_path_resumed,
		[0x06] = iucv_message_complete,
		[0x07] = iucv_message_complete,
		[0x08] = iucv_message_pending,
		[0x09] = iucv_message_pending,
	};
	LIST_HEAD(task_queue);
	struct iucv_irq_list *p, *n;

	/* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
	if (!spin_trylock(&iucv_table_lock)) {
		/* Lock is busy; reschedule ourselves instead of spinning. */
		tasklet_schedule(&iucv_tasklet);
		return;
	}
	/* Lets iucv_path_sever() skip the table lock on this CPU. */
	iucv_active_cpu = smp_processor_id();

	/* Atomically steal the whole pending-interrupt queue. */
	spin_lock_irq(&iucv_queue_lock);
	list_splice_init(&iucv_task_queue, &task_queue);
	spin_unlock_irq(&iucv_queue_lock);

	list_for_each_entry_safe(p, n, &task_queue, list) {
		list_del_init(&p->list);
		irq_fn[p->data.iptype](&p->data);
		kfree(p);
	}

	iucv_active_cpu = -1;
	spin_unlock(&iucv_table_lock);
}

/**
 * iucv_work_fn:
 *
 * This work function loops over the queue of path pending irq blocks
 * created by iucv_external_interrupt, calls the appropriate action
 * handler and then frees the buffer.
 */
static void iucv_work_fn(struct work_struct *work)
{
	LIST_HEAD(work_queue);
	struct iucv_irq_list *p, *n;

	/* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
	spin_lock_bh(&iucv_table_lock);
	iucv_active_cpu = smp_processor_id();

	/* Atomically steal the whole path-pending queue. */
	spin_lock_irq(&iucv_queue_lock);
	list_splice_init(&iucv_work_queue, &work_queue);
	spin_unlock_irq(&iucv_queue_lock);

	/*
	 * NOTE(review): iucv_cleanup_queue() is defined elsewhere in this
	 * file; presumably it discards stale queued interrupt buffers
	 * before new path-pending work is processed - verify.
	 */
	iucv_cleanup_queue();
	list_for_each_entry_safe(p, n, &work_queue, list) {
		list_del_init(&p->list);
		iucv_path_pending(&p->data);
		kfree(p);
	}

	iucv_active_cpu = -1;
	spin_unlock_bh(&iucv_table_lock);
}

/**
 * iucv_external_interrupt
 * @code: irq code
 *
 * Handles external interrupts coming in from CP.
 * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn().
*/ static void iucv_external_interrupt(struct ext_code ext_code, unsigned int param32, unsigned long param64) { struct iucv_irq_data *p; struct iucv_irq_list *work; inc_irq_stat(IRQEXT_IUC); p = iucv_irq_data[smp_processor_id()]; if (p->ippathid >= iucv_max_pathid) { WARN_ON(p->ippathid >= iucv_max_pathid); iucv_sever_pathid(p->ippathid, iucv_error_no_listener); return; } BUG_ON(p->iptype < 0x01 || p->iptype > 0x09); work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC); if (!work) { pr_warning("iucv_external_interrupt: out of memory\n"); return; } memcpy(&work->data, p, sizeof(work->data)); spin_lock(&iucv_queue_lock); if (p->iptype == 0x01) { /* Path pending interrupt. */ list_add_tail(&work->list, &iucv_work_queue); schedule_work(&iucv_work); } else { /* The other interrupts. */ list_add_tail(&work->list, &iucv_task_queue); tasklet_schedule(&iucv_tasklet); } spin_unlock(&iucv_queue_lock); } static int iucv_pm_prepare(struct device *dev) { int rc = 0; #ifdef CONFIG_PM_DEBUG printk(KERN_INFO "iucv_pm_prepare\n"); #endif if (dev->driver && dev->driver->pm && dev->driver->pm->prepare) rc = dev->driver->pm->prepare(dev); return rc; } static void iucv_pm_complete(struct device *dev) { #ifdef CONFIG_PM_DEBUG printk(KERN_INFO "iucv_pm_complete\n"); #endif if (dev->driver && dev->driver->pm && dev->driver->pm->complete) dev->driver->pm->complete(dev); } /** * iucv_path_table_empty() - determine if iucv path table is empty * * Returns 0 if there are still iucv pathes defined * 1 if there are no iucv pathes defined */ int iucv_path_table_empty(void) { int i; for (i = 0; i < iucv_max_pathid; i++) { if (iucv_path_table[i]) return 0; } return 1; } /** * iucv_pm_freeze() - Freeze PM callback * @dev: iucv-based device * * disable iucv interrupts * invoke callback function of the iucv-based driver * shut down iucv, if no iucv-pathes are established anymore */ static int iucv_pm_freeze(struct device *dev) { int cpu; struct iucv_irq_list *p, *n; int rc = 0; #ifdef 
CONFIG_PM_DEBUG printk(KERN_WARNING "iucv_pm_freeze\n"); #endif if (iucv_pm_state != IUCV_PM_FREEZING) { for_each_cpu(cpu, &iucv_irq_cpumask) smp_call_function_single(cpu, iucv_block_cpu_almost, NULL, 1); cancel_work_sync(&iucv_work); list_for_each_entry_safe(p, n, &iucv_work_queue, list) { list_del_init(&p->list); iucv_sever_pathid(p->data.ippathid, iucv_error_no_listener); kfree(p); } } iucv_pm_state = IUCV_PM_FREEZING; if (dev->driver && dev->driver->pm && dev->driver->pm->freeze) rc = dev->driver->pm->freeze(dev); if (iucv_path_table_empty()) iucv_disable(); return rc; } /** * iucv_pm_thaw() - Thaw PM callback * @dev: iucv-based device * * make iucv ready for use again: allocate path table, declare interrupt buffers * and enable iucv interrupts * invoke callback function of the iucv-based driver */ static int iucv_pm_thaw(struct device *dev) { int rc = 0; #ifdef CONFIG_PM_DEBUG printk(KERN_WARNING "iucv_pm_thaw\n"); #endif iucv_pm_state = IUCV_PM_THAWING; if (!iucv_path_table) { rc = iucv_enable(); if (rc) goto out; } if (cpumask_empty(&iucv_irq_cpumask)) { if (iucv_nonsmp_handler) /* enable interrupts on one cpu */ iucv_allow_cpu(NULL); else /* enable interrupts on all cpus */ iucv_setmask_mp(); } if (dev->driver && dev->driver->pm && dev->driver->pm->thaw) rc = dev->driver->pm->thaw(dev); out: return rc; } /** * iucv_pm_restore() - Restore PM callback * @dev: iucv-based device * * make iucv ready for use again: allocate path table, declare interrupt buffers * and enable iucv interrupts * invoke callback function of the iucv-based driver */ static int iucv_pm_restore(struct device *dev) { int rc = 0; #ifdef CONFIG_PM_DEBUG printk(KERN_WARNING "iucv_pm_restore %p\n", iucv_path_table); #endif if ((iucv_pm_state != IUCV_PM_RESTORING) && iucv_path_table) pr_warning("Suspending Linux did not completely close all IUCV " "connections\n"); iucv_pm_state = IUCV_PM_RESTORING; if (cpumask_empty(&iucv_irq_cpumask)) { rc = iucv_query_maxconn(); rc = iucv_enable(); if (rc) 
goto out; } if (dev->driver && dev->driver->pm && dev->driver->pm->restore) rc = dev->driver->pm->restore(dev); out: return rc; } struct iucv_interface iucv_if = { .message_receive = iucv_message_receive, .__message_receive = __iucv_message_receive, .message_reply = iucv_message_reply, .message_reject = iucv_message_reject, .message_send = iucv_message_send, .__message_send = __iucv_message_send, .message_send2way = iucv_message_send2way, .message_purge = iucv_message_purge, .path_accept = iucv_path_accept, .path_connect = iucv_path_connect, .path_quiesce = iucv_path_quiesce, .path_resume = iucv_path_resume, .path_sever = iucv_path_sever, .iucv_register = iucv_register, .iucv_unregister = iucv_unregister, .bus = NULL, .root = NULL, }; EXPORT_SYMBOL(iucv_if); /** * iucv_init * * Allocates and initializes various data structures. */ static int __init iucv_init(void) { int rc; int cpu; if (!MACHINE_IS_VM) { rc = -EPROTONOSUPPORT; goto out; } ctl_set_bit(0, 1); rc = iucv_query_maxconn(); if (rc) goto out_ctl; rc = register_external_interrupt(0x4000, iucv_external_interrupt); if (rc) goto out_ctl; iucv_root = root_device_register("iucv"); if (IS_ERR(iucv_root)) { rc = PTR_ERR(iucv_root); goto out_int; } for_each_online_cpu(cpu) { /* Note: GFP_DMA used to get memory below 2G */ iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data), GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); if (!iucv_irq_data[cpu]) { rc = -ENOMEM; goto out_free; } /* Allocate parameter blocks. 
*/ iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); if (!iucv_param[cpu]) { rc = -ENOMEM; goto out_free; } iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); if (!iucv_param_irq[cpu]) { rc = -ENOMEM; goto out_free; } } rc = register_hotcpu_notifier(&iucv_cpu_notifier); if (rc) goto out_free; rc = register_reboot_notifier(&iucv_reboot_notifier); if (rc) goto out_cpu; ASCEBC(iucv_error_no_listener, 16); ASCEBC(iucv_error_no_memory, 16); ASCEBC(iucv_error_pathid, 16); iucv_available = 1; rc = bus_register(&iucv_bus); if (rc) goto out_reboot; iucv_if.root = iucv_root; iucv_if.bus = &iucv_bus; return 0; out_reboot: unregister_reboot_notifier(&iucv_reboot_notifier); out_cpu: unregister_hotcpu_notifier(&iucv_cpu_notifier); out_free: for_each_possible_cpu(cpu) { kfree(iucv_param_irq[cpu]); iucv_param_irq[cpu] = NULL; kfree(iucv_param[cpu]); iucv_param[cpu] = NULL; kfree(iucv_irq_data[cpu]); iucv_irq_data[cpu] = NULL; } root_device_unregister(iucv_root); out_int: unregister_external_interrupt(0x4000, iucv_external_interrupt); out_ctl: ctl_clear_bit(0, 1); out: return rc; } /** * iucv_exit * * Frees everything allocated from iucv_init. 
*/ static void __exit iucv_exit(void) { struct iucv_irq_list *p, *n; int cpu; spin_lock_irq(&iucv_queue_lock); list_for_each_entry_safe(p, n, &iucv_task_queue, list) kfree(p); list_for_each_entry_safe(p, n, &iucv_work_queue, list) kfree(p); spin_unlock_irq(&iucv_queue_lock); unregister_reboot_notifier(&iucv_reboot_notifier); unregister_hotcpu_notifier(&iucv_cpu_notifier); for_each_possible_cpu(cpu) { kfree(iucv_param_irq[cpu]); iucv_param_irq[cpu] = NULL; kfree(iucv_param[cpu]); iucv_param[cpu] = NULL; kfree(iucv_irq_data[cpu]); iucv_irq_data[cpu] = NULL; } root_device_unregister(iucv_root); bus_unregister(&iucv_bus); unregister_external_interrupt(0x4000, iucv_external_interrupt); } subsys_initcall(iucv_init); module_exit(iucv_exit); MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)"); MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver"); MODULE_LICENSE("GPL");
gpl-2.0
fards/ainol_elfii_common
drivers/ata/pata_pcmcia.c
2379
13741
/* * pata_pcmcia.c - PCMCIA PATA controller driver. * Copyright 2005-2006 Red Hat Inc, all rights reserved. * PCMCIA ident update Copyright 2006 Marcin Juszkiewicz * <openembedded@hrw.one.pl> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * * Heavily based upon ide-cs.c * The initial developer of the original code is David A. Hinds * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/slab.h> #include <scsi/scsi_host.h> #include <linux/ata.h> #include <linux/libata.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <pcmcia/cisreg.h> #include <pcmcia/ciscode.h> #define DRV_NAME "pata_pcmcia" #define DRV_VERSION "0.3.5" /** * pcmcia_set_mode - PCMCIA specific mode setup * @link: link * @r_failed_dev: Return pointer for failed device * * Perform the tuning and setup of the devices and timings, which * for PCMCIA is the same as any other controller. 
We wrap it however
 * as we need to spot hardware with incorrect or missing master/slave
 * decode, which alas is embarrassingly common in the PC world
 */
static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_device *master = &link->device[0];
	struct ata_device *slave = &link->device[1];

	/* Nothing to cross-check unless both devices probed successfully. */
	if (!ata_dev_enabled(master) || !ata_dev_enabled(slave))
		return ata_do_set_mode(link, r_failed_dev);

	/*
	 * Identical firmware revision and product strings suggest the
	 * "slave" is really the master echoed by broken address decode.
	 */
	if (memcmp(master->id + ATA_ID_FW_REV, slave->id + ATA_ID_FW_REV,
		   ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0) {
		/* Suspicious match, but could be two cards from the
		   same vendor - check serial */
		if (memcmp(master->id + ATA_ID_SERNO, slave->id + ATA_ID_SERNO,
			   ATA_ID_SERNO_LEN) == 0
		    && master->id[ATA_ID_SERNO] >> 8) {
			ata_dev_printk(slave, KERN_WARNING,
				       "is a ghost device, ignoring.\n");
			ata_dev_disable(slave);
		}
	}
	return ata_do_set_mode(link, r_failed_dev);
}

/**
 * pcmcia_set_mode_8bit - PCMCIA specific mode setup
 * @link: link
 * @r_failed_dev: Return pointer for failed device
 *
 * For the simple emulated 8bit stuff the less we do the better.
 */
static int pcmcia_set_mode_8bit(struct ata_link *link,
				struct ata_device **r_failed_dev)
{
	/* No timing programming on the 8-bit emulation; report success. */
	return 0;
}

/**
 * ata_data_xfer_8bit - Transfer data by 8bit PIO
 * @dev: device to target
 * @buf: data buffer
 * @buflen: buffer length
 * @rw: read/write
 *
 * Transfer data from/to the device data register by 8 bit PIO.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int ata_data_xfer_8bit(struct ata_device *dev,
				       unsigned char *buf,
				       unsigned int buflen, int rw)
{
	struct ata_port *ap = dev->link->ap;

	/* One byte per data-register access, repeated buflen times. */
	if (rw == READ)
		ioread8_rep(ap->ioaddr.data_addr, buf, buflen);
	else
		iowrite8_rep(ap->ioaddr.data_addr, buf, buflen);

	return buflen;
}

/**
 * pcmcia_8bit_drain_fifo - Stock FIFO drain logic for SFF controllers
 * @qc: command
 *
 * Drain the FIFO and device of any stuck data following a command
 * failing to complete. In some cases this is necessary before a
 * reset will recover the device.
* */ static void pcmcia_8bit_drain_fifo(struct ata_queued_cmd *qc) { int count; struct ata_port *ap; /* We only need to flush incoming data when a command was running */ if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE) return; ap = qc->ap; /* Drain up to 64K of data before we give up this recovery method */ for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ) && count++ < 65536;) ioread8(ap->ioaddr.data_addr); if (count) ata_port_printk(ap, KERN_WARNING, "drained %d bytes to clear DRQ.\n", count); } static struct scsi_host_template pcmcia_sht = { ATA_PIO_SHT(DRV_NAME), }; static struct ata_port_operations pcmcia_port_ops = { .inherits = &ata_sff_port_ops, .sff_data_xfer = ata_sff_data_xfer_noirq, .cable_detect = ata_cable_40wire, .set_mode = pcmcia_set_mode, }; static struct ata_port_operations pcmcia_8bit_port_ops = { .inherits = &ata_sff_port_ops, .sff_data_xfer = ata_data_xfer_8bit, .cable_detect = ata_cable_40wire, .set_mode = pcmcia_set_mode_8bit, .sff_drain_fifo = pcmcia_8bit_drain_fifo, }; static int pcmcia_check_one_config(struct pcmcia_device *pdev, void *priv_data) { int *is_kme = priv_data; if (!(pdev->resource[0]->flags & IO_DATA_PATH_WIDTH_8)) { pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; } pdev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; pdev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; if (pdev->resource[1]->end) { pdev->resource[0]->end = 8; pdev->resource[1]->end = (*is_kme) ? 2 : 1; } else { if (pdev->resource[0]->end < 16) return -ENODEV; } return pcmcia_request_io(pdev); } /** * pcmcia_init_one - attach a PCMCIA interface * @pdev: pcmcia device * * Register a PCMCIA IDE interface. Such interfaces are PIO 0 and * shared IRQ. 
*/ static int pcmcia_init_one(struct pcmcia_device *pdev) { struct ata_host *host; struct ata_port *ap; int is_kme = 0, ret = -ENOMEM, p; unsigned long io_base, ctl_base; void __iomem *io_addr, *ctl_addr; int n_ports = 1; struct ata_port_operations *ops = &pcmcia_port_ops; /* Set up attributes in order to probe card and get resources */ pdev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO | CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC; /* See if we have a manufacturer identifier. Use it to set is_kme for vendor quirks */ is_kme = ((pdev->manf_id == MANFID_KME) && ((pdev->card_id == PRODID_KME_KXLC005_A) || (pdev->card_id == PRODID_KME_KXLC005_B))); if (pcmcia_loop_config(pdev, pcmcia_check_one_config, &is_kme)) { pdev->config_flags &= ~CONF_AUTO_CHECK_VCC; if (pcmcia_loop_config(pdev, pcmcia_check_one_config, &is_kme)) goto failed; /* No suitable config found */ } io_base = pdev->resource[0]->start; if (pdev->resource[1]->end) ctl_base = pdev->resource[1]->start; else ctl_base = pdev->resource[0]->start + 0x0e; if (!pdev->irq) goto failed; ret = pcmcia_enable_device(pdev); if (ret) goto failed; /* iomap */ ret = -ENOMEM; io_addr = devm_ioport_map(&pdev->dev, io_base, 8); ctl_addr = devm_ioport_map(&pdev->dev, ctl_base, 1); if (!io_addr || !ctl_addr) goto failed; /* Success. Disable the IRQ nIEN line, do quirks */ iowrite8(0x02, ctl_addr); if (is_kme) iowrite8(0x81, ctl_addr + 0x01); /* FIXME: Could be more ports at base + 0x10 but we only deal with one right now */ if (resource_size(pdev->resource[0]) >= 0x20) n_ports = 2; if (pdev->manf_id == 0x0097 && pdev->card_id == 0x1620) ops = &pcmcia_8bit_port_ops; /* * Having done the PCMCIA plumbing the ATA side is relatively * sane. 
*/ ret = -ENOMEM; host = ata_host_alloc(&pdev->dev, n_ports); if (!host) goto failed; for (p = 0; p < n_ports; p++) { ap = host->ports[p]; ap->ops = ops; ap->pio_mask = ATA_PIO0; /* ISA so PIO 0 cycles */ ap->flags |= ATA_FLAG_SLAVE_POSS; ap->ioaddr.cmd_addr = io_addr + 0x10 * p; ap->ioaddr.altstatus_addr = ctl_addr + 0x10 * p; ap->ioaddr.ctl_addr = ctl_addr + 0x10 * p; ata_sff_std_ports(&ap->ioaddr); ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io_base, ctl_base); } /* activate */ ret = ata_host_activate(host, pdev->irq, ata_sff_interrupt, IRQF_SHARED, &pcmcia_sht); if (ret) goto failed; pdev->priv = host; return 0; failed: pcmcia_disable_device(pdev); return ret; } /** * pcmcia_remove_one - unplug an pcmcia interface * @pdev: pcmcia device * * A PCMCIA ATA device has been unplugged. Perform the needed * cleanup. Also called on module unload for any active devices. */ static void pcmcia_remove_one(struct pcmcia_device *pdev) { struct ata_host *host = pdev->priv; if (host) ata_host_detach(host); pcmcia_disable_device(pdev); } static const struct pcmcia_device_id pcmcia_devices[] = { PCMCIA_DEVICE_FUNC_ID(4), PCMCIA_DEVICE_MANF_CARD(0x0000, 0x0000), /* Corsair */ PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */ PCMCIA_DEVICE_MANF_CARD(0x000a, 0x0000), /* I-O Data CFA */ PCMCIA_DEVICE_MANF_CARD(0x001c, 0x0001), /* Mitsubishi CFA */ PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704), PCMCIA_DEVICE_MANF_CARD(0x0032, 0x2904), PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401), /* SanDisk CFA */ PCMCIA_DEVICE_MANF_CARD(0x004f, 0x0000), /* Kingston */ PCMCIA_DEVICE_MANF_CARD(0x0097, 0x1620), /* TI emulated */ PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */ PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d), PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */ PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */ PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001), PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0100), /* Viking CFA */ PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar, Viking CFA */ 
PCMCIA_DEVICE_PROD_ID123("Caravelle", "PSC-IDE ", "PSC000", 0x8c36137c, 0xd0693ab8, 0x2768a9f0), PCMCIA_DEVICE_PROD_ID123("CDROM", "IDE", "MCD-601p", 0x1b9179ca, 0xede88951, 0x0d902f74), PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9), PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591), PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728), PCMCIA_DEVICE_PROD_ID12("CNF ", "CD-ROM", 0x46d7db81, 0x66536591), PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591), PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4), PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde), PCMCIA_DEVICE_PROD_ID12("EXP", "CD+GAME", 0x6f58c983, 0x63c13aaf), PCMCIA_DEVICE_PROD_ID12("EXP ", "CD-ROM", 0x0a5c52fd, 0x66536591), PCMCIA_DEVICE_PROD_ID12("EXP ", "PnPIDE", 0x0a5c52fd, 0x0c694728), PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e), PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae), PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178), PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420), PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178), PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x55d5bffb), PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10), PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e), PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674), PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2 ", 0xe37be2b5, 0x8671043b), PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF300", 0x7ed2ad87, 0x7e9e78ee), 
PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF500", 0x7ed2ad87, 0x7a13045c), PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79), PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591), PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728), PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1), PCMCIA_DEVICE_PROD_ID12("SEAGATE", "ST1", 0x87c1b330, 0xe1f30883), PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "04/05/06", 0x43d74cb4, 0x6a22777d), PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6), PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003), PCMCIA_DEVICE_PROD_ID1("TRANSCEND 512M ", 0xd0909443), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF45", 0x709b1bf1, 0xf68b6f32), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x7558f133), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47), PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852), PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918), PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e), PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6), PCMCIA_DEVICE_PROD_ID2("Flash Card", 0x5a362506), PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, pcmcia_devices); static struct pcmcia_driver pcmcia_driver = { .owner = THIS_MODULE, .name = DRV_NAME, .id_table = pcmcia_devices, .probe = pcmcia_init_one, .remove = pcmcia_remove_one, }; static int __init pcmcia_init(void) { return pcmcia_register_driver(&pcmcia_driver); } static void __exit pcmcia_exit(void) { pcmcia_unregister_driver(&pcmcia_driver); } MODULE_AUTHOR("Alan Cox"); 
MODULE_DESCRIPTION("low-level driver for PCMCIA ATA"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); module_init(pcmcia_init); module_exit(pcmcia_exit);
gpl-2.0
SlimRoms/kernel_xiaomi_armani
mm/page_isolation.c
3659
3786
/*
 * linux/mm/page_isolation.c
 *
 * Isolate (and un-isolate) ranges of pageblocks from the page allocator
 * by flipping their migratetype to MIGRATE_ISOLATE, and test whether an
 * isolated range has become fully free.
 */
#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include "internal.h"

/*
 * Return the first page in [pfn, pfn + nr_pages) whose pfn is valid,
 * or NULL if none is.  Needed because a pageblock may contain holes
 * when CONFIG_HOLES_IN_ZONE is set (pfn_valid_within() checks them).
 */
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;
	for (i = 0; i < nr_pages; i++)
		if (pfn_valid_within(pfn + i))
			break;
	if (unlikely(i == nr_pages))
		return NULL;
	return pfn_to_page(pfn + i);
}

/*
 * start_isolate_page_range() -- make page-allocation-type of range of pages
 * to be MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to set in error recovery.
 *
 * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages and pages freed in the
 * future will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success and -EBUSY if any part of range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

	/* Walk pageblock-by-pageblock; fully-invalid blocks are skipped. */
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && set_migratetype_isolate(page)) {
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
undo:
	/* Roll back every block isolated before the failing one. */
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages)
		unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

	return -EBUSY;
}

/*
 * Make isolated pages available again.
 *
 * Restores @migratetype on each pageblock in [start_pfn, end_pfn) that is
 * currently MIGRATE_ISOLATE; blocks that were never isolated (or contain
 * no valid pfn) are left untouched.  Alignment rules match
 * start_isolate_page_range().  Always returns 0.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;
	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}

/*
 * Test all pages in the range is free(means isolated) or not.
 * all pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before call this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/* Free buddy page: skip the whole buddy chunk. */
			pfn += 1 << page_order(page);
		else if (page_count(page) == 0 &&
				page_private(page) == MIGRATE_ISOLATE)
			/*
			 * Unreferenced page tagged MIGRATE_ISOLATE in
			 * page_private (set by the free path for isolated
			 * pages) also counts as isolated.
			 */
			pfn += 1;
		else
			break;
	}
	if (pfn < end_pfn)
		return 0;
	return 1;
}

/*
 * Check that every page in [start_pfn, end_pfn) is free or isolated.
 * Returns 0 if the whole range is isolated, -EBUSY otherwise.
 */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;
	int ret;

	/*
	 * Note: pageblock_nr_page != MAX_ORDER. Then, chunks of free page
	 * is not aligned to pageblock_nr_pages.
	 * Then we just check pagetype fist.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or Marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn);
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret ? 0 : -EBUSY;
}
gpl-2.0
emceethemouth/kernel_jflte
net/core/net-sysfs.c
4427
36473
/* * net-sysfs.c - network device class and attributes * * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/capability.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/slab.h> #include <linux/nsproxy.h> #include <net/sock.h> #include <net/net_namespace.h> #include <linux/rtnetlink.h> #include <linux/wireless.h> #include <linux/vmalloc.h> #include <linux/export.h> #include <linux/jiffies.h> #include <net/wext.h> #include "net-sysfs.h" #ifdef CONFIG_SYSFS static const char fmt_hex[] = "%#x\n"; static const char fmt_long_hex[] = "%#lx\n"; static const char fmt_dec[] = "%d\n"; static const char fmt_udec[] = "%u\n"; static const char fmt_ulong[] = "%lu\n"; static const char fmt_u64[] = "%llu\n"; static inline int dev_isalive(const struct net_device *dev) { return dev->reg_state <= NETREG_REGISTERED; } /* use same locking rules as GIF* ioctl's */ static ssize_t netdev_show(const struct device *dev, struct device_attribute *attr, char *buf, ssize_t (*format)(const struct net_device *, char *)) { struct net_device *net = to_net_dev(dev); ssize_t ret = -EINVAL; read_lock(&dev_base_lock); if (dev_isalive(net)) ret = (*format)(net, buf); read_unlock(&dev_base_lock); return ret; } /* generate a show function for simple field */ #define NETDEVICE_SHOW(field, format_string) \ static ssize_t format_##field(const struct net_device *net, char *buf) \ { \ return sprintf(buf, format_string, net->field); \ } \ static ssize_t show_##field(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ return netdev_show(dev, attr, buf, format_##field); \ } /* use same locking and permission rules as SIF* ioctl's */ static ssize_t netdev_store(struct device 
*dev, struct device_attribute *attr, const char *buf, size_t len, int (*set)(struct net_device *, unsigned long)) { struct net_device *net = to_net_dev(dev); char *endp; unsigned long new; int ret = -EINVAL; if (!capable(CAP_NET_ADMIN)) return -EPERM; new = simple_strtoul(buf, &endp, 0); if (endp == buf) goto err; if (!rtnl_trylock()) return restart_syscall(); if (dev_isalive(net)) { if ((ret = (*set)(net, new)) == 0) ret = len; } rtnl_unlock(); err: return ret; } NETDEVICE_SHOW(dev_id, fmt_hex); NETDEVICE_SHOW(addr_assign_type, fmt_dec); NETDEVICE_SHOW(addr_len, fmt_dec); NETDEVICE_SHOW(iflink, fmt_dec); NETDEVICE_SHOW(ifindex, fmt_dec); NETDEVICE_SHOW(type, fmt_dec); NETDEVICE_SHOW(link_mode, fmt_dec); /* use same locking rules as GIFHWADDR ioctl's */ static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *net = to_net_dev(dev); ssize_t ret = -EINVAL; read_lock(&dev_base_lock); if (dev_isalive(net)) ret = sysfs_format_mac(buf, net->dev_addr, net->addr_len); read_unlock(&dev_base_lock); return ret; } static ssize_t show_broadcast(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *net = to_net_dev(dev); if (dev_isalive(net)) return sysfs_format_mac(buf, net->broadcast, net->addr_len); return -EINVAL; } static ssize_t show_carrier(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); if (netif_running(netdev)) { return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev)); } return -EINVAL; } static ssize_t show_speed(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); int ret = -EINVAL; if (!rtnl_trylock()) return restart_syscall(); if (netif_running(netdev)) { struct ethtool_cmd cmd; if (!__ethtool_get_settings(netdev, &cmd)) ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd)); } rtnl_unlock(); return ret; } static ssize_t show_duplex(struct device *dev, struct 
device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); int ret = -EINVAL; if (!rtnl_trylock()) return restart_syscall(); if (netif_running(netdev)) { struct ethtool_cmd cmd; if (!__ethtool_get_settings(netdev, &cmd)) ret = sprintf(buf, "%s\n", cmd.duplex ? "full" : "half"); } rtnl_unlock(); return ret; } static ssize_t show_dormant(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); if (netif_running(netdev)) return sprintf(buf, fmt_dec, !!netif_dormant(netdev)); return -EINVAL; } static const char *const operstates[] = { "unknown", "notpresent", /* currently unused */ "down", "lowerlayerdown", "testing", /* currently unused */ "dormant", "up" }; static ssize_t show_operstate(struct device *dev, struct device_attribute *attr, char *buf) { const struct net_device *netdev = to_net_dev(dev); unsigned char operstate; read_lock(&dev_base_lock); operstate = netdev->operstate; if (!netif_running(netdev)) operstate = IF_OPER_DOWN; read_unlock(&dev_base_lock); if (operstate >= ARRAY_SIZE(operstates)) return -EINVAL; /* should not happen */ return sprintf(buf, "%s\n", operstates[operstate]); } /* read-write attributes */ NETDEVICE_SHOW(mtu, fmt_dec); static int change_mtu(struct net_device *net, unsigned long new_mtu) { return dev_set_mtu(net, (int) new_mtu); } static ssize_t store_mtu(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { return netdev_store(dev, attr, buf, len, change_mtu); } NETDEVICE_SHOW(flags, fmt_hex); static int change_flags(struct net_device *net, unsigned long new_flags) { return dev_change_flags(net, (unsigned) new_flags); } static ssize_t store_flags(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { return netdev_store(dev, attr, buf, len, change_flags); } NETDEVICE_SHOW(tx_queue_len, fmt_ulong); static int change_tx_queue_len(struct net_device *net, unsigned long new_len) { net->tx_queue_len = new_len; 
return 0; } static ssize_t store_tx_queue_len(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { return netdev_store(dev, attr, buf, len, change_tx_queue_len); } static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); size_t count = len; ssize_t ret; if (!capable(CAP_NET_ADMIN)) return -EPERM; /* ignore trailing newline */ if (len > 0 && buf[len - 1] == '\n') --count; if (!rtnl_trylock()) return restart_syscall(); ret = dev_set_alias(netdev, buf, count); rtnl_unlock(); return ret < 0 ? ret : len; } static ssize_t show_ifalias(struct device *dev, struct device_attribute *attr, char *buf) { const struct net_device *netdev = to_net_dev(dev); ssize_t ret = 0; if (!rtnl_trylock()) return restart_syscall(); if (netdev->ifalias) ret = sprintf(buf, "%s\n", netdev->ifalias); rtnl_unlock(); return ret; } NETDEVICE_SHOW(group, fmt_dec); static int change_group(struct net_device *net, unsigned long new_group) { dev_set_group(net, (int) new_group); return 0; } static ssize_t store_group(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { return netdev_store(dev, attr, buf, len, change_group); } static struct device_attribute net_class_attributes[] = { __ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL), __ATTR(addr_len, S_IRUGO, show_addr_len, NULL), __ATTR(dev_id, S_IRUGO, show_dev_id, NULL), __ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias), __ATTR(iflink, S_IRUGO, show_iflink, NULL), __ATTR(ifindex, S_IRUGO, show_ifindex, NULL), __ATTR(type, S_IRUGO, show_type, NULL), __ATTR(link_mode, S_IRUGO, show_link_mode, NULL), __ATTR(address, S_IRUGO, show_address, NULL), __ATTR(broadcast, S_IRUGO, show_broadcast, NULL), __ATTR(carrier, S_IRUGO, show_carrier, NULL), __ATTR(speed, S_IRUGO, show_speed, NULL), __ATTR(duplex, S_IRUGO, show_duplex, NULL), __ATTR(dormant, S_IRUGO, show_dormant, NULL), 
__ATTR(operstate, S_IRUGO, show_operstate, NULL), __ATTR(mtu, S_IRUGO | S_IWUSR, show_mtu, store_mtu), __ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags), __ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len, store_tx_queue_len), __ATTR(netdev_group, S_IRUGO | S_IWUSR, show_group, store_group), {} }; /* Show a given an attribute in the statistics group */ static ssize_t netstat_show(const struct device *d, struct device_attribute *attr, char *buf, unsigned long offset) { struct net_device *dev = to_net_dev(d); ssize_t ret = -EINVAL; WARN_ON(offset > sizeof(struct rtnl_link_stats64) || offset % sizeof(u64) != 0); read_lock(&dev_base_lock); if (dev_isalive(dev)) { struct rtnl_link_stats64 temp; const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp); ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset)); } read_unlock(&dev_base_lock); return ret; } /* generate a read-only statistics attribute */ #define NETSTAT_ENTRY(name) \ static ssize_t show_##name(struct device *d, \ struct device_attribute *attr, char *buf) \ { \ return netstat_show(d, attr, buf, \ offsetof(struct rtnl_link_stats64, name)); \ } \ static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) NETSTAT_ENTRY(rx_packets); NETSTAT_ENTRY(tx_packets); NETSTAT_ENTRY(rx_bytes); NETSTAT_ENTRY(tx_bytes); NETSTAT_ENTRY(rx_errors); NETSTAT_ENTRY(tx_errors); NETSTAT_ENTRY(rx_dropped); NETSTAT_ENTRY(tx_dropped); NETSTAT_ENTRY(multicast); NETSTAT_ENTRY(collisions); NETSTAT_ENTRY(rx_length_errors); NETSTAT_ENTRY(rx_over_errors); NETSTAT_ENTRY(rx_crc_errors); NETSTAT_ENTRY(rx_frame_errors); NETSTAT_ENTRY(rx_fifo_errors); NETSTAT_ENTRY(rx_missed_errors); NETSTAT_ENTRY(tx_aborted_errors); NETSTAT_ENTRY(tx_carrier_errors); NETSTAT_ENTRY(tx_fifo_errors); NETSTAT_ENTRY(tx_heartbeat_errors); NETSTAT_ENTRY(tx_window_errors); NETSTAT_ENTRY(rx_compressed); NETSTAT_ENTRY(tx_compressed); static struct attribute *netstat_attrs[] = { &dev_attr_rx_packets.attr, &dev_attr_tx_packets.attr, 
&dev_attr_rx_bytes.attr, &dev_attr_tx_bytes.attr, &dev_attr_rx_errors.attr, &dev_attr_tx_errors.attr, &dev_attr_rx_dropped.attr, &dev_attr_tx_dropped.attr, &dev_attr_multicast.attr, &dev_attr_collisions.attr, &dev_attr_rx_length_errors.attr, &dev_attr_rx_over_errors.attr, &dev_attr_rx_crc_errors.attr, &dev_attr_rx_frame_errors.attr, &dev_attr_rx_fifo_errors.attr, &dev_attr_rx_missed_errors.attr, &dev_attr_tx_aborted_errors.attr, &dev_attr_tx_carrier_errors.attr, &dev_attr_tx_fifo_errors.attr, &dev_attr_tx_heartbeat_errors.attr, &dev_attr_tx_window_errors.attr, &dev_attr_rx_compressed.attr, &dev_attr_tx_compressed.attr, NULL }; static struct attribute_group netstat_group = { .name = "statistics", .attrs = netstat_attrs, }; #ifdef CONFIG_WIRELESS_EXT_SYSFS /* helper function that does all the locking etc for wireless stats */ static ssize_t wireless_show(struct device *d, char *buf, ssize_t (*format)(const struct iw_statistics *, char *)) { struct net_device *dev = to_net_dev(d); const struct iw_statistics *iw; ssize_t ret = -EINVAL; if (!rtnl_trylock()) return restart_syscall(); if (dev_isalive(dev)) { iw = get_wireless_stats(dev); if (iw) ret = (*format)(iw, buf); } rtnl_unlock(); return ret; } /* show function template for wireless fields */ #define WIRELESS_SHOW(name, field, format_string) \ static ssize_t format_iw_##name(const struct iw_statistics *iw, char *buf) \ { \ return sprintf(buf, format_string, iw->field); \ } \ static ssize_t show_iw_##name(struct device *d, \ struct device_attribute *attr, char *buf) \ { \ return wireless_show(d, buf, format_iw_##name); \ } \ static DEVICE_ATTR(name, S_IRUGO, show_iw_##name, NULL) WIRELESS_SHOW(status, status, fmt_hex); WIRELESS_SHOW(link, qual.qual, fmt_dec); WIRELESS_SHOW(level, qual.level, fmt_dec); WIRELESS_SHOW(noise, qual.noise, fmt_dec); WIRELESS_SHOW(nwid, discard.nwid, fmt_dec); WIRELESS_SHOW(crypt, discard.code, fmt_dec); WIRELESS_SHOW(fragment, discard.fragment, fmt_dec); WIRELESS_SHOW(misc, discard.misc, 
fmt_dec); WIRELESS_SHOW(retries, discard.retries, fmt_dec); WIRELESS_SHOW(beacon, miss.beacon, fmt_dec); static struct attribute *wireless_attrs[] = { &dev_attr_status.attr, &dev_attr_link.attr, &dev_attr_level.attr, &dev_attr_noise.attr, &dev_attr_nwid.attr, &dev_attr_crypt.attr, &dev_attr_fragment.attr, &dev_attr_retries.attr, &dev_attr_misc.attr, &dev_attr_beacon.attr, NULL }; static struct attribute_group wireless_group = { .name = "wireless", .attrs = wireless_attrs, }; #endif #endif /* CONFIG_SYSFS */ #ifdef CONFIG_RPS /* * RX queue sysfs structures and functions. */ struct rx_queue_attribute { struct attribute attr; ssize_t (*show)(struct netdev_rx_queue *queue, struct rx_queue_attribute *attr, char *buf); ssize_t (*store)(struct netdev_rx_queue *queue, struct rx_queue_attribute *attr, const char *buf, size_t len); }; #define to_rx_queue_attr(_attr) container_of(_attr, \ struct rx_queue_attribute, attr) #define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj) static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct rx_queue_attribute *attribute = to_rx_queue_attr(attr); struct netdev_rx_queue *queue = to_rx_queue(kobj); if (!attribute->show) return -EIO; return attribute->show(queue, attribute, buf); } static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct rx_queue_attribute *attribute = to_rx_queue_attr(attr); struct netdev_rx_queue *queue = to_rx_queue(kobj); if (!attribute->store) return -EIO; return attribute->store(queue, attribute, buf, count); } static const struct sysfs_ops rx_queue_sysfs_ops = { .show = rx_queue_attr_show, .store = rx_queue_attr_store, }; static ssize_t show_rps_map(struct netdev_rx_queue *queue, struct rx_queue_attribute *attribute, char *buf) { struct rps_map *map; cpumask_var_t mask; size_t len = 0; int i; if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; rcu_read_lock(); map = 
rcu_dereference(queue->rps_map); if (map) for (i = 0; i < map->len; i++) cpumask_set_cpu(map->cpus[i], mask); len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask); if (PAGE_SIZE - len < 3) { rcu_read_unlock(); free_cpumask_var(mask); return -EINVAL; } rcu_read_unlock(); free_cpumask_var(mask); len += sprintf(buf + len, "\n"); return len; } static ssize_t store_rps_map(struct netdev_rx_queue *queue, struct rx_queue_attribute *attribute, const char *buf, size_t len) { struct rps_map *old_map, *map; cpumask_var_t mask; int err, cpu, i; static DEFINE_SPINLOCK(rps_map_lock); if (!capable(CAP_NET_ADMIN)) return -EPERM; if (!alloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits); if (err) { free_cpumask_var(mask); return err; } map = kzalloc(max_t(unsigned, RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), GFP_KERNEL); if (!map) { free_cpumask_var(mask); return -ENOMEM; } i = 0; for_each_cpu_and(cpu, mask, cpu_online_mask) map->cpus[i++] = cpu; if (i) map->len = i; else { kfree(map); map = NULL; } spin_lock(&rps_map_lock); old_map = rcu_dereference_protected(queue->rps_map, lockdep_is_held(&rps_map_lock)); rcu_assign_pointer(queue->rps_map, map); spin_unlock(&rps_map_lock); if (map) static_key_slow_inc(&rps_needed); if (old_map) { kfree_rcu(old_map, rcu); static_key_slow_dec(&rps_needed); } free_cpumask_var(mask); return len; } static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, struct rx_queue_attribute *attr, char *buf) { struct rps_dev_flow_table *flow_table; unsigned long val = 0; rcu_read_lock(); flow_table = rcu_dereference(queue->rps_flow_table); if (flow_table) val = (unsigned long)flow_table->mask + 1; rcu_read_unlock(); return sprintf(buf, "%lu\n", val); } static void rps_dev_flow_table_release_work(struct work_struct *work) { struct rps_dev_flow_table *table = container_of(work, struct rps_dev_flow_table, free_work); vfree(table); } static void 
rps_dev_flow_table_release(struct rcu_head *rcu) { struct rps_dev_flow_table *table = container_of(rcu, struct rps_dev_flow_table, rcu); INIT_WORK(&table->free_work, rps_dev_flow_table_release_work); schedule_work(&table->free_work); } static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, struct rx_queue_attribute *attr, const char *buf, size_t len) { unsigned long mask, count; struct rps_dev_flow_table *table, *old_table; static DEFINE_SPINLOCK(rps_dev_flow_lock); int rc; if (!capable(CAP_NET_ADMIN)) return -EPERM; rc = kstrtoul(buf, 0, &count); if (rc < 0) return rc; if (count) { mask = count - 1; /* mask = roundup_pow_of_two(count) - 1; * without overflows... */ while ((mask | (mask >> 1)) != mask) mask |= (mask >> 1); /* On 64 bit arches, must check mask fits in table->mask (u32), * and on 32bit arches, must check RPS_DEV_FLOW_TABLE_SIZE(mask + 1) * doesnt overflow. */ #if BITS_PER_LONG > 32 if (mask > (unsigned long)(u32)mask) return -EINVAL; #else if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1)) / sizeof(struct rps_dev_flow)) { /* Enforce a limit to prevent overflow */ return -EINVAL; } #endif table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1)); if (!table) return -ENOMEM; table->mask = mask; for (count = 0; count <= mask; count++) table->flows[count].cpu = RPS_NO_CPU; } else table = NULL; spin_lock(&rps_dev_flow_lock); old_table = rcu_dereference_protected(queue->rps_flow_table, lockdep_is_held(&rps_dev_flow_lock)); rcu_assign_pointer(queue->rps_flow_table, table); spin_unlock(&rps_dev_flow_lock); if (old_table) call_rcu(&old_table->rcu, rps_dev_flow_table_release); return len; } static struct rx_queue_attribute rps_cpus_attribute = __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map); static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute = __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR, show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt); static struct attribute *rx_queue_default_attrs[] = { 
&rps_cpus_attribute.attr, &rps_dev_flow_table_cnt_attribute.attr, NULL }; static void rx_queue_release(struct kobject *kobj) { struct netdev_rx_queue *queue = to_rx_queue(kobj); struct rps_map *map; struct rps_dev_flow_table *flow_table; map = rcu_dereference_protected(queue->rps_map, 1); if (map) { RCU_INIT_POINTER(queue->rps_map, NULL); kfree_rcu(map, rcu); } flow_table = rcu_dereference_protected(queue->rps_flow_table, 1); if (flow_table) { RCU_INIT_POINTER(queue->rps_flow_table, NULL); call_rcu(&flow_table->rcu, rps_dev_flow_table_release); } memset(kobj, 0, sizeof(*kobj)); dev_put(queue->dev); } static struct kobj_type rx_queue_ktype = { .sysfs_ops = &rx_queue_sysfs_ops, .release = rx_queue_release, .default_attrs = rx_queue_default_attrs, }; static int rx_queue_add_kobject(struct net_device *net, int index) { struct netdev_rx_queue *queue = net->_rx + index; struct kobject *kobj = &queue->kobj; int error = 0; kobj->kset = net->queues_kset; error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL, "rx-%u", index); if (error) { kobject_put(kobj); return error; } kobject_uevent(kobj, KOBJ_ADD); dev_hold(queue->dev); return error; } #endif /* CONFIG_RPS */ int net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num) { #ifdef CONFIG_RPS int i; int error = 0; for (i = old_num; i < new_num; i++) { error = rx_queue_add_kobject(net, i); if (error) { new_num = old_num; break; } } while (--i >= new_num) kobject_put(&net->_rx[i].kobj); return error; #else return 0; #endif } #ifdef CONFIG_SYSFS /* * netdev_queue sysfs structures and functions. 
*/ struct netdev_queue_attribute { struct attribute attr; ssize_t (*show)(struct netdev_queue *queue, struct netdev_queue_attribute *attr, char *buf); ssize_t (*store)(struct netdev_queue *queue, struct netdev_queue_attribute *attr, const char *buf, size_t len); }; #define to_netdev_queue_attr(_attr) container_of(_attr, \ struct netdev_queue_attribute, attr) #define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj) static ssize_t netdev_queue_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr); struct netdev_queue *queue = to_netdev_queue(kobj); if (!attribute->show) return -EIO; return attribute->show(queue, attribute, buf); } static ssize_t netdev_queue_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr); struct netdev_queue *queue = to_netdev_queue(kobj); if (!attribute->store) return -EIO; return attribute->store(queue, attribute, buf, count); } static const struct sysfs_ops netdev_queue_sysfs_ops = { .show = netdev_queue_attr_show, .store = netdev_queue_attr_store, }; static ssize_t show_trans_timeout(struct netdev_queue *queue, struct netdev_queue_attribute *attribute, char *buf) { unsigned long trans_timeout; spin_lock_irq(&queue->_xmit_lock); trans_timeout = queue->trans_timeout; spin_unlock_irq(&queue->_xmit_lock); return sprintf(buf, "%lu", trans_timeout); } static struct netdev_queue_attribute queue_trans_timeout = __ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL); #ifdef CONFIG_BQL /* * Byte queue limits sysfs structures and functions. 
*/ static ssize_t bql_show(char *buf, unsigned int value) { return sprintf(buf, "%u\n", value); } static ssize_t bql_set(const char *buf, const size_t count, unsigned int *pvalue) { unsigned int value; int err; if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) value = DQL_MAX_LIMIT; else { err = kstrtouint(buf, 10, &value); if (err < 0) return err; if (value > DQL_MAX_LIMIT) return -EINVAL; } *pvalue = value; return count; } static ssize_t bql_show_hold_time(struct netdev_queue *queue, struct netdev_queue_attribute *attr, char *buf) { struct dql *dql = &queue->dql; return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time)); } static ssize_t bql_set_hold_time(struct netdev_queue *queue, struct netdev_queue_attribute *attribute, const char *buf, size_t len) { struct dql *dql = &queue->dql; unsigned value; int err; err = kstrtouint(buf, 10, &value); if (err < 0) return err; dql->slack_hold_time = msecs_to_jiffies(value); return len; } static struct netdev_queue_attribute bql_hold_time_attribute = __ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time, bql_set_hold_time); static ssize_t bql_show_inflight(struct netdev_queue *queue, struct netdev_queue_attribute *attr, char *buf) { struct dql *dql = &queue->dql; return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed); } static struct netdev_queue_attribute bql_inflight_attribute = __ATTR(inflight, S_IRUGO, bql_show_inflight, NULL); #define BQL_ATTR(NAME, FIELD) \ static ssize_t bql_show_ ## NAME(struct netdev_queue *queue, \ struct netdev_queue_attribute *attr, \ char *buf) \ { \ return bql_show(buf, queue->dql.FIELD); \ } \ \ static ssize_t bql_set_ ## NAME(struct netdev_queue *queue, \ struct netdev_queue_attribute *attr, \ const char *buf, size_t len) \ { \ return bql_set(buf, len, &queue->dql.FIELD); \ } \ \ static struct netdev_queue_attribute bql_ ## NAME ## _attribute = \ __ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME, \ bql_set_ ## NAME); BQL_ATTR(limit, limit) BQL_ATTR(limit_max, 
max_limit) BQL_ATTR(limit_min, min_limit) static struct attribute *dql_attrs[] = { &bql_limit_attribute.attr, &bql_limit_max_attribute.attr, &bql_limit_min_attribute.attr, &bql_hold_time_attribute.attr, &bql_inflight_attribute.attr, NULL }; static struct attribute_group dql_group = { .name = "byte_queue_limits", .attrs = dql_attrs, }; #endif /* CONFIG_BQL */ #ifdef CONFIG_XPS static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue) { struct net_device *dev = queue->dev; int i; for (i = 0; i < dev->num_tx_queues; i++) if (queue == &dev->_tx[i]) break; BUG_ON(i >= dev->num_tx_queues); return i; } static ssize_t show_xps_map(struct netdev_queue *queue, struct netdev_queue_attribute *attribute, char *buf) { struct net_device *dev = queue->dev; struct xps_dev_maps *dev_maps; cpumask_var_t mask; unsigned long index; size_t len = 0; int i; if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; index = get_netdev_queue_index(queue); rcu_read_lock(); dev_maps = rcu_dereference(dev->xps_maps); if (dev_maps) { for_each_possible_cpu(i) { struct xps_map *map = rcu_dereference(dev_maps->cpu_map[i]); if (map) { int j; for (j = 0; j < map->len; j++) { if (map->queues[j] == index) { cpumask_set_cpu(i, mask); break; } } } } } rcu_read_unlock(); len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask); if (PAGE_SIZE - len < 3) { free_cpumask_var(mask); return -EINVAL; } free_cpumask_var(mask); len += sprintf(buf + len, "\n"); return len; } static DEFINE_MUTEX(xps_map_mutex); #define xmap_dereference(P) \ rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) static void xps_queue_release(struct netdev_queue *queue) { struct net_device *dev = queue->dev; struct xps_dev_maps *dev_maps; struct xps_map *map; unsigned long index; int i, pos, nonempty = 0; index = get_netdev_queue_index(queue); mutex_lock(&xps_map_mutex); dev_maps = xmap_dereference(dev->xps_maps); if (dev_maps) { for_each_possible_cpu(i) { map = xmap_dereference(dev_maps->cpu_map[i]); if 
(!map) continue; for (pos = 0; pos < map->len; pos++) if (map->queues[pos] == index) break; if (pos < map->len) { if (map->len > 1) map->queues[pos] = map->queues[--map->len]; else { RCU_INIT_POINTER(dev_maps->cpu_map[i], NULL); kfree_rcu(map, rcu); map = NULL; } } if (map) nonempty = 1; } if (!nonempty) { RCU_INIT_POINTER(dev->xps_maps, NULL); kfree_rcu(dev_maps, rcu); } } mutex_unlock(&xps_map_mutex); } static ssize_t store_xps_map(struct netdev_queue *queue, struct netdev_queue_attribute *attribute, const char *buf, size_t len) { struct net_device *dev = queue->dev; cpumask_var_t mask; int err, i, cpu, pos, map_len, alloc_len, need_set; unsigned long index; struct xps_map *map, *new_map; struct xps_dev_maps *dev_maps, *new_dev_maps; int nonempty = 0; int numa_node_id = -2; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (!alloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; index = get_netdev_queue_index(queue); err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits); if (err) { free_cpumask_var(mask); return err; } new_dev_maps = kzalloc(max_t(unsigned, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL); if (!new_dev_maps) { free_cpumask_var(mask); return -ENOMEM; } mutex_lock(&xps_map_mutex); dev_maps = xmap_dereference(dev->xps_maps); for_each_possible_cpu(cpu) { map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) : NULL; new_map = map; if (map) { for (pos = 0; pos < map->len; pos++) if (map->queues[pos] == index) break; map_len = map->len; alloc_len = map->alloc_len; } else pos = map_len = alloc_len = 0; need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu); #ifdef CONFIG_NUMA if (need_set) { if (numa_node_id == -2) numa_node_id = cpu_to_node(cpu); else if (numa_node_id != cpu_to_node(cpu)) numa_node_id = -1; } #endif if (need_set && pos >= map_len) { /* Need to add queue to this CPU's map */ if (map_len >= alloc_len) { alloc_len = alloc_len ? 
2 * alloc_len : XPS_MIN_MAP_ALLOC; new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL, cpu_to_node(cpu)); if (!new_map) goto error; new_map->alloc_len = alloc_len; for (i = 0; i < map_len; i++) new_map->queues[i] = map->queues[i]; new_map->len = map_len; } new_map->queues[new_map->len++] = index; } else if (!need_set && pos < map_len) { /* Need to remove queue from this CPU's map */ if (map_len > 1) new_map->queues[pos] = new_map->queues[--new_map->len]; else new_map = NULL; } RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map); } /* Cleanup old maps */ for_each_possible_cpu(cpu) { map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) : NULL; if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map) kfree_rcu(map, rcu); if (new_dev_maps->cpu_map[cpu]) nonempty = 1; } if (nonempty) { rcu_assign_pointer(dev->xps_maps, new_dev_maps); } else { kfree(new_dev_maps); RCU_INIT_POINTER(dev->xps_maps, NULL); } if (dev_maps) kfree_rcu(dev_maps, rcu); netdev_queue_numa_node_write(queue, (numa_node_id >= 0) ? 
numa_node_id : NUMA_NO_NODE); mutex_unlock(&xps_map_mutex); free_cpumask_var(mask); return len; error: mutex_unlock(&xps_map_mutex); if (new_dev_maps) for_each_possible_cpu(i) kfree(rcu_dereference_protected( new_dev_maps->cpu_map[i], 1)); kfree(new_dev_maps); free_cpumask_var(mask); return -ENOMEM; } static struct netdev_queue_attribute xps_cpus_attribute = __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map); #endif /* CONFIG_XPS */ static struct attribute *netdev_queue_default_attrs[] = { &queue_trans_timeout.attr, #ifdef CONFIG_XPS &xps_cpus_attribute.attr, #endif NULL }; static void netdev_queue_release(struct kobject *kobj) { struct netdev_queue *queue = to_netdev_queue(kobj); #ifdef CONFIG_XPS xps_queue_release(queue); #endif memset(kobj, 0, sizeof(*kobj)); dev_put(queue->dev); } static struct kobj_type netdev_queue_ktype = { .sysfs_ops = &netdev_queue_sysfs_ops, .release = netdev_queue_release, .default_attrs = netdev_queue_default_attrs, }; static int netdev_queue_add_kobject(struct net_device *net, int index) { struct netdev_queue *queue = net->_tx + index; struct kobject *kobj = &queue->kobj; int error = 0; kobj->kset = net->queues_kset; error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL, "tx-%u", index); if (error) goto exit; #ifdef CONFIG_BQL error = sysfs_create_group(kobj, &dql_group); if (error) goto exit; #endif kobject_uevent(kobj, KOBJ_ADD); dev_hold(queue->dev); return 0; exit: kobject_put(kobj); return error; } #endif /* CONFIG_SYSFS */ int netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num) { #ifdef CONFIG_SYSFS int i; int error = 0; for (i = old_num; i < new_num; i++) { error = netdev_queue_add_kobject(net, i); if (error) { new_num = old_num; break; } } while (--i >= new_num) { struct netdev_queue *queue = net->_tx + i; #ifdef CONFIG_BQL sysfs_remove_group(&queue->kobj, &dql_group); #endif kobject_put(&queue->kobj); } return error; #else return 0; #endif /* CONFIG_SYSFS */ } static int 
register_queue_kobjects(struct net_device *net) { int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0; #ifdef CONFIG_SYSFS net->queues_kset = kset_create_and_add("queues", NULL, &net->dev.kobj); if (!net->queues_kset) return -ENOMEM; #endif #ifdef CONFIG_RPS real_rx = net->real_num_rx_queues; #endif real_tx = net->real_num_tx_queues; error = net_rx_queue_update_kobjects(net, 0, real_rx); if (error) goto error; rxq = real_rx; error = netdev_queue_update_kobjects(net, 0, real_tx); if (error) goto error; txq = real_tx; return 0; error: netdev_queue_update_kobjects(net, txq, 0); net_rx_queue_update_kobjects(net, rxq, 0); return error; } static void remove_queue_kobjects(struct net_device *net) { int real_rx = 0, real_tx = 0; #ifdef CONFIG_RPS real_rx = net->real_num_rx_queues; #endif real_tx = net->real_num_tx_queues; net_rx_queue_update_kobjects(net, real_rx, 0); netdev_queue_update_kobjects(net, real_tx, 0); #ifdef CONFIG_SYSFS kset_unregister(net->queues_kset); #endif } static void *net_grab_current_ns(void) { struct net *ns = current->nsproxy->net_ns; #ifdef CONFIG_NET_NS if (ns) atomic_inc(&ns->passive); #endif return ns; } static const void *net_initial_ns(void) { return &init_net; } static const void *net_netlink_ns(struct sock *sk) { return sock_net(sk); } struct kobj_ns_type_operations net_ns_type_operations = { .type = KOBJ_NS_TYPE_NET, .grab_current_ns = net_grab_current_ns, .netlink_ns = net_netlink_ns, .initial_ns = net_initial_ns, .drop_ns = net_drop_ns, }; EXPORT_SYMBOL_GPL(net_ns_type_operations); #ifdef CONFIG_HOTPLUG static int netdev_uevent(struct device *d, struct kobj_uevent_env *env) { struct net_device *dev = to_net_dev(d); int retval; /* pass interface to uevent. */ retval = add_uevent_var(env, "INTERFACE=%s", dev->name); if (retval) goto exit; /* pass ifindex to uevent. * ifindex is useful as it won't change (interface name may change) * and is what RtNetlink uses natively. 
*/ retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex); exit: return retval; } #endif /* * netdev_release -- destroy and free a dead device. * Called when last reference to device kobject is gone. */ static void netdev_release(struct device *d) { struct net_device *dev = to_net_dev(d); BUG_ON(dev->reg_state != NETREG_RELEASED); kfree(dev->ifalias); kfree((char *)dev - dev->padded); } static const void *net_namespace(struct device *d) { struct net_device *dev; dev = container_of(d, struct net_device, dev); return dev_net(dev); } static struct class net_class = { .name = "net", .dev_release = netdev_release, #ifdef CONFIG_SYSFS .dev_attrs = net_class_attributes, #endif /* CONFIG_SYSFS */ #ifdef CONFIG_HOTPLUG .dev_uevent = netdev_uevent, #endif .ns_type = &net_ns_type_operations, .namespace = net_namespace, }; /* Delete sysfs entries but hold kobject reference until after all * netdev references are gone. */ void netdev_unregister_kobject(struct net_device * net) { struct device *dev = &(net->dev); kobject_get(&dev->kobj); remove_queue_kobjects(net); device_del(dev); } /* Create sysfs entries for network device. 
*/ int netdev_register_kobject(struct net_device *net) { struct device *dev = &(net->dev); const struct attribute_group **groups = net->sysfs_groups; int error = 0; device_initialize(dev); dev->class = &net_class; dev->platform_data = net; dev->groups = groups; dev_set_name(dev, "%s", net->name); #ifdef CONFIG_SYSFS /* Allow for a device specific group */ if (*groups) groups++; *groups++ = &netstat_group; #ifdef CONFIG_WIRELESS_EXT_SYSFS if (net->ieee80211_ptr) *groups++ = &wireless_group; #ifdef CONFIG_WIRELESS_EXT else if (net->wireless_handlers) *groups++ = &wireless_group; #endif #endif #endif /* CONFIG_SYSFS */ error = device_add(dev); if (error) return error; error = register_queue_kobjects(net); if (error) { device_del(dev); return error; } return error; } int netdev_class_create_file(struct class_attribute *class_attr) { return class_create_file(&net_class, class_attr); } EXPORT_SYMBOL(netdev_class_create_file); void netdev_class_remove_file(struct class_attribute *class_attr) { class_remove_file(&net_class, class_attr); } EXPORT_SYMBOL(netdev_class_remove_file); int netdev_kobject_init(void) { kobj_ns_type_register(&net_ns_type_operations); return class_register(&net_class); }
gpl-2.0
Jackeagle/android_kernel_samsung_a7lte
drivers/misc/vmw_vmci/vmci_event.c
4427
5539
/* * VMware VMCI Driver * * Copyright (C) 2012 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. */ #include <linux/vmw_vmci_defs.h> #include <linux/vmw_vmci_api.h> #include <linux/list.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> #include "vmci_driver.h" #include "vmci_event.h" #define EVENT_MAGIC 0xEABE0000 #define VMCI_EVENT_MAX_ATTEMPTS 10 struct vmci_subscription { u32 id; u32 event; vmci_event_cb callback; void *callback_data; struct list_head node; /* on one of subscriber lists */ }; static struct list_head subscriber_array[VMCI_EVENT_MAX]; static DEFINE_MUTEX(subscriber_mutex); int __init vmci_event_init(void) { int i; for (i = 0; i < VMCI_EVENT_MAX; i++) INIT_LIST_HEAD(&subscriber_array[i]); return VMCI_SUCCESS; } void vmci_event_exit(void) { int e; /* We free all memory at exit. */ for (e = 0; e < VMCI_EVENT_MAX; e++) { struct vmci_subscription *cur, *p2; list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) { /* * We should never get here because all events * should have been unregistered before we try * to unload the driver module. */ pr_warn("Unexpected free events occurring\n"); list_del(&cur->node); kfree(cur); } } } /* * Find entry. Assumes subscriber_mutex is held. */ static struct vmci_subscription *event_find(u32 sub_id) { int e; for (e = 0; e < VMCI_EVENT_MAX; e++) { struct vmci_subscription *cur; list_for_each_entry(cur, &subscriber_array[e], node) { if (cur->id == sub_id) return cur; } } return NULL; } /* * Actually delivers the events to the subscribers. 
* The callback function for each subscriber is invoked. */ static void event_deliver(struct vmci_event_msg *event_msg) { struct vmci_subscription *cur; struct list_head *subscriber_list; rcu_read_lock(); subscriber_list = &subscriber_array[event_msg->event_data.event]; list_for_each_entry_rcu(cur, subscriber_list, node) { cur->callback(cur->id, &event_msg->event_data, cur->callback_data); } rcu_read_unlock(); } /* * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all * subscribers for given event. */ int vmci_event_dispatch(struct vmci_datagram *msg) { struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg; if (msg->payload_size < sizeof(u32) || msg->payload_size > sizeof(struct vmci_event_data_max)) return VMCI_ERROR_INVALID_ARGS; if (!VMCI_EVENT_VALID(event_msg->event_data.event)) return VMCI_ERROR_EVENT_UNKNOWN; event_deliver(event_msg); return VMCI_SUCCESS; } /* * vmci_event_subscribe() - Subscribe to a given event. * @event: The event to subscribe to. * @callback: The callback to invoke upon the event. * @callback_data: Data to pass to the callback. * @subscription_id: ID used to track subscription. Used with * vmci_event_unsubscribe() * * Subscribes to the provided event. The callback specified will be * fired from RCU critical section and therefore must not sleep. 
*/ int vmci_event_subscribe(u32 event, vmci_event_cb callback, void *callback_data, u32 *new_subscription_id) { struct vmci_subscription *sub; int attempts; int retval; bool have_new_id = false; if (!new_subscription_id) { pr_devel("%s: Invalid subscription (NULL)\n", __func__); return VMCI_ERROR_INVALID_ARGS; } if (!VMCI_EVENT_VALID(event) || !callback) { pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n", __func__, event, callback, callback_data); return VMCI_ERROR_INVALID_ARGS; } sub = kzalloc(sizeof(*sub), GFP_KERNEL); if (!sub) return VMCI_ERROR_NO_MEM; sub->id = VMCI_EVENT_MAX; sub->event = event; sub->callback = callback; sub->callback_data = callback_data; INIT_LIST_HEAD(&sub->node); mutex_lock(&subscriber_mutex); /* Creation of a new event is always allowed. */ for (attempts = 0; attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) { static u32 subscription_id; /* * We try to get an id a couple of time before * claiming we are out of resources. */ /* Test for duplicate id. */ if (!event_find(++subscription_id)) { sub->id = subscription_id; have_new_id = true; break; } } if (have_new_id) { list_add_rcu(&sub->node, &subscriber_array[event]); retval = VMCI_SUCCESS; } else { retval = VMCI_ERROR_NO_RESOURCES; } mutex_unlock(&subscriber_mutex); *new_subscription_id = sub->id; return retval; } EXPORT_SYMBOL_GPL(vmci_event_subscribe); /* * vmci_event_unsubscribe() - unsubscribe from an event. * @sub_id: A subscription ID as provided by vmci_event_subscribe() * * Unsubscribe from given event. Removes it from list and frees it. * Will return callback_data if requested by caller. */ int vmci_event_unsubscribe(u32 sub_id) { struct vmci_subscription *s; mutex_lock(&subscriber_mutex); s = event_find(sub_id); if (s) list_del_rcu(&s->node); mutex_unlock(&subscriber_mutex); if (!s) return VMCI_ERROR_NOT_FOUND; synchronize_rcu(); kfree(s); return VMCI_SUCCESS; } EXPORT_SYMBOL_GPL(vmci_event_unsubscribe);
gpl-2.0
dizgustipated/BOCA-2.6.35.14
drivers/isdn/i4l/isdn_bsdcomp.c
4427
24005
/* * BSD compression module * * Patched version for ISDN syncPPP written 1997/1998 by Michael Hipp * The whole module is now SKB based. * */ /* * Update: The Berkeley copyright was changed, and the change * is retroactive to all "true" BSD software (ie everything * from UCB as opposed to other peoples code that just carried * the same license). The new copyright doesn't clash with the * GPL, so the module-only restriction has been removed.. */ /* * Original copyright notice: * * Copyright (c) 1985, 1986 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * James A. Woods, derived from original work by Spencer Thomas * and Joseph Orost. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/errno.h> #include <linux/string.h> /* used in new tty drivers */ #include <linux/signal.h> /* used in new tty drivers */ #include <linux/bitops.h> #include <asm/system.h> #include <asm/byteorder.h> #include <asm/types.h> #include <linux/if.h> #include <linux/if_ether.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/inet.h> #include <linux/ioctl.h> #include <linux/vmalloc.h> #include <linux/ppp_defs.h> #include <linux/isdn.h> #include <linux/isdn_ppp.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/if_arp.h> #include <linux/ppp-comp.h> #include "isdn_ppp.h" MODULE_DESCRIPTION("ISDN4Linux: BSD Compression for PPP over ISDN"); MODULE_LICENSE("Dual BSD/GPL"); #define BSD_VERSION(x) ((x) >> 5) #define BSD_NBITS(x) ((x) & 0x1F) #define BSD_CURRENT_VERSION 1 #define DEBUG 1 /* * A dictionary for doing BSD compress. 
*/ struct bsd_dict { u32 fcode; u16 codem1; /* output of hash table -1 */ u16 cptr; /* map code to hash table entry */ }; struct bsd_db { int totlen; /* length of this structure */ unsigned int hsize; /* size of the hash table */ unsigned char hshift; /* used in hash function */ unsigned char n_bits; /* current bits/code */ unsigned char maxbits; /* maximum bits/code */ unsigned char debug; /* non-zero if debug desired */ unsigned char unit; /* ppp unit number */ u16 seqno; /* sequence # of next packet */ unsigned int mru; /* size of receive (decompress) bufr */ unsigned int maxmaxcode; /* largest valid code */ unsigned int max_ent; /* largest code in use */ unsigned int in_count; /* uncompressed bytes, aged */ unsigned int bytes_out; /* compressed bytes, aged */ unsigned int ratio; /* recent compression ratio */ unsigned int checkpoint; /* when to next check the ratio */ unsigned int clear_count; /* times dictionary cleared */ unsigned int incomp_count; /* incompressible packets */ unsigned int incomp_bytes; /* incompressible bytes */ unsigned int uncomp_count; /* uncompressed packets */ unsigned int uncomp_bytes; /* uncompressed bytes */ unsigned int comp_count; /* compressed packets */ unsigned int comp_bytes; /* compressed bytes */ unsigned short *lens; /* array of lengths of codes */ struct bsd_dict *dict; /* dictionary */ int xmit; }; #define BSD_OVHD 2 /* BSD compress overhead/packet */ #define MIN_BSD_BITS 9 #define BSD_INIT_BITS MIN_BSD_BITS #define MAX_BSD_BITS 15 /* * the next two codes should not be changed lightly, as they must not * lie within the contiguous general code space. 
*/ #define CLEAR 256 /* table clear output code */ #define FIRST 257 /* first free entry */ #define LAST 255 #define MAXCODE(b) ((1 << (b)) - 1) #define BADCODEM1 MAXCODE(MAX_BSD_BITS); #define BSD_HASH(prefix,suffix,hshift) ((((unsigned long)(suffix))<<(hshift)) \ ^ (unsigned long)(prefix)) #define BSD_KEY(prefix,suffix) ((((unsigned long)(suffix)) << 16) \ + (unsigned long)(prefix)) #define CHECK_GAP 10000 /* Ratio check interval */ #define RATIO_SCALE_LOG 8 #define RATIO_SCALE (1<<RATIO_SCALE_LOG) #define RATIO_MAX (0x7fffffff>>RATIO_SCALE_LOG) /* * clear the dictionary */ static void bsd_clear(struct bsd_db *db) { db->clear_count++; db->max_ent = FIRST-1; db->n_bits = BSD_INIT_BITS; db->bytes_out = 0; db->in_count = 0; db->incomp_count = 0; db->ratio = 0; db->checkpoint = CHECK_GAP; } /* * If the dictionary is full, then see if it is time to reset it. * * Compute the compression ratio using fixed-point arithmetic * with 8 fractional bits. * * Since we have an infinite stream instead of a single file, * watch only the local compression ratio. * * Since both peers must reset the dictionary at the same time even in * the absence of CLEAR codes (while packets are incompressible), they * must compute the same ratio. */ static int bsd_check (struct bsd_db *db) /* 1=output CLEAR */ { unsigned int new_ratio; if (db->in_count >= db->checkpoint) { /* age the ratio by limiting the size of the counts */ if (db->in_count >= RATIO_MAX || db->bytes_out >= RATIO_MAX) { db->in_count -= (db->in_count >> 2); db->bytes_out -= (db->bytes_out >> 2); } db->checkpoint = db->in_count + CHECK_GAP; if (db->max_ent >= db->maxmaxcode) { /* Reset the dictionary only if the ratio is worse, * or if it looks as if it has been poisoned * by incompressible data. * * This does not overflow, because * db->in_count <= RATIO_MAX. 
*/ new_ratio = db->in_count << RATIO_SCALE_LOG; if (db->bytes_out != 0) { new_ratio /= db->bytes_out; } if (new_ratio < db->ratio || new_ratio < 1 * RATIO_SCALE) { bsd_clear (db); return 1; } db->ratio = new_ratio; } } return 0; } /* * Return statistics. */ static void bsd_stats (void *state, struct compstat *stats) { struct bsd_db *db = (struct bsd_db *) state; stats->unc_bytes = db->uncomp_bytes; stats->unc_packets = db->uncomp_count; stats->comp_bytes = db->comp_bytes; stats->comp_packets = db->comp_count; stats->inc_bytes = db->incomp_bytes; stats->inc_packets = db->incomp_count; stats->in_count = db->in_count; stats->bytes_out = db->bytes_out; } /* * Reset state, as on a CCP ResetReq. */ static void bsd_reset (void *state,unsigned char code, unsigned char id, unsigned char *data, unsigned len, struct isdn_ppp_resetparams *rsparm) { struct bsd_db *db = (struct bsd_db *) state; bsd_clear(db); db->seqno = 0; db->clear_count = 0; } /* * Release the compression structure */ static void bsd_free (void *state) { struct bsd_db *db = (struct bsd_db *) state; if (db) { /* * Release the dictionary */ vfree(db->dict); db->dict = NULL; /* * Release the string buffer */ vfree(db->lens); db->lens = NULL; /* * Finally release the structure itself. */ kfree(db); } } /* * Allocate space for a (de) compressor. */ static void *bsd_alloc (struct isdn_ppp_comp_data *data) { int bits; unsigned int hsize, hshift, maxmaxcode; struct bsd_db *db; int decomp; static unsigned int htab[][2] = { { 5003 , 4 } , { 5003 , 4 } , { 5003 , 4 } , { 5003 , 4 } , { 9001 , 5 } , { 18013 , 6 } , { 35023 , 7 } , { 69001 , 8 } }; if (data->optlen != 1 || data->num != CI_BSD_COMPRESS || BSD_VERSION(data->options[0]) != BSD_CURRENT_VERSION) return NULL; bits = BSD_NBITS(data->options[0]); if(bits < 9 || bits > 15) return NULL; hsize = htab[bits-9][0]; hshift = htab[bits-9][1]; /* * Allocate the main control structure for this instance. 
*/ maxmaxcode = MAXCODE(bits); db = kzalloc (sizeof (struct bsd_db),GFP_KERNEL); if (!db) return NULL; db->xmit = data->flags & IPPP_COMP_FLAG_XMIT; decomp = db->xmit ? 0 : 1; /* * Allocate space for the dictionary. This may be more than one page in * length. */ db->dict = vmalloc(hsize * sizeof(struct bsd_dict)); if (!db->dict) { bsd_free (db); return NULL; } /* * If this is the compression buffer then there is no length data. * For decompression, the length information is needed as well. */ if (!decomp) db->lens = NULL; else { db->lens = vmalloc((maxmaxcode + 1) * sizeof(db->lens[0])); if (!db->lens) { bsd_free (db); return (NULL); } } /* * Initialize the data information for the compression code */ db->totlen = sizeof (struct bsd_db) + (sizeof (struct bsd_dict) * hsize); db->hsize = hsize; db->hshift = hshift; db->maxmaxcode = maxmaxcode; db->maxbits = bits; return (void *) db; } /* * Initialize the database. */ static int bsd_init (void *state, struct isdn_ppp_comp_data *data, int unit, int debug) { struct bsd_db *db = state; int indx; int decomp; if(!state || !data) { printk(KERN_ERR "isdn_bsd_init: [%d] ERR, state %lx data %lx\n",unit,(long)state,(long)data); return 0; } decomp = db->xmit ? 
0 : 1; if (data->optlen != 1 || data->num != CI_BSD_COMPRESS || (BSD_VERSION(data->options[0]) != BSD_CURRENT_VERSION) || (BSD_NBITS(data->options[0]) != db->maxbits) || (decomp && db->lens == NULL)) { printk(KERN_ERR "isdn_bsd: %d %d %d %d %lx\n",data->optlen,data->num,data->options[0],decomp,(unsigned long)db->lens); return 0; } if (decomp) for(indx=LAST;indx>=0;indx--) db->lens[indx] = 1; indx = db->hsize; while (indx-- != 0) { db->dict[indx].codem1 = BADCODEM1; db->dict[indx].cptr = 0; } db->unit = unit; db->mru = 0; db->debug = 1; bsd_reset(db,0,0,NULL,0,NULL); return 1; } /* * Obtain pointers to the various structures in the compression tables */ #define dict_ptrx(p,idx) &(p->dict[idx]) #define lens_ptrx(p,idx) &(p->lens[idx]) #ifdef DEBUG static unsigned short *lens_ptr(struct bsd_db *db, int idx) { if ((unsigned int) idx > (unsigned int) db->maxmaxcode) { printk (KERN_DEBUG "<9>ppp: lens_ptr(%d) > max\n", idx); idx = 0; } return lens_ptrx (db, idx); } static struct bsd_dict *dict_ptr(struct bsd_db *db, int idx) { if ((unsigned int) idx >= (unsigned int) db->hsize) { printk (KERN_DEBUG "<9>ppp: dict_ptr(%d) > max\n", idx); idx = 0; } return dict_ptrx (db, idx); } #else #define lens_ptr(db,idx) lens_ptrx(db,idx) #define dict_ptr(db,idx) dict_ptrx(db,idx) #endif /* * compress a packet */ static int bsd_compress (void *state, struct sk_buff *skb_in, struct sk_buff *skb_out,int proto) { struct bsd_db *db; int hshift; unsigned int max_ent; unsigned int n_bits; unsigned int bitno; unsigned long accm; int ent; unsigned long fcode; struct bsd_dict *dictp; unsigned char c; int hval,disp,ilen,mxcode; unsigned char *rptr = skb_in->data; int isize = skb_in->len; #define OUTPUT(ent) \ { \ bitno -= n_bits; \ accm |= ((ent) << bitno); \ do { \ if(skb_out && skb_tailroom(skb_out) > 0) \ *(skb_put(skb_out,1)) = (unsigned char) (accm>>24); \ accm <<= 8; \ bitno += 8; \ } while (bitno <= 24); \ } /* * If the protocol is not in the range we're interested in, * just return 
without compressing the packet. If it is, * the protocol becomes the first byte to compress. */ printk(KERN_DEBUG "bsd_compress called with %x\n",proto); ent = proto; if (proto < 0x21 || proto > 0xf9 || !(proto & 0x1) ) return 0; db = (struct bsd_db *) state; hshift = db->hshift; max_ent = db->max_ent; n_bits = db->n_bits; bitno = 32; accm = 0; mxcode = MAXCODE (n_bits); /* This is the PPP header information */ if(skb_out && skb_tailroom(skb_out) >= 2) { char *v = skb_put(skb_out,2); /* we only push our own data on the header, AC,PC and protos is pushed by caller */ v[0] = db->seqno >> 8; v[1] = db->seqno; } ilen = ++isize; /* This is off by one, but that is what is in draft! */ while (--ilen > 0) { c = *rptr++; fcode = BSD_KEY (ent, c); hval = BSD_HASH (ent, c, hshift); dictp = dict_ptr (db, hval); /* Validate and then check the entry. */ if (dictp->codem1 >= max_ent) goto nomatch; if (dictp->fcode == fcode) { ent = dictp->codem1 + 1; continue; /* found (prefix,suffix) */ } /* continue probing until a match or invalid entry */ disp = (hval == 0) ? 1 : hval; do { hval += disp; if (hval >= db->hsize) hval -= db->hsize; dictp = dict_ptr (db, hval); if (dictp->codem1 >= max_ent) goto nomatch; } while (dictp->fcode != fcode); ent = dictp->codem1 + 1; /* finally found (prefix,suffix) */ continue; nomatch: OUTPUT(ent); /* output the prefix */ /* code -> hashtable */ if (max_ent < db->maxmaxcode) { struct bsd_dict *dictp2; struct bsd_dict *dictp3; int indx; /* expand code size if needed */ if (max_ent >= mxcode) { db->n_bits = ++n_bits; mxcode = MAXCODE (n_bits); } /* * Invalidate old hash table entry using * this code, and then take it over. 
*/ dictp2 = dict_ptr (db, max_ent + 1); indx = dictp2->cptr; dictp3 = dict_ptr (db, indx); if (dictp3->codem1 == max_ent) dictp3->codem1 = BADCODEM1; dictp2->cptr = hval; dictp->codem1 = max_ent; dictp->fcode = fcode; db->max_ent = ++max_ent; if (db->lens) { unsigned short *len1 = lens_ptr (db, max_ent); unsigned short *len2 = lens_ptr (db, ent); *len1 = *len2 + 1; } } ent = c; } OUTPUT(ent); /* output the last code */ if(skb_out) db->bytes_out += skb_out->len; /* Do not count bytes from here */ db->uncomp_bytes += isize; db->in_count += isize; ++db->uncomp_count; ++db->seqno; if (bitno < 32) ++db->bytes_out; /* must be set before calling bsd_check */ /* * Generate the clear command if needed */ if (bsd_check(db)) OUTPUT (CLEAR); /* * Pad dribble bits of last code with ones. * Do not emit a completely useless byte of ones. */ if (bitno < 32 && skb_out && skb_tailroom(skb_out) > 0) *(skb_put(skb_out,1)) = (unsigned char) ((accm | (0xff << (bitno-8))) >> 24); /* * Increase code size if we would have without the packet * boundary because the decompressor will do so. */ if (max_ent >= mxcode && max_ent < db->maxmaxcode) db->n_bits++; /* If output length is too large then this is an incompressible frame. */ if (!skb_out || (skb_out && skb_out->len >= skb_in->len) ) { ++db->incomp_count; db->incomp_bytes += isize; return 0; } /* Count the number of compressed frames */ ++db->comp_count; db->comp_bytes += skb_out->len; return skb_out->len; #undef OUTPUT } /* * Update the "BSD Compress" dictionary on the receiver for * incompressible data by pretending to compress the incoming data. */ static void bsd_incomp (void *state, struct sk_buff *skb_in,int proto) { bsd_compress (state, skb_in, NULL, proto); } /* * Decompress "BSD Compress". 
*/ static int bsd_decompress (void *state, struct sk_buff *skb_in, struct sk_buff *skb_out, struct isdn_ppp_resetparams *rsparm) { struct bsd_db *db; unsigned int max_ent; unsigned long accm; unsigned int bitno; /* 1st valid bit in accm */ unsigned int n_bits; unsigned int tgtbitno; /* bitno when we have a code */ struct bsd_dict *dictp; int seq; unsigned int incode; unsigned int oldcode; unsigned int finchar; unsigned char *p,*ibuf; int ilen; int codelen; int extra; db = (struct bsd_db *) state; max_ent = db->max_ent; accm = 0; bitno = 32; /* 1st valid bit in accm */ n_bits = db->n_bits; tgtbitno = 32 - n_bits; /* bitno when we have a code */ printk(KERN_DEBUG "bsd_decompress called\n"); if(!skb_in || !skb_out) { printk(KERN_ERR "bsd_decompress called with NULL parameter\n"); return DECOMP_ERROR; } /* * Get the sequence number. */ if( (p = skb_pull(skb_in,2)) == NULL) { return DECOMP_ERROR; } p-=2; seq = (p[0] << 8) + p[1]; ilen = skb_in->len; ibuf = skb_in->data; /* * Check the sequence number and give up if it differs from * the value we're expecting. */ if (seq != db->seqno) { if (db->debug) { printk(KERN_DEBUG "bsd_decomp%d: bad sequence # %d, expected %d\n", db->unit, seq, db->seqno - 1); } return DECOMP_ERROR; } ++db->seqno; db->bytes_out += ilen; if(skb_tailroom(skb_out) > 0) *(skb_put(skb_out,1)) = 0; else return DECOMP_ERR_NOMEM; oldcode = CLEAR; /* * Keep the checkpoint correctly so that incompressible packets * clear the dictionary at the proper times. */ for (;;) { if (ilen-- <= 0) { db->in_count += (skb_out->len - 1); /* don't count the header */ break; } /* * Accumulate bytes until we have a complete code. * Then get the next code, relying on the 32-bit, * unsigned accm to mask the result. */ bitno -= 8; accm |= *ibuf++ << bitno; if (tgtbitno < bitno) continue; incode = accm >> tgtbitno; accm <<= n_bits; bitno += n_bits; /* * The dictionary must only be cleared at the end of a packet. 
*/ if (incode == CLEAR) { if (ilen > 0) { if (db->debug) printk(KERN_DEBUG "bsd_decomp%d: bad CLEAR\n", db->unit); return DECOMP_FATALERROR; /* probably a bug */ } bsd_clear(db); break; } if ((incode > max_ent + 2) || (incode > db->maxmaxcode) || (incode > max_ent && oldcode == CLEAR)) { if (db->debug) { printk(KERN_DEBUG "bsd_decomp%d: bad code 0x%x oldcode=0x%x ", db->unit, incode, oldcode); printk(KERN_DEBUG "max_ent=0x%x skb->Len=%d seqno=%d\n", max_ent, skb_out->len, db->seqno); } return DECOMP_FATALERROR; /* probably a bug */ } /* Special case for KwKwK string. */ if (incode > max_ent) { finchar = oldcode; extra = 1; } else { finchar = incode; extra = 0; } codelen = *(lens_ptr (db, finchar)); if( skb_tailroom(skb_out) < codelen + extra) { if (db->debug) { printk(KERN_DEBUG "bsd_decomp%d: ran out of mru\n", db->unit); #ifdef DEBUG printk(KERN_DEBUG " len=%d, finchar=0x%x, codelen=%d,skblen=%d\n", ilen, finchar, codelen, skb_out->len); #endif } return DECOMP_FATALERROR; } /* * Decode this code and install it in the decompressed buffer. 
*/ p = skb_put(skb_out,codelen); p += codelen; while (finchar > LAST) { struct bsd_dict *dictp2 = dict_ptr (db, finchar); dictp = dict_ptr (db, dictp2->cptr); #ifdef DEBUG if (--codelen <= 0 || dictp->codem1 != finchar-1) { if (codelen <= 0) { printk(KERN_ERR "bsd_decomp%d: fell off end of chain ", db->unit); printk(KERN_ERR "0x%x at 0x%x by 0x%x, max_ent=0x%x\n", incode, finchar, dictp2->cptr, max_ent); } else { if (dictp->codem1 != finchar-1) { printk(KERN_ERR "bsd_decomp%d: bad code chain 0x%x finchar=0x%x ",db->unit, incode, finchar); printk(KERN_ERR "oldcode=0x%x cptr=0x%x codem1=0x%x\n", oldcode, dictp2->cptr, dictp->codem1); } } return DECOMP_FATALERROR; } #endif { u32 fcode = dictp->fcode; *--p = (fcode >> 16) & 0xff; finchar = fcode & 0xffff; } } *--p = finchar; #ifdef DEBUG if (--codelen != 0) printk(KERN_ERR "bsd_decomp%d: short by %d after code 0x%x, max_ent=0x%x\n", db->unit, codelen, incode, max_ent); #endif if (extra) /* the KwKwK case again */ *(skb_put(skb_out,1)) = finchar; /* * If not first code in a packet, and * if not out of code space, then allocate a new code. * * Keep the hash table correct so it can be used * with uncompressed packets. */ if (oldcode != CLEAR && max_ent < db->maxmaxcode) { struct bsd_dict *dictp2, *dictp3; u16 *lens1, *lens2; unsigned long fcode; int hval, disp, indx; fcode = BSD_KEY(oldcode,finchar); hval = BSD_HASH(oldcode,finchar,db->hshift); dictp = dict_ptr (db, hval); /* look for a free hash table entry */ if (dictp->codem1 < max_ent) { disp = (hval == 0) ? 
1 : hval; do { hval += disp; if (hval >= db->hsize) hval -= db->hsize; dictp = dict_ptr (db, hval); } while (dictp->codem1 < max_ent); } /* * Invalidate previous hash table entry * assigned this code, and then take it over */ dictp2 = dict_ptr (db, max_ent + 1); indx = dictp2->cptr; dictp3 = dict_ptr (db, indx); if (dictp3->codem1 == max_ent) dictp3->codem1 = BADCODEM1; dictp2->cptr = hval; dictp->codem1 = max_ent; dictp->fcode = fcode; db->max_ent = ++max_ent; /* Update the length of this string. */ lens1 = lens_ptr (db, max_ent); lens2 = lens_ptr (db, oldcode); *lens1 = *lens2 + 1; /* Expand code size if needed. */ if (max_ent >= MAXCODE(n_bits) && max_ent < db->maxmaxcode) { db->n_bits = ++n_bits; tgtbitno = 32-n_bits; } } oldcode = incode; } ++db->comp_count; ++db->uncomp_count; db->comp_bytes += skb_in->len - BSD_OVHD; db->uncomp_bytes += skb_out->len; if (bsd_check(db)) { if (db->debug) printk(KERN_DEBUG "bsd_decomp%d: peer should have cleared dictionary on %d\n", db->unit, db->seqno - 1); } return skb_out->len; } /************************************************************* * Table of addresses for the BSD compression module *************************************************************/ static struct isdn_ppp_compressor ippp_bsd_compress = { .owner = THIS_MODULE, .num = CI_BSD_COMPRESS, .alloc = bsd_alloc, .free = bsd_free, .init = bsd_init, .reset = bsd_reset, .compress = bsd_compress, .decompress = bsd_decompress, .incomp = bsd_incomp, .stat = bsd_stats, }; /************************************************************* * Module support routines *************************************************************/ static int __init isdn_bsdcomp_init(void) { int answer = isdn_ppp_register_compressor (&ippp_bsd_compress); if (answer == 0) printk (KERN_INFO "PPP BSD Compression module registered\n"); return answer; } static void __exit isdn_bsdcomp_exit(void) { isdn_ppp_unregister_compressor (&ippp_bsd_compress); } module_init(isdn_bsdcomp_init); 
module_exit(isdn_bsdcomp_exit);
gpl-2.0
Gaojiquan/android_kernel_zte_digger
drivers/video/omap/lcd_mipid.c
4939
13930
/* * LCD driver for MIPI DBI-C / DCS compatible LCDs * * Copyright (C) 2006 Nokia Corporation * Author: Imre Deak <imre.deak@nokia.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/device.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/spi/spi.h> #include <linux/module.h> #include <plat/lcd_mipid.h> #include "omapfb.h" #define MIPID_MODULE_NAME "lcd_mipid" #define MIPID_CMD_READ_DISP_ID 0x04 #define MIPID_CMD_READ_RED 0x06 #define MIPID_CMD_READ_GREEN 0x07 #define MIPID_CMD_READ_BLUE 0x08 #define MIPID_CMD_READ_DISP_STATUS 0x09 #define MIPID_CMD_RDDSDR 0x0F #define MIPID_CMD_SLEEP_IN 0x10 #define MIPID_CMD_SLEEP_OUT 0x11 #define MIPID_CMD_DISP_OFF 0x28 #define MIPID_CMD_DISP_ON 0x29 #define MIPID_ESD_CHECK_PERIOD msecs_to_jiffies(5000) #define to_mipid_device(p) container_of(p, struct mipid_device, \ panel) struct mipid_device { int enabled; int revision; unsigned int saved_bklight_level; unsigned long hw_guard_end; /* next value of jiffies when we can issue the next sleep in/out command */ unsigned long hw_guard_wait; /* max guard time in jiffies */ struct omapfb_device *fbdev; struct spi_device *spi; struct mutex mutex; struct lcd_panel panel; struct workqueue_struct *esd_wq; struct delayed_work esd_work; void (*esd_check)(struct mipid_device *m); }; 
static void mipid_transfer(struct mipid_device *md, int cmd, const u8 *wbuf, int wlen, u8 *rbuf, int rlen) { struct spi_message m; struct spi_transfer *x, xfer[4]; u16 w; int r; BUG_ON(md->spi == NULL); spi_message_init(&m); memset(xfer, 0, sizeof(xfer)); x = &xfer[0]; cmd &= 0xff; x->tx_buf = &cmd; x->bits_per_word = 9; x->len = 2; spi_message_add_tail(x, &m); if (wlen) { x++; x->tx_buf = wbuf; x->len = wlen; x->bits_per_word = 9; spi_message_add_tail(x, &m); } if (rlen) { x++; x->rx_buf = &w; x->len = 1; spi_message_add_tail(x, &m); if (rlen > 1) { /* Arrange for the extra clock before the first * data bit. */ x->bits_per_word = 9; x->len = 2; x++; x->rx_buf = &rbuf[1]; x->len = rlen - 1; spi_message_add_tail(x, &m); } } r = spi_sync(md->spi, &m); if (r < 0) dev_dbg(&md->spi->dev, "spi_sync %d\n", r); if (rlen) rbuf[0] = w & 0xff; } static inline void mipid_cmd(struct mipid_device *md, int cmd) { mipid_transfer(md, cmd, NULL, 0, NULL, 0); } static inline void mipid_write(struct mipid_device *md, int reg, const u8 *buf, int len) { mipid_transfer(md, reg, buf, len, NULL, 0); } static inline void mipid_read(struct mipid_device *md, int reg, u8 *buf, int len) { mipid_transfer(md, reg, NULL, 0, buf, len); } static void set_data_lines(struct mipid_device *md, int data_lines) { u16 par; switch (data_lines) { case 16: par = 0x150; break; case 18: par = 0x160; break; case 24: par = 0x170; break; } mipid_write(md, 0x3a, (u8 *)&par, 2); } static void send_init_string(struct mipid_device *md) { u16 initpar[] = { 0x0102, 0x0100, 0x0100 }; mipid_write(md, 0xc2, (u8 *)initpar, sizeof(initpar)); set_data_lines(md, md->panel.data_lines); } static void hw_guard_start(struct mipid_device *md, int guard_msec) { md->hw_guard_wait = msecs_to_jiffies(guard_msec); md->hw_guard_end = jiffies + md->hw_guard_wait; } static void hw_guard_wait(struct mipid_device *md) { unsigned long wait = md->hw_guard_end - jiffies; if ((long)wait > 0 && wait <= md->hw_guard_wait) { 
set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(wait); } } static void set_sleep_mode(struct mipid_device *md, int on) { int cmd, sleep_time = 50; if (on) cmd = MIPID_CMD_SLEEP_IN; else cmd = MIPID_CMD_SLEEP_OUT; hw_guard_wait(md); mipid_cmd(md, cmd); hw_guard_start(md, 120); /* * When we enable the panel, it seems we _have_ to sleep * 120 ms before sending the init string. When disabling the * panel we'll sleep for the duration of 2 frames, so that the * controller can still provide the PCLK,HS,VS signals. */ if (!on) sleep_time = 120; msleep(sleep_time); } static void set_display_state(struct mipid_device *md, int enabled) { int cmd = enabled ? MIPID_CMD_DISP_ON : MIPID_CMD_DISP_OFF; mipid_cmd(md, cmd); } static int mipid_set_bklight_level(struct lcd_panel *panel, unsigned int level) { struct mipid_device *md = to_mipid_device(panel); struct mipid_platform_data *pd = md->spi->dev.platform_data; if (pd->get_bklight_max == NULL || pd->set_bklight_level == NULL) return -ENODEV; if (level > pd->get_bklight_max(pd)) return -EINVAL; if (!md->enabled) { md->saved_bklight_level = level; return 0; } pd->set_bklight_level(pd, level); return 0; } static unsigned int mipid_get_bklight_level(struct lcd_panel *panel) { struct mipid_device *md = to_mipid_device(panel); struct mipid_platform_data *pd = md->spi->dev.platform_data; if (pd->get_bklight_level == NULL) return -ENODEV; return pd->get_bklight_level(pd); } static unsigned int mipid_get_bklight_max(struct lcd_panel *panel) { struct mipid_device *md = to_mipid_device(panel); struct mipid_platform_data *pd = md->spi->dev.platform_data; if (pd->get_bklight_max == NULL) return -ENODEV; return pd->get_bklight_max(pd); } static unsigned long mipid_get_caps(struct lcd_panel *panel) { return OMAPFB_CAPS_SET_BACKLIGHT; } static u16 read_first_pixel(struct mipid_device *md) { u16 pixel; u8 red, green, blue; mutex_lock(&md->mutex); mipid_read(md, MIPID_CMD_READ_RED, &red, 1); mipid_read(md, MIPID_CMD_READ_GREEN, &green, 
1); mipid_read(md, MIPID_CMD_READ_BLUE, &blue, 1); mutex_unlock(&md->mutex); switch (md->panel.data_lines) { case 16: pixel = ((red >> 1) << 11) | (green << 5) | (blue >> 1); break; case 24: /* 24 bit -> 16 bit */ pixel = ((red >> 3) << 11) | ((green >> 2) << 5) | (blue >> 3); break; default: pixel = 0; BUG(); } return pixel; } static int mipid_run_test(struct lcd_panel *panel, int test_num) { struct mipid_device *md = to_mipid_device(panel); static const u16 test_values[4] = { 0x0000, 0xffff, 0xaaaa, 0x5555, }; int i; if (test_num != MIPID_TEST_RGB_LINES) return MIPID_TEST_INVALID; for (i = 0; i < ARRAY_SIZE(test_values); i++) { int delay; unsigned long tmo; omapfb_write_first_pixel(md->fbdev, test_values[i]); tmo = jiffies + msecs_to_jiffies(100); delay = 25; while (1) { u16 pixel; msleep(delay); pixel = read_first_pixel(md); if (pixel == test_values[i]) break; if (time_after(jiffies, tmo)) { dev_err(&md->spi->dev, "MIPI LCD RGB I/F test failed: " "expecting %04x, got %04x\n", test_values[i], pixel); return MIPID_TEST_FAILED; } delay = 10; } } return 0; } static void ls041y3_esd_recover(struct mipid_device *md) { dev_err(&md->spi->dev, "performing LCD ESD recovery\n"); set_sleep_mode(md, 1); set_sleep_mode(md, 0); } static void ls041y3_esd_check_mode1(struct mipid_device *md) { u8 state1, state2; mipid_read(md, MIPID_CMD_RDDSDR, &state1, 1); set_sleep_mode(md, 0); mipid_read(md, MIPID_CMD_RDDSDR, &state2, 1); dev_dbg(&md->spi->dev, "ESD mode 1 state1 %02x state2 %02x\n", state1, state2); /* Each sleep out command will trigger a self diagnostic and flip * Bit6 if the test passes. 
*/ if (!((state1 ^ state2) & (1 << 6))) ls041y3_esd_recover(md); } static void ls041y3_esd_check_mode2(struct mipid_device *md) { int i; u8 rbuf[2]; static const struct { int cmd; int wlen; u16 wbuf[3]; } *rd, rd_ctrl[7] = { { 0xb0, 4, { 0x0101, 0x01fe, } }, { 0xb1, 4, { 0x01de, 0x0121, } }, { 0xc2, 4, { 0x0100, 0x0100, } }, { 0xbd, 2, { 0x0100, } }, { 0xc2, 4, { 0x01fc, 0x0103, } }, { 0xb4, 0, }, { 0x00, 0, }, }; rd = rd_ctrl; for (i = 0; i < 3; i++, rd++) mipid_write(md, rd->cmd, (u8 *)rd->wbuf, rd->wlen); udelay(10); mipid_read(md, rd->cmd, rbuf, 2); rd++; for (i = 0; i < 3; i++, rd++) { udelay(10); mipid_write(md, rd->cmd, (u8 *)rd->wbuf, rd->wlen); } dev_dbg(&md->spi->dev, "ESD mode 2 state %02x\n", rbuf[1]); if (rbuf[1] == 0x00) ls041y3_esd_recover(md); } static void ls041y3_esd_check(struct mipid_device *md) { ls041y3_esd_check_mode1(md); if (md->revision >= 0x88) ls041y3_esd_check_mode2(md); } static void mipid_esd_start_check(struct mipid_device *md) { if (md->esd_check != NULL) queue_delayed_work(md->esd_wq, &md->esd_work, MIPID_ESD_CHECK_PERIOD); } static void mipid_esd_stop_check(struct mipid_device *md) { if (md->esd_check != NULL) cancel_delayed_work_sync(&md->esd_work); } static void mipid_esd_work(struct work_struct *work) { struct mipid_device *md = container_of(work, struct mipid_device, esd_work.work); mutex_lock(&md->mutex); md->esd_check(md); mutex_unlock(&md->mutex); mipid_esd_start_check(md); } static int mipid_enable(struct lcd_panel *panel) { struct mipid_device *md = to_mipid_device(panel); mutex_lock(&md->mutex); if (md->enabled) { mutex_unlock(&md->mutex); return 0; } set_sleep_mode(md, 0); md->enabled = 1; send_init_string(md); set_display_state(md, 1); mipid_set_bklight_level(panel, md->saved_bklight_level); mipid_esd_start_check(md); mutex_unlock(&md->mutex); return 0; } static void mipid_disable(struct lcd_panel *panel) { struct mipid_device *md = to_mipid_device(panel); /* * A final ESD work might be called before returning, * so do 
this without holding the lock. */ mipid_esd_stop_check(md); mutex_lock(&md->mutex); if (!md->enabled) { mutex_unlock(&md->mutex); return; } md->saved_bklight_level = mipid_get_bklight_level(panel); mipid_set_bklight_level(panel, 0); set_display_state(md, 0); set_sleep_mode(md, 1); md->enabled = 0; mutex_unlock(&md->mutex); } static int panel_enabled(struct mipid_device *md) { u32 disp_status; int enabled; mipid_read(md, MIPID_CMD_READ_DISP_STATUS, (u8 *)&disp_status, 4); disp_status = __be32_to_cpu(disp_status); enabled = (disp_status & (1 << 17)) && (disp_status & (1 << 10)); dev_dbg(&md->spi->dev, "LCD panel %senabled by bootloader (status 0x%04x)\n", enabled ? "" : "not ", disp_status); return enabled; } static int mipid_init(struct lcd_panel *panel, struct omapfb_device *fbdev) { struct mipid_device *md = to_mipid_device(panel); md->fbdev = fbdev; md->esd_wq = create_singlethread_workqueue("mipid_esd"); if (md->esd_wq == NULL) { dev_err(&md->spi->dev, "can't create ESD workqueue\n"); return -ENOMEM; } INIT_DELAYED_WORK(&md->esd_work, mipid_esd_work); mutex_init(&md->mutex); md->enabled = panel_enabled(md); if (md->enabled) mipid_esd_start_check(md); else md->saved_bklight_level = mipid_get_bklight_level(panel); return 0; } static void mipid_cleanup(struct lcd_panel *panel) { struct mipid_device *md = to_mipid_device(panel); if (md->enabled) mipid_esd_stop_check(md); destroy_workqueue(md->esd_wq); } static struct lcd_panel mipid_panel = { .config = OMAP_LCDC_PANEL_TFT, .bpp = 16, .x_res = 800, .y_res = 480, .pixel_clock = 21940, .hsw = 50, .hfp = 20, .hbp = 15, .vsw = 2, .vfp = 1, .vbp = 3, .init = mipid_init, .cleanup = mipid_cleanup, .enable = mipid_enable, .disable = mipid_disable, .get_caps = mipid_get_caps, .set_bklight_level = mipid_set_bklight_level, .get_bklight_level = mipid_get_bklight_level, .get_bklight_max = mipid_get_bklight_max, .run_test = mipid_run_test, }; static int mipid_detect(struct mipid_device *md) { struct mipid_platform_data *pdata; u8 
display_id[3]; pdata = md->spi->dev.platform_data; if (pdata == NULL) { dev_err(&md->spi->dev, "missing platform data\n"); return -ENOENT; } mipid_read(md, MIPID_CMD_READ_DISP_ID, display_id, 3); dev_dbg(&md->spi->dev, "MIPI display ID: %02x%02x%02x\n", display_id[0], display_id[1], display_id[2]); switch (display_id[0]) { case 0x45: md->panel.name = "lph8923"; break; case 0x83: md->panel.name = "ls041y3"; md->esd_check = ls041y3_esd_check; break; default: md->panel.name = "unknown"; dev_err(&md->spi->dev, "invalid display ID\n"); return -ENODEV; } md->revision = display_id[1]; md->panel.data_lines = pdata->data_lines; pr_info("omapfb: %s rev %02x LCD detected, %d data lines\n", md->panel.name, md->revision, md->panel.data_lines); return 0; } static int mipid_spi_probe(struct spi_device *spi) { struct mipid_device *md; int r; md = kzalloc(sizeof(*md), GFP_KERNEL); if (md == NULL) { dev_err(&spi->dev, "out of memory\n"); return -ENOMEM; } spi->mode = SPI_MODE_0; md->spi = spi; dev_set_drvdata(&spi->dev, md); md->panel = mipid_panel; r = mipid_detect(md); if (r < 0) return r; omapfb_register_panel(&md->panel); return 0; } static int mipid_spi_remove(struct spi_device *spi) { struct mipid_device *md = dev_get_drvdata(&spi->dev); mipid_disable(&md->panel); kfree(md); return 0; } static struct spi_driver mipid_spi_driver = { .driver = { .name = MIPID_MODULE_NAME, .owner = THIS_MODULE, }, .probe = mipid_spi_probe, .remove = __devexit_p(mipid_spi_remove), }; module_spi_driver(mipid_spi_driver); MODULE_DESCRIPTION("MIPI display driver"); MODULE_LICENSE("GPL");
gpl-2.0
CandyDevices/kernel_htc_msm8974
drivers/mmc/host/dw_mmc-pci.c
4939
3636
/* * Synopsys DesignWare Multimedia Card PCI Interface driver * * Copyright (C) 2012 Vayavya Labs Pvt. Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> #include <linux/mmc/dw_mmc.h> #include "dw_mmc.h" #define PCI_BAR_NO 2 #define COMPLETE_BAR 0 #define SYNOPSYS_DW_MCI_VENDOR_ID 0x700 #define SYNOPSYS_DW_MCI_DEVICE_ID 0x1107 /* Defining the Capabilities */ #define DW_MCI_CAPABILITIES (MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED |\ MMC_CAP_SD_HIGHSPEED | MMC_CAP_8_BIT_DATA |\ MMC_CAP_SDIO_IRQ) static struct dw_mci_board pci_board_data = { .num_slots = 1, .caps = DW_MCI_CAPABILITIES, .bus_hz = 33 * 1000 * 1000, .detect_delay_ms = 200, .fifo_depth = 32, }; static int __devinit dw_mci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *entries) { struct dw_mci *host; int ret; ret = pci_enable_device(pdev); if (ret) return ret; if (pci_request_regions(pdev, "dw_mmc_pci")) { ret = -ENODEV; goto err_disable_dev; } host = kzalloc(sizeof(struct dw_mci), GFP_KERNEL); if (!host) { ret = -ENOMEM; goto err_release; } host->irq = pdev->irq; host->irq_flags = IRQF_SHARED; host->dev = pdev->dev; host->pdata = &pci_board_data; host->regs = pci_iomap(pdev, PCI_BAR_NO, COMPLETE_BAR); if (!host->regs) { ret = -EIO; goto err_unmap; } pci_set_drvdata(pdev, host); ret = dw_mci_probe(host); if (ret) goto err_probe_failed; return ret; err_probe_failed: pci_iounmap(pdev, host->regs); err_unmap: kfree(host); err_release: pci_release_regions(pdev); err_disable_dev: pci_disable_device(pdev); return ret; } static void __devexit dw_mci_pci_remove(struct pci_dev *pdev) { struct dw_mci 
*host = pci_get_drvdata(pdev); dw_mci_remove(host); pci_set_drvdata(pdev, NULL); pci_release_regions(pdev); pci_iounmap(pdev, host->regs); kfree(host); pci_disable_device(pdev); } #ifdef CONFIG_PM_SLEEP static int dw_mci_pci_suspend(struct device *dev) { int ret; struct pci_dev *pdev = to_pci_dev(dev); struct dw_mci *host = pci_get_drvdata(pdev); ret = dw_mci_suspend(host); return ret; } static int dw_mci_pci_resume(struct device *dev) { int ret; struct pci_dev *pdev = to_pci_dev(dev); struct dw_mci *host = pci_get_drvdata(pdev); ret = dw_mci_resume(host); return ret; } #else #define dw_mci_pci_suspend NULL #define dw_mci_pci_resume NULL #endif /* CONFIG_PM_SLEEP */ static SIMPLE_DEV_PM_OPS(dw_mci_pci_pmops, dw_mci_pci_suspend, dw_mci_pci_resume); static DEFINE_PCI_DEVICE_TABLE(dw_mci_pci_id) = { { PCI_DEVICE(SYNOPSYS_DW_MCI_VENDOR_ID, SYNOPSYS_DW_MCI_DEVICE_ID) }, {} }; MODULE_DEVICE_TABLE(pci, dw_mci_pci_id); static struct pci_driver dw_mci_pci_driver = { .name = "dw_mmc_pci", .id_table = dw_mci_pci_id, .probe = dw_mci_pci_probe, .remove = dw_mci_pci_remove, .driver = { .pm = &dw_mci_pci_pmops }, }; static int __init dw_mci_init(void) { return pci_register_driver(&dw_mci_pci_driver); } static void __exit dw_mci_exit(void) { pci_unregister_driver(&dw_mci_pci_driver); } module_init(dw_mci_init); module_exit(dw_mci_exit); MODULE_DESCRIPTION("DW Multimedia Card PCI Interface driver"); MODULE_AUTHOR("Shashidhar Hiremath <shashidharh@vayavyalabs.com>"); MODULE_LICENSE("GPL v2");
gpl-2.0
Foxda-Tech/polaris-kernel
drivers/media/i2c/bt819.c
7243
14285
/* * bt819 - BT819A VideoStream Decoder (Rockwell Part) * * Copyright (C) 1999 Mike Bernson <mike@mlb.org> * Copyright (C) 1998 Dave Perks <dperks@ibm.net> * * Modifications for LML33/DC10plus unified driver * Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx> * * Changes by Ronald Bultje <rbultje@ronald.bitfreak.net> * - moved over to linux>=2.4.x i2c protocol (9/9/2002) * * This code was modify/ported from the saa7111 driver written * by Dave Perks. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/types.h> #include <linux/ioctl.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <linux/slab.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> #include <media/v4l2-ctrls.h> #include <media/bt819.h> MODULE_DESCRIPTION("Brooktree-819 video decoder driver"); MODULE_AUTHOR("Mike Bernson & Dave Perks"); MODULE_LICENSE("GPL"); static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0-1)"); /* ----------------------------------------------------------------------- */ struct bt819 { struct v4l2_subdev sd; struct v4l2_ctrl_handler hdl; unsigned char reg[32]; v4l2_std_id norm; int ident; int input; int enable; }; static inline struct bt819 *to_bt819(struct v4l2_subdev *sd) { return container_of(sd, struct bt819, sd); } static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl) { return &container_of(ctrl->handler, struct bt819, hdl)->sd; } struct timing { int hactive; int hdelay; int vactive; int vdelay; int hscale; int vscale; }; /* for values, see the bt819 datasheet */ static struct timing timing_data[] = { {864 - 24, 20, 625 - 2, 1, 0x0504, 0x0000}, {858 - 24, 20, 525 - 2, 1, 0x00f8, 0x0000}, }; /* ----------------------------------------------------------------------- */ static inline int bt819_write(struct bt819 *decoder, u8 reg, u8 value) { struct i2c_client *client = v4l2_get_subdevdata(&decoder->sd); decoder->reg[reg] = value; return i2c_smbus_write_byte_data(client, reg, value); } static inline int bt819_setbit(struct bt819 *decoder, u8 reg, u8 bit, u8 value) { return bt819_write(decoder, reg, (decoder->reg[reg] & ~(1 << bit)) | (value ? 
(1 << bit) : 0)); } static int bt819_write_block(struct bt819 *decoder, const u8 *data, unsigned int len) { struct i2c_client *client = v4l2_get_subdevdata(&decoder->sd); int ret = -1; u8 reg; /* the bt819 has an autoincrement function, use it if * the adapter understands raw I2C */ if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { /* do raw I2C, not smbus compatible */ u8 block_data[32]; int block_len; while (len >= 2) { block_len = 0; block_data[block_len++] = reg = data[0]; do { block_data[block_len++] = decoder->reg[reg++] = data[1]; len -= 2; data += 2; } while (len >= 2 && data[0] == reg && block_len < 32); ret = i2c_master_send(client, block_data, block_len); if (ret < 0) break; } } else { /* do some slow I2C emulation kind of thing */ while (len >= 2) { reg = *data++; ret = bt819_write(decoder, reg, *data++); if (ret < 0) break; len -= 2; } } return ret; } static inline int bt819_read(struct bt819 *decoder, u8 reg) { struct i2c_client *client = v4l2_get_subdevdata(&decoder->sd); return i2c_smbus_read_byte_data(client, reg); } static int bt819_init(struct v4l2_subdev *sd) { static unsigned char init[] = { /*0x1f, 0x00,*/ /* Reset */ 0x01, 0x59, /* 0x01 input format */ 0x02, 0x00, /* 0x02 temporal decimation */ 0x03, 0x12, /* 0x03 Cropping msb */ 0x04, 0x16, /* 0x04 Vertical Delay, lsb */ 0x05, 0xe0, /* 0x05 Vertical Active lsb */ 0x06, 0x80, /* 0x06 Horizontal Delay lsb */ 0x07, 0xd0, /* 0x07 Horizontal Active lsb */ 0x08, 0x00, /* 0x08 Horizontal Scaling msb */ 0x09, 0xf8, /* 0x09 Horizontal Scaling lsb */ 0x0a, 0x00, /* 0x0a Brightness control */ 0x0b, 0x30, /* 0x0b Miscellaneous control */ 0x0c, 0xd8, /* 0x0c Luma Gain lsb */ 0x0d, 0xfe, /* 0x0d Chroma Gain (U) lsb */ 0x0e, 0xb4, /* 0x0e Chroma Gain (V) msb */ 0x0f, 0x00, /* 0x0f Hue control */ 0x12, 0x04, /* 0x12 Output Format */ 0x13, 0x20, /* 0x13 Vertial Scaling msb 0x00 chroma comb OFF, line drop scaling, interlace scaling BUG? Why does turning the chroma comb on fuck up color? 
Bug in the bt819 stepping on my board? */ 0x14, 0x00, /* 0x14 Vertial Scaling lsb */ 0x16, 0x07, /* 0x16 Video Timing Polarity ACTIVE=active low FIELD: high=odd, vreset=active high, hreset=active high */ 0x18, 0x68, /* 0x18 AGC Delay */ 0x19, 0x5d, /* 0x19 Burst Gate Delay */ 0x1a, 0x80, /* 0x1a ADC Interface */ }; struct bt819 *decoder = to_bt819(sd); struct timing *timing = &timing_data[(decoder->norm & V4L2_STD_525_60) ? 1 : 0]; init[0x03 * 2 - 1] = (((timing->vdelay >> 8) & 0x03) << 6) | (((timing->vactive >> 8) & 0x03) << 4) | (((timing->hdelay >> 8) & 0x03) << 2) | ((timing->hactive >> 8) & 0x03); init[0x04 * 2 - 1] = timing->vdelay & 0xff; init[0x05 * 2 - 1] = timing->vactive & 0xff; init[0x06 * 2 - 1] = timing->hdelay & 0xff; init[0x07 * 2 - 1] = timing->hactive & 0xff; init[0x08 * 2 - 1] = timing->hscale >> 8; init[0x09 * 2 - 1] = timing->hscale & 0xff; /* 0x15 in array is address 0x19 */ init[0x15 * 2 - 1] = (decoder->norm & V4L2_STD_625_50) ? 115 : 93; /* Chroma burst delay */ /* reset */ bt819_write(decoder, 0x1f, 0x00); mdelay(1); /* init */ return bt819_write_block(decoder, init, sizeof(init)); } /* ----------------------------------------------------------------------- */ static int bt819_status(struct v4l2_subdev *sd, u32 *pstatus, v4l2_std_id *pstd) { struct bt819 *decoder = to_bt819(sd); int status = bt819_read(decoder, 0x00); int res = V4L2_IN_ST_NO_SIGNAL; v4l2_std_id std; if ((status & 0x80)) res = 0; if ((status & 0x10)) std = V4L2_STD_PAL; else std = V4L2_STD_NTSC; if (pstd) *pstd = std; if (pstatus) *pstatus = res; v4l2_dbg(1, debug, sd, "get status %x\n", status); return 0; } static int bt819_querystd(struct v4l2_subdev *sd, v4l2_std_id *std) { return bt819_status(sd, NULL, std); } static int bt819_g_input_status(struct v4l2_subdev *sd, u32 *status) { return bt819_status(sd, status, NULL); } static int bt819_s_std(struct v4l2_subdev *sd, v4l2_std_id std) { struct bt819 *decoder = to_bt819(sd); struct timing *timing = NULL; v4l2_dbg(1, 
debug, sd, "set norm %llx\n", (unsigned long long)std); if (sd->v4l2_dev == NULL || sd->v4l2_dev->notify == NULL) v4l2_err(sd, "no notify found!\n"); if (std & V4L2_STD_NTSC) { v4l2_subdev_notify(sd, BT819_FIFO_RESET_LOW, NULL); bt819_setbit(decoder, 0x01, 0, 1); bt819_setbit(decoder, 0x01, 1, 0); bt819_setbit(decoder, 0x01, 5, 0); bt819_write(decoder, 0x18, 0x68); bt819_write(decoder, 0x19, 0x5d); /* bt819_setbit(decoder, 0x1a, 5, 1); */ timing = &timing_data[1]; } else if (std & V4L2_STD_PAL) { v4l2_subdev_notify(sd, BT819_FIFO_RESET_LOW, NULL); bt819_setbit(decoder, 0x01, 0, 1); bt819_setbit(decoder, 0x01, 1, 1); bt819_setbit(decoder, 0x01, 5, 1); bt819_write(decoder, 0x18, 0x7f); bt819_write(decoder, 0x19, 0x72); /* bt819_setbit(decoder, 0x1a, 5, 0); */ timing = &timing_data[0]; } else { v4l2_dbg(1, debug, sd, "unsupported norm %llx\n", (unsigned long long)std); return -EINVAL; } bt819_write(decoder, 0x03, (((timing->vdelay >> 8) & 0x03) << 6) | (((timing->vactive >> 8) & 0x03) << 4) | (((timing->hdelay >> 8) & 0x03) << 2) | ((timing->hactive >> 8) & 0x03)); bt819_write(decoder, 0x04, timing->vdelay & 0xff); bt819_write(decoder, 0x05, timing->vactive & 0xff); bt819_write(decoder, 0x06, timing->hdelay & 0xff); bt819_write(decoder, 0x07, timing->hactive & 0xff); bt819_write(decoder, 0x08, (timing->hscale >> 8) & 0xff); bt819_write(decoder, 0x09, timing->hscale & 0xff); decoder->norm = std; v4l2_subdev_notify(sd, BT819_FIFO_RESET_HIGH, NULL); return 0; } static int bt819_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct bt819 *decoder = to_bt819(sd); v4l2_dbg(1, debug, sd, "set input %x\n", input); if (input > 7) return -EINVAL; if (sd->v4l2_dev == NULL || sd->v4l2_dev->notify == NULL) v4l2_err(sd, "no notify found!\n"); if (decoder->input != input) { v4l2_subdev_notify(sd, BT819_FIFO_RESET_LOW, NULL); decoder->input = input; /* select mode */ if (decoder->input == 0) { bt819_setbit(decoder, 0x0b, 6, 0); bt819_setbit(decoder, 0x1a, 1, 
1); } else { bt819_setbit(decoder, 0x0b, 6, 1); bt819_setbit(decoder, 0x1a, 1, 0); } v4l2_subdev_notify(sd, BT819_FIFO_RESET_HIGH, NULL); } return 0; } static int bt819_s_stream(struct v4l2_subdev *sd, int enable) { struct bt819 *decoder = to_bt819(sd); v4l2_dbg(1, debug, sd, "enable output %x\n", enable); if (decoder->enable != enable) { decoder->enable = enable; bt819_setbit(decoder, 0x16, 7, !enable); } return 0; } static int bt819_s_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_subdev *sd = to_sd(ctrl); struct bt819 *decoder = to_bt819(sd); int temp; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: bt819_write(decoder, 0x0a, ctrl->val); break; case V4L2_CID_CONTRAST: bt819_write(decoder, 0x0c, ctrl->val & 0xff); bt819_setbit(decoder, 0x0b, 2, ((ctrl->val >> 8) & 0x01)); break; case V4L2_CID_SATURATION: bt819_write(decoder, 0x0d, (ctrl->val >> 7) & 0xff); bt819_setbit(decoder, 0x0b, 1, ((ctrl->val >> 15) & 0x01)); /* Ratio between U gain and V gain must stay the same as the ratio between the default U and V gain values. 
*/ temp = (ctrl->val * 180) / 254; bt819_write(decoder, 0x0e, (temp >> 7) & 0xff); bt819_setbit(decoder, 0x0b, 0, (temp >> 15) & 0x01); break; case V4L2_CID_HUE: bt819_write(decoder, 0x0f, ctrl->val); break; default: return -EINVAL; } return 0; } static int bt819_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct bt819 *decoder = to_bt819(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, decoder->ident, 0); } /* ----------------------------------------------------------------------- */ static const struct v4l2_ctrl_ops bt819_ctrl_ops = { .s_ctrl = bt819_s_ctrl, }; static const struct v4l2_subdev_core_ops bt819_core_ops = { .g_chip_ident = bt819_g_chip_ident, .g_ext_ctrls = v4l2_subdev_g_ext_ctrls, .try_ext_ctrls = v4l2_subdev_try_ext_ctrls, .s_ext_ctrls = v4l2_subdev_s_ext_ctrls, .g_ctrl = v4l2_subdev_g_ctrl, .s_ctrl = v4l2_subdev_s_ctrl, .queryctrl = v4l2_subdev_queryctrl, .querymenu = v4l2_subdev_querymenu, .s_std = bt819_s_std, }; static const struct v4l2_subdev_video_ops bt819_video_ops = { .s_routing = bt819_s_routing, .s_stream = bt819_s_stream, .querystd = bt819_querystd, .g_input_status = bt819_g_input_status, }; static const struct v4l2_subdev_ops bt819_ops = { .core = &bt819_core_ops, .video = &bt819_video_ops, }; /* ----------------------------------------------------------------------- */ static int bt819_probe(struct i2c_client *client, const struct i2c_device_id *id) { int i, ver; struct bt819 *decoder; struct v4l2_subdev *sd; const char *name; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; decoder = kzalloc(sizeof(struct bt819), GFP_KERNEL); if (decoder == NULL) return -ENOMEM; sd = &decoder->sd; v4l2_i2c_subdev_init(sd, client, &bt819_ops); ver = bt819_read(decoder, 0x17); switch (ver & 0xf0) { case 0x70: name = "bt819a"; decoder->ident = V4L2_IDENT_BT819A; break; case 
0x60: name = "bt817a"; decoder->ident = V4L2_IDENT_BT817A; break; case 0x20: name = "bt815a"; decoder->ident = V4L2_IDENT_BT815A; break; default: v4l2_dbg(1, debug, sd, "unknown chip version 0x%02x\n", ver); return -ENODEV; } v4l_info(client, "%s found @ 0x%x (%s)\n", name, client->addr << 1, client->adapter->name); decoder->norm = V4L2_STD_NTSC; decoder->input = 0; decoder->enable = 1; i = bt819_init(sd); if (i < 0) v4l2_dbg(1, debug, sd, "init status %d\n", i); v4l2_ctrl_handler_init(&decoder->hdl, 4); v4l2_ctrl_new_std(&decoder->hdl, &bt819_ctrl_ops, V4L2_CID_BRIGHTNESS, -128, 127, 1, 0); v4l2_ctrl_new_std(&decoder->hdl, &bt819_ctrl_ops, V4L2_CID_CONTRAST, 0, 511, 1, 0xd8); v4l2_ctrl_new_std(&decoder->hdl, &bt819_ctrl_ops, V4L2_CID_SATURATION, 0, 511, 1, 0xfe); v4l2_ctrl_new_std(&decoder->hdl, &bt819_ctrl_ops, V4L2_CID_HUE, -128, 127, 1, 0); sd->ctrl_handler = &decoder->hdl; if (decoder->hdl.error) { int err = decoder->hdl.error; v4l2_ctrl_handler_free(&decoder->hdl); kfree(decoder); return err; } v4l2_ctrl_handler_setup(&decoder->hdl); return 0; } static int bt819_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct bt819 *decoder = to_bt819(sd); v4l2_device_unregister_subdev(sd); v4l2_ctrl_handler_free(&decoder->hdl); kfree(decoder); return 0; } /* ----------------------------------------------------------------------- */ static const struct i2c_device_id bt819_id[] = { { "bt819a", 0 }, { "bt817a", 0 }, { "bt815a", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, bt819_id); static struct i2c_driver bt819_driver = { .driver = { .owner = THIS_MODULE, .name = "bt819", }, .probe = bt819_probe, .remove = bt819_remove, .id_table = bt819_id, }; module_i2c_driver(bt819_driver);
gpl-2.0
boddob/linux
drivers/atm/zatm.c
76
44446
/* drivers/atm/zatm.c - ZeitNet ZN122x device driver */ /* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/pci.h> #include <linux/errno.h> #include <linux/atm.h> #include <linux/atmdev.h> #include <linux/sonet.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/uio.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/atm_zatm.h> #include <linux/capability.h> #include <linux/bitops.h> #include <linux/wait.h> #include <linux/slab.h> #include <asm/byteorder.h> #include <asm/string.h> #include <asm/io.h> #include <linux/atomic.h> #include <asm/uaccess.h> #include "uPD98401.h" #include "uPD98402.h" #include "zeprom.h" #include "zatm.h" /* * TODO: * * Minor features * - support 64 kB SDUs (will have to use multibuffer batches then :-( ) * - proper use of CDV, credit = max(1,CDVT*PCR) * - AAL0 * - better receive timestamps * - OAM */ #define ZATM_COPPER 1 #if 0 #define DPRINTK(format,args...) printk(KERN_DEBUG format,##args) #else #define DPRINTK(format,args...) #endif #ifndef CONFIG_ATM_ZATM_DEBUG #define NULLCHECK(x) #define EVENT(s,a,b) static void event_dump(void) { } #else /* * NULL pointer checking */ #define NULLCHECK(x) \ if ((unsigned long) (x) < 0x30) printk(KERN_CRIT #x "==0x%x\n", (int) (x)) /* * Very extensive activity logging. Greatly improves bug detection speed but * costs a few Mbps if enabled. */ #define EV 64 static const char *ev[EV]; static unsigned long ev_a[EV],ev_b[EV]; static int ec = 0; static void EVENT(const char *s,unsigned long a,unsigned long b) { ev[ec] = s; ev_a[ec] = a; ev_b[ec] = b; ec = (ec+1) % EV; } static void event_dump(void) { int n,i; printk(KERN_NOTICE "----- event dump follows -----\n"); for (n = 0; n < EV; n++) { i = (ec+n) % EV; printk(KERN_NOTICE); printk(ev[i] ? 
ev[i] : "(null)",ev_a[i],ev_b[i]); } printk(KERN_NOTICE "----- event dump ends here -----\n"); } #endif /* CONFIG_ATM_ZATM_DEBUG */ #define RING_BUSY 1 /* indication from do_tx that PDU has to be backlogged */ static struct atm_dev *zatm_boards = NULL; static unsigned long dummy[2] = {0,0}; #define zin_n(r) inl(zatm_dev->base+r*4) #define zin(r) inl(zatm_dev->base+uPD98401_##r*4) #define zout(v,r) outl(v,zatm_dev->base+uPD98401_##r*4) #define zwait while (zin(CMR) & uPD98401_BUSY) /* RX0, RX1, TX0, TX1 */ static const int mbx_entries[NR_MBX] = { 1024,1024,1024,1024 }; static const int mbx_esize[NR_MBX] = { 16,16,4,4 }; /* entry size in bytes */ #define MBX_SIZE(i) (mbx_entries[i]*mbx_esize[i]) /*-------------------------------- utilities --------------------------------*/ static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr) { zwait; zout(value,CER); zout(uPD98401_IND_ACC | uPD98401_IA_BALL | (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR); } static u32 zpeekl(struct zatm_dev *zatm_dev,u32 addr) { zwait; zout(uPD98401_IND_ACC | uPD98401_IA_BALL | uPD98401_IA_RW | (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR); zwait; return zin(CER); } /*------------------------------- free lists --------------------------------*/ /* * Free buffer head structure: * [0] pointer to buffer (for SAR) * [1] buffer descr link pointer (for SAR) * [2] back pointer to skb (for poll_rx) * [3] data * ... */ struct rx_buffer_head { u32 buffer; /* pointer to buffer (for SAR) */ u32 link; /* buffer descriptor link pointer (for SAR) */ struct sk_buff *skb; /* back pointer to skb (for poll_rx) */ }; static void refill_pool(struct atm_dev *dev,int pool) { struct zatm_dev *zatm_dev; struct sk_buff *skb; struct rx_buffer_head *first; unsigned long flags; int align,offset,free,count,size; EVENT("refill_pool\n",0,0); zatm_dev = ZATM_DEV(dev); size = (64 << (pool <= ZATM_AAL5_POOL_BASE ? 
0 : pool-ZATM_AAL5_POOL_BASE))+sizeof(struct rx_buffer_head); if (size < PAGE_SIZE) { align = 32; /* for 32 byte alignment */ offset = sizeof(struct rx_buffer_head); } else { align = 4096; offset = zatm_dev->pool_info[pool].offset+ sizeof(struct rx_buffer_head); } size += align; spin_lock_irqsave(&zatm_dev->lock, flags); free = zpeekl(zatm_dev,zatm_dev->pool_base+2*pool) & uPD98401_RXFP_REMAIN; spin_unlock_irqrestore(&zatm_dev->lock, flags); if (free >= zatm_dev->pool_info[pool].low_water) return; EVENT("starting ... POOL: 0x%x, 0x%x\n", zpeekl(zatm_dev,zatm_dev->pool_base+2*pool), zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1)); EVENT("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); count = 0; first = NULL; while (free < zatm_dev->pool_info[pool].high_water) { struct rx_buffer_head *head; skb = alloc_skb(size,GFP_ATOMIC); if (!skb) { printk(KERN_WARNING DEV_LABEL "(Itf %d): got no new " "skb (%d) with %d free\n",dev->number,size,free); break; } skb_reserve(skb,(unsigned char *) ((((unsigned long) skb->data+ align+offset-1) & ~(unsigned long) (align-1))-offset)- skb->data); head = (struct rx_buffer_head *) skb->data; skb_reserve(skb,sizeof(struct rx_buffer_head)); if (!first) first = head; count++; head->buffer = virt_to_bus(skb->data); head->link = 0; head->skb = skb; EVENT("enq skb 0x%08lx/0x%08lx\n",(unsigned long) skb, (unsigned long) head); spin_lock_irqsave(&zatm_dev->lock, flags); if (zatm_dev->last_free[pool]) ((struct rx_buffer_head *) (zatm_dev->last_free[pool]-> data))[-1].link = virt_to_bus(head); zatm_dev->last_free[pool] = skb; skb_queue_tail(&zatm_dev->pool[pool],skb); spin_unlock_irqrestore(&zatm_dev->lock, flags); free++; } if (first) { spin_lock_irqsave(&zatm_dev->lock, flags); zwait; zout(virt_to_bus(first),CER); zout(uPD98401_ADD_BAT | (pool << uPD98401_POOL_SHIFT) | count, CMR); spin_unlock_irqrestore(&zatm_dev->lock, flags); EVENT ("POOL: 0x%x, 0x%x\n", zpeekl(zatm_dev,zatm_dev->pool_base+2*pool), zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1)); 
EVENT("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); } } static void drain_free(struct atm_dev *dev,int pool) { skb_queue_purge(&ZATM_DEV(dev)->pool[pool]); } static int pool_index(int max_pdu) { int i; if (max_pdu % ATM_CELL_PAYLOAD) printk(KERN_ERR DEV_LABEL ": driver error in pool_index: " "max_pdu is %d\n",max_pdu); if (max_pdu > 65536) return -1; for (i = 0; (64 << i) < max_pdu; i++); return i+ZATM_AAL5_POOL_BASE; } /* use_pool isn't reentrant */ static void use_pool(struct atm_dev *dev,int pool) { struct zatm_dev *zatm_dev; unsigned long flags; int size; zatm_dev = ZATM_DEV(dev); if (!(zatm_dev->pool_info[pool].ref_count++)) { skb_queue_head_init(&zatm_dev->pool[pool]); size = pool-ZATM_AAL5_POOL_BASE; if (size < 0) size = 0; /* 64B... */ else if (size > 10) size = 10; /* ... 64kB */ spin_lock_irqsave(&zatm_dev->lock, flags); zpokel(zatm_dev,((zatm_dev->pool_info[pool].low_water/4) << uPD98401_RXFP_ALERT_SHIFT) | (1 << uPD98401_RXFP_BTSZ_SHIFT) | (size << uPD98401_RXFP_BFSZ_SHIFT), zatm_dev->pool_base+pool*2); zpokel(zatm_dev,(unsigned long) dummy,zatm_dev->pool_base+ pool*2+1); spin_unlock_irqrestore(&zatm_dev->lock, flags); zatm_dev->last_free[pool] = NULL; refill_pool(dev,pool); } DPRINTK("pool %d: %d\n",pool,zatm_dev->pool_info[pool].ref_count); } static void unuse_pool(struct atm_dev *dev,int pool) { if (!(--ZATM_DEV(dev)->pool_info[pool].ref_count)) drain_free(dev,pool); } /*----------------------------------- RX ------------------------------------*/ #if 0 static void exception(struct atm_vcc *vcc) { static int count = 0; struct zatm_dev *zatm_dev = ZATM_DEV(vcc->dev); struct zatm_vcc *zatm_vcc = ZATM_VCC(vcc); unsigned long *qrp; int i; if (count++ > 2) return; for (i = 0; i < 8; i++) printk("TX%d: 0x%08lx\n",i, zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+i)); for (i = 0; i < 5; i++) printk("SH%d: 0x%08lx\n",i, zpeekl(zatm_dev,uPD98401_IM(zatm_vcc->shaper)+16*i)); qrp = (unsigned long *) zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+ 
uPD98401_TXVC_QRP); printk("qrp=0x%08lx\n",(unsigned long) qrp); for (i = 0; i < 4; i++) printk("QRP[%d]: 0x%08lx",i,qrp[i]); } #endif static const char *err_txt[] = { "No error", "RX buf underflow", "RX FIFO overrun", "Maximum len violation", "CRC error", "User abort", "Length violation", "T1 error", "Deactivated", "???", "???", "???", "???", "???", "???", "???" }; static void poll_rx(struct atm_dev *dev,int mbx) { struct zatm_dev *zatm_dev; unsigned long pos; u32 x; int error; EVENT("poll_rx\n",0,0); zatm_dev = ZATM_DEV(dev); pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx)); while (x = zin(MWA(mbx)), (pos & 0xffff) != x) { u32 *here; struct sk_buff *skb; struct atm_vcc *vcc; int cells,size,chan; EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x); here = (u32 *) pos; if (((pos += 16) & 0xffff) == zatm_dev->mbx_end[mbx]) pos = zatm_dev->mbx_start[mbx]; cells = here[0] & uPD98401_AAL5_SIZE; #if 0 printk("RX IND: 0x%x, 0x%x, 0x%x, 0x%x\n",here[0],here[1],here[2],here[3]); { unsigned long *x; printk("POOL: 0x%08x, 0x%08x\n",zpeekl(zatm_dev, zatm_dev->pool_base), zpeekl(zatm_dev,zatm_dev->pool_base+1)); x = (unsigned long *) here[2]; printk("[0..3] = 0x%08lx, 0x%08lx, 0x%08lx, 0x%08lx\n", x[0],x[1],x[2],x[3]); } #endif error = 0; if (here[3] & uPD98401_AAL5_ERR) { error = (here[3] & uPD98401_AAL5_ES) >> uPD98401_AAL5_ES_SHIFT; if (error == uPD98401_AAL5_ES_DEACT || error == uPD98401_AAL5_ES_FREE) continue; } EVENT("error code 0x%x/0x%x\n",(here[3] & uPD98401_AAL5_ES) >> uPD98401_AAL5_ES_SHIFT,error); skb = ((struct rx_buffer_head *) bus_to_virt(here[2]))->skb; __net_timestamp(skb); #if 0 printk("[-3..0] 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",((unsigned *) skb->data)[-3], ((unsigned *) skb->data)[-2],((unsigned *) skb->data)[-1], ((unsigned *) skb->data)[0]); #endif EVENT("skb 0x%lx, here 0x%lx\n",(unsigned long) skb, (unsigned long) here); #if 0 printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); #endif size = error ? 
0 : ntohs(((__be16 *) skb->data)[cells* ATM_CELL_PAYLOAD/sizeof(u16)-3]); EVENT("got skb 0x%lx, size %d\n",(unsigned long) skb,size); chan = (here[3] & uPD98401_AAL5_CHAN) >> uPD98401_AAL5_CHAN_SHIFT; if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) { int pos; vcc = zatm_dev->rx_map[chan]; pos = ZATM_VCC(vcc)->pool; if (skb == zatm_dev->last_free[pos]) zatm_dev->last_free[pos] = NULL; skb_unlink(skb, zatm_dev->pool + pos); } else { printk(KERN_ERR DEV_LABEL "(itf %d): RX indication " "for non-existing channel\n",dev->number); size = 0; vcc = NULL; event_dump(); } if (error) { static unsigned long silence = 0; static int last_error = 0; if (error != last_error || time_after(jiffies, silence) || silence == 0){ printk(KERN_WARNING DEV_LABEL "(itf %d): " "chan %d error %s\n",dev->number,chan, err_txt[error]); last_error = error; silence = (jiffies+2*HZ)|1; } size = 0; } if (size && (size > cells*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER || size <= (cells-1)*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER)) { printk(KERN_ERR DEV_LABEL "(itf %d): size %d with %d " "cells\n",dev->number,size,cells); size = 0; event_dump(); } if (size > ATM_MAX_AAL5_PDU) { printk(KERN_ERR DEV_LABEL "(itf %d): size too big " "(%d)\n",dev->number,size); size = 0; event_dump(); } if (!size) { dev_kfree_skb_irq(skb); if (vcc) atomic_inc(&vcc->stats->rx_err); continue; } if (!atm_charge(vcc,skb->truesize)) { dev_kfree_skb_irq(skb); continue; } skb->len = size; ATM_SKB(skb)->vcc = vcc; vcc->push(vcc,skb); atomic_inc(&vcc->stats->rx); } zout(pos & 0xffff,MTA(mbx)); #if 0 /* probably a stupid idea */ refill_pool(dev,zatm_vcc->pool); /* maybe this saves us a few interrupts */ #endif } static int open_rx_first(struct atm_vcc *vcc) { struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; unsigned long flags; unsigned short chan; int cells; DPRINTK("open_rx_first (0x%x)\n",inb_p(0xc053)); zatm_dev = ZATM_DEV(vcc->dev); zatm_vcc = ZATM_VCC(vcc); zatm_vcc->rx_chan = 0; if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 
0; if (vcc->qos.aal == ATM_AAL5) { if (vcc->qos.rxtp.max_sdu > 65464) vcc->qos.rxtp.max_sdu = 65464; /* fix this - we may want to receive 64kB SDUs later */ cells = DIV_ROUND_UP(vcc->qos.rxtp.max_sdu + ATM_AAL5_TRAILER, ATM_CELL_PAYLOAD); zatm_vcc->pool = pool_index(cells*ATM_CELL_PAYLOAD); } else { cells = 1; zatm_vcc->pool = ZATM_AAL0_POOL; } if (zatm_vcc->pool < 0) return -EMSGSIZE; spin_lock_irqsave(&zatm_dev->lock, flags); zwait; zout(uPD98401_OPEN_CHAN,CMR); zwait; DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER)); chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT; spin_unlock_irqrestore(&zatm_dev->lock, flags); DPRINTK("chan is %d\n",chan); if (!chan) return -EAGAIN; use_pool(vcc->dev,zatm_vcc->pool); DPRINTK("pool %d\n",zatm_vcc->pool); /* set up VC descriptor */ spin_lock_irqsave(&zatm_dev->lock, flags); zpokel(zatm_dev,zatm_vcc->pool << uPD98401_RXVC_POOL_SHIFT, chan*VC_SIZE/4); zpokel(zatm_dev,uPD98401_RXVC_OD | (vcc->qos.aal == ATM_AAL5 ? uPD98401_RXVC_AR : 0) | cells,chan*VC_SIZE/4+1); zpokel(zatm_dev,0,chan*VC_SIZE/4+2); zatm_vcc->rx_chan = chan; zatm_dev->rx_map[chan] = vcc; spin_unlock_irqrestore(&zatm_dev->lock, flags); return 0; } static int open_rx_second(struct atm_vcc *vcc) { struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; unsigned long flags; int pos,shift; DPRINTK("open_rx_second (0x%x)\n",inb_p(0xc053)); zatm_dev = ZATM_DEV(vcc->dev); zatm_vcc = ZATM_VCC(vcc); if (!zatm_vcc->rx_chan) return 0; spin_lock_irqsave(&zatm_dev->lock, flags); /* should also handle VPI @@@ */ pos = vcc->vci >> 1; shift = (1-(vcc->vci & 1)) << 4; zpokel(zatm_dev,(zpeekl(zatm_dev,pos) & ~(0xffff << shift)) | ((zatm_vcc->rx_chan | uPD98401_RXLT_ENBL) << shift),pos); spin_unlock_irqrestore(&zatm_dev->lock, flags); return 0; } static void close_rx(struct atm_vcc *vcc) { struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; unsigned long flags; int pos,shift; zatm_vcc = ZATM_VCC(vcc); zatm_dev = ZATM_DEV(vcc->dev); if (!zatm_vcc->rx_chan) return; 
DPRINTK("close_rx\n"); /* disable receiver */ if (vcc->vpi != ATM_VPI_UNSPEC && vcc->vci != ATM_VCI_UNSPEC) { spin_lock_irqsave(&zatm_dev->lock, flags); pos = vcc->vci >> 1; shift = (1-(vcc->vci & 1)) << 4; zpokel(zatm_dev,zpeekl(zatm_dev,pos) & ~(0xffff << shift),pos); zwait; zout(uPD98401_NOP,CMR); zwait; zout(uPD98401_NOP,CMR); spin_unlock_irqrestore(&zatm_dev->lock, flags); } spin_lock_irqsave(&zatm_dev->lock, flags); zwait; zout(uPD98401_DEACT_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan << uPD98401_CHAN_ADDR_SHIFT),CMR); zwait; udelay(10); /* why oh why ... ? */ zout(uPD98401_CLOSE_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan << uPD98401_CHAN_ADDR_SHIFT),CMR); zwait; if (!(zin(CMR) & uPD98401_CHAN_ADDR)) printk(KERN_CRIT DEV_LABEL "(itf %d): can't close RX channel " "%d\n",vcc->dev->number,zatm_vcc->rx_chan); spin_unlock_irqrestore(&zatm_dev->lock, flags); zatm_dev->rx_map[zatm_vcc->rx_chan] = NULL; zatm_vcc->rx_chan = 0; unuse_pool(vcc->dev,zatm_vcc->pool); } static int start_rx(struct atm_dev *dev) { struct zatm_dev *zatm_dev; int i; DPRINTK("start_rx\n"); zatm_dev = ZATM_DEV(dev); zatm_dev->rx_map = kcalloc(zatm_dev->chans, sizeof(*zatm_dev->rx_map), GFP_KERNEL); if (!zatm_dev->rx_map) return -ENOMEM; /* set VPI/VCI split (use all VCIs and give what's left to VPIs) */ zpokel(zatm_dev,(1 << dev->ci_range.vci_bits)-1,uPD98401_VRR); /* prepare free buffer pools */ for (i = 0; i <= ZATM_LAST_POOL; i++) { zatm_dev->pool_info[i].ref_count = 0; zatm_dev->pool_info[i].rqa_count = 0; zatm_dev->pool_info[i].rqu_count = 0; zatm_dev->pool_info[i].low_water = LOW_MARK; zatm_dev->pool_info[i].high_water = HIGH_MARK; zatm_dev->pool_info[i].offset = 0; zatm_dev->pool_info[i].next_off = 0; zatm_dev->pool_info[i].next_cnt = 0; zatm_dev->pool_info[i].next_thres = OFF_CNG_THRES; } return 0; } /*----------------------------------- TX ------------------------------------*/ static int do_tx(struct sk_buff *skb) { struct atm_vcc *vcc; struct zatm_dev *zatm_dev; struct zatm_vcc 
*zatm_vcc; u32 *dsc; unsigned long flags; EVENT("do_tx\n",0,0); DPRINTK("sending skb %p\n",skb); vcc = ATM_SKB(skb)->vcc; zatm_dev = ZATM_DEV(vcc->dev); zatm_vcc = ZATM_VCC(vcc); EVENT("iovcnt=%d\n",skb_shinfo(skb)->nr_frags,0); spin_lock_irqsave(&zatm_dev->lock, flags); if (!skb_shinfo(skb)->nr_frags) { if (zatm_vcc->txing == RING_ENTRIES-1) { spin_unlock_irqrestore(&zatm_dev->lock, flags); return RING_BUSY; } zatm_vcc->txing++; dsc = zatm_vcc->ring+zatm_vcc->ring_curr; zatm_vcc->ring_curr = (zatm_vcc->ring_curr+RING_WORDS) & (RING_ENTRIES*RING_WORDS-1); dsc[1] = 0; dsc[2] = skb->len; dsc[3] = virt_to_bus(skb->data); mb(); dsc[0] = uPD98401_TXPD_V | uPD98401_TXPD_DP | uPD98401_TXPD_SM | (vcc->qos.aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0 | (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? uPD98401_CLPM_1 : uPD98401_CLPM_0)); EVENT("dsc (0x%lx)\n",(unsigned long) dsc,0); } else { printk("NONONONOO!!!!\n"); dsc = NULL; #if 0 u32 *put; int i; dsc = kmalloc(uPD98401_TXPD_SIZE * 2 + uPD98401_TXBD_SIZE * ATM_SKB(skb)->iovcnt, GFP_ATOMIC); if (!dsc) { if (vcc->pop) vcc->pop(vcc, skb); else dev_kfree_skb_irq(skb); return -EAGAIN; } /* @@@ should check alignment */ put = dsc+8; dsc[0] = uPD98401_TXPD_V | uPD98401_TXPD_DP | (vcc->aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0 | (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? 
uPD98401_CLPM_1 : uPD98401_CLPM_0)); dsc[1] = 0; dsc[2] = ATM_SKB(skb)->iovcnt * uPD98401_TXBD_SIZE; dsc[3] = virt_to_bus(put); for (i = 0; i < ATM_SKB(skb)->iovcnt; i++) { *put++ = ((struct iovec *) skb->data)[i].iov_len; *put++ = virt_to_bus(((struct iovec *) skb->data)[i].iov_base); } put[-2] |= uPD98401_TXBD_LAST; #endif } ZATM_PRV_DSC(skb) = dsc; skb_queue_tail(&zatm_vcc->tx_queue,skb); DPRINTK("QRP=0x%08lx\n",zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+ uPD98401_TXVC_QRP)); zwait; zout(uPD98401_TX_READY | (zatm_vcc->tx_chan << uPD98401_CHAN_ADDR_SHIFT),CMR); spin_unlock_irqrestore(&zatm_dev->lock, flags); EVENT("done\n",0,0); return 0; } static inline void dequeue_tx(struct atm_vcc *vcc) { struct zatm_vcc *zatm_vcc; struct sk_buff *skb; EVENT("dequeue_tx\n",0,0); zatm_vcc = ZATM_VCC(vcc); skb = skb_dequeue(&zatm_vcc->tx_queue); if (!skb) { printk(KERN_CRIT DEV_LABEL "(itf %d): dequeue_tx but not " "txing\n",vcc->dev->number); return; } #if 0 /* @@@ would fail on CLP */ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP | uPD98401_TXPD_SM | uPD98401_TXPD_AAL5)) printk("@#*$!!!! 
(%08x)\n", *ZATM_PRV_DSC(skb)); #endif *ZATM_PRV_DSC(skb) = 0; /* mark as invalid */ zatm_vcc->txing--; if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb_irq(skb); while ((skb = skb_dequeue(&zatm_vcc->backlog))) if (do_tx(skb) == RING_BUSY) { skb_queue_head(&zatm_vcc->backlog,skb); break; } atomic_inc(&vcc->stats->tx); wake_up(&zatm_vcc->tx_wait); } static void poll_tx(struct atm_dev *dev,int mbx) { struct zatm_dev *zatm_dev; unsigned long pos; u32 x; EVENT("poll_tx\n",0,0); zatm_dev = ZATM_DEV(dev); pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx)); while (x = zin(MWA(mbx)), (pos & 0xffff) != x) { int chan; #if 1 u32 data,*addr; EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x); addr = (u32 *) pos; data = *addr; chan = (data & uPD98401_TXI_CONN) >> uPD98401_TXI_CONN_SHIFT; EVENT("addr = 0x%lx, data = 0x%08x,",(unsigned long) addr, data); EVENT("chan = %d\n",chan,0); #else NO ! chan = (zatm_dev->mbx_start[mbx][pos >> 2] & uPD98401_TXI_CONN) >> uPD98401_TXI_CONN_SHIFT; #endif if (chan < zatm_dev->chans && zatm_dev->tx_map[chan]) dequeue_tx(zatm_dev->tx_map[chan]); else { printk(KERN_CRIT DEV_LABEL "(itf %d): TX indication " "for non-existing channel %d\n",dev->number,chan); event_dump(); } if (((pos += 4) & 0xffff) == zatm_dev->mbx_end[mbx]) pos = zatm_dev->mbx_start[mbx]; } zout(pos & 0xffff,MTA(mbx)); } /* * BUG BUG BUG: Doesn't handle "new-style" rate specification yet. 
*/ static int alloc_shaper(struct atm_dev *dev,int *pcr,int min,int max,int ubr) { struct zatm_dev *zatm_dev; unsigned long flags; unsigned long i,m,c; int shaper; DPRINTK("alloc_shaper (min = %d, max = %d)\n",min,max); zatm_dev = ZATM_DEV(dev); if (!zatm_dev->free_shapers) return -EAGAIN; for (shaper = 0; !((zatm_dev->free_shapers >> shaper) & 1); shaper++); zatm_dev->free_shapers &= ~1 << shaper; if (ubr) { c = 5; i = m = 1; zatm_dev->ubr_ref_cnt++; zatm_dev->ubr = shaper; *pcr = 0; } else { if (min) { if (min <= 255) { i = min; m = ATM_OC3_PCR; } else { i = 255; m = ATM_OC3_PCR*255/min; } } else { if (max > zatm_dev->tx_bw) max = zatm_dev->tx_bw; if (max <= 255) { i = max; m = ATM_OC3_PCR; } else { i = 255; m = DIV_ROUND_UP(ATM_OC3_PCR*255, max); } } if (i > m) { printk(KERN_CRIT DEV_LABEL "shaper algorithm botched " "[%d,%d] -> i=%ld,m=%ld\n",min,max,i,m); m = i; } *pcr = i*ATM_OC3_PCR/m; c = 20; /* @@@ should use max_cdv ! */ if ((min && *pcr < min) || (max && *pcr > max)) return -EINVAL; if (zatm_dev->tx_bw < *pcr) return -EAGAIN; zatm_dev->tx_bw -= *pcr; } spin_lock_irqsave(&zatm_dev->lock, flags); DPRINTK("i = %d, m = %d, PCR = %d\n",i,m,*pcr); zpokel(zatm_dev,(i << uPD98401_IM_I_SHIFT) | m,uPD98401_IM(shaper)); zpokel(zatm_dev,c << uPD98401_PC_C_SHIFT,uPD98401_PC(shaper)); zpokel(zatm_dev,0,uPD98401_X(shaper)); zpokel(zatm_dev,0,uPD98401_Y(shaper)); zpokel(zatm_dev,uPD98401_PS_E,uPD98401_PS(shaper)); spin_unlock_irqrestore(&zatm_dev->lock, flags); return shaper; } static void dealloc_shaper(struct atm_dev *dev,int shaper) { struct zatm_dev *zatm_dev; unsigned long flags; zatm_dev = ZATM_DEV(dev); if (shaper == zatm_dev->ubr) { if (--zatm_dev->ubr_ref_cnt) return; zatm_dev->ubr = -1; } spin_lock_irqsave(&zatm_dev->lock, flags); zpokel(zatm_dev,zpeekl(zatm_dev,uPD98401_PS(shaper)) & ~uPD98401_PS_E, uPD98401_PS(shaper)); spin_unlock_irqrestore(&zatm_dev->lock, flags); zatm_dev->free_shapers |= 1 << shaper; } static void close_tx(struct atm_vcc *vcc) { struct 
zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; unsigned long flags; int chan; zatm_vcc = ZATM_VCC(vcc); zatm_dev = ZATM_DEV(vcc->dev); chan = zatm_vcc->tx_chan; if (!chan) return; DPRINTK("close_tx\n"); if (skb_peek(&zatm_vcc->backlog)) { printk("waiting for backlog to drain ...\n"); event_dump(); wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->backlog)); } if (skb_peek(&zatm_vcc->tx_queue)) { printk("waiting for TX queue to drain ...\n"); event_dump(); wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->tx_queue)); } spin_lock_irqsave(&zatm_dev->lock, flags); #if 0 zwait; zout(uPD98401_DEACT_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR); #endif zwait; zout(uPD98401_CLOSE_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR); zwait; if (!(zin(CMR) & uPD98401_CHAN_ADDR)) printk(KERN_CRIT DEV_LABEL "(itf %d): can't close TX channel " "%d\n",vcc->dev->number,chan); spin_unlock_irqrestore(&zatm_dev->lock, flags); zatm_vcc->tx_chan = 0; zatm_dev->tx_map[chan] = NULL; if (zatm_vcc->shaper != zatm_dev->ubr) { zatm_dev->tx_bw += vcc->qos.txtp.min_pcr; dealloc_shaper(vcc->dev,zatm_vcc->shaper); } kfree(zatm_vcc->ring); } static int open_tx_first(struct atm_vcc *vcc) { struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; unsigned long flags; u32 *loop; unsigned short chan; int unlimited; DPRINTK("open_tx_first\n"); zatm_dev = ZATM_DEV(vcc->dev); zatm_vcc = ZATM_VCC(vcc); zatm_vcc->tx_chan = 0; if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0; spin_lock_irqsave(&zatm_dev->lock, flags); zwait; zout(uPD98401_OPEN_CHAN,CMR); zwait; DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER)); chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT; spin_unlock_irqrestore(&zatm_dev->lock, flags); DPRINTK("chan is %d\n",chan); if (!chan) return -EAGAIN; unlimited = vcc->qos.txtp.traffic_class == ATM_UBR && (!vcc->qos.txtp.max_pcr || vcc->qos.txtp.max_pcr == ATM_MAX_PCR || vcc->qos.txtp.max_pcr >= ATM_OC3_PCR); if (unlimited && zatm_dev->ubr != -1) zatm_vcc->shaper = zatm_dev->ubr; else { 
int uninitialized_var(pcr); if (unlimited) vcc->qos.txtp.max_sdu = ATM_MAX_AAL5_PDU; if ((zatm_vcc->shaper = alloc_shaper(vcc->dev,&pcr, vcc->qos.txtp.min_pcr,vcc->qos.txtp.max_pcr,unlimited)) < 0) { close_tx(vcc); return zatm_vcc->shaper; } if (pcr > ATM_OC3_PCR) pcr = ATM_OC3_PCR; vcc->qos.txtp.min_pcr = vcc->qos.txtp.max_pcr = pcr; } zatm_vcc->tx_chan = chan; skb_queue_head_init(&zatm_vcc->tx_queue); init_waitqueue_head(&zatm_vcc->tx_wait); /* initialize ring */ zatm_vcc->ring = kzalloc(RING_SIZE,GFP_KERNEL); if (!zatm_vcc->ring) return -ENOMEM; loop = zatm_vcc->ring+RING_ENTRIES*RING_WORDS; loop[0] = uPD98401_TXPD_V; loop[1] = loop[2] = 0; loop[3] = virt_to_bus(zatm_vcc->ring); zatm_vcc->ring_curr = 0; zatm_vcc->txing = 0; skb_queue_head_init(&zatm_vcc->backlog); zpokel(zatm_dev,virt_to_bus(zatm_vcc->ring), chan*VC_SIZE/4+uPD98401_TXVC_QRP); return 0; } static int open_tx_second(struct atm_vcc *vcc) { struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; unsigned long flags; DPRINTK("open_tx_second\n"); zatm_dev = ZATM_DEV(vcc->dev); zatm_vcc = ZATM_VCC(vcc); if (!zatm_vcc->tx_chan) return 0; /* set up VC descriptor */ spin_lock_irqsave(&zatm_dev->lock, flags); zpokel(zatm_dev,0,zatm_vcc->tx_chan*VC_SIZE/4); zpokel(zatm_dev,uPD98401_TXVC_L | (zatm_vcc->shaper << uPD98401_TXVC_SHP_SHIFT) | (vcc->vpi << uPD98401_TXVC_VPI_SHIFT) | vcc->vci,zatm_vcc->tx_chan*VC_SIZE/4+1); zpokel(zatm_dev,0,zatm_vcc->tx_chan*VC_SIZE/4+2); spin_unlock_irqrestore(&zatm_dev->lock, flags); zatm_dev->tx_map[zatm_vcc->tx_chan] = vcc; return 0; } static int start_tx(struct atm_dev *dev) { struct zatm_dev *zatm_dev; int i; DPRINTK("start_tx\n"); zatm_dev = ZATM_DEV(dev); zatm_dev->tx_map = kmalloc_array(zatm_dev->chans, sizeof(*zatm_dev->tx_map), GFP_KERNEL); if (!zatm_dev->tx_map) return -ENOMEM; zatm_dev->tx_bw = ATM_OC3_PCR; zatm_dev->free_shapers = (1 << NR_SHAPERS)-1; zatm_dev->ubr = -1; zatm_dev->ubr_ref_cnt = 0; /* initialize shapers */ for (i = 0; i < NR_SHAPERS; i++) 
zpokel(zatm_dev,0,uPD98401_PS(i)); return 0; } /*------------------------------- interrupts --------------------------------*/ static irqreturn_t zatm_int(int irq,void *dev_id) { struct atm_dev *dev; struct zatm_dev *zatm_dev; u32 reason; int handled = 0; dev = dev_id; zatm_dev = ZATM_DEV(dev); while ((reason = zin(GSR))) { handled = 1; EVENT("reason 0x%x\n",reason,0); if (reason & uPD98401_INT_PI) { EVENT("PHY int\n",0,0); dev->phy->interrupt(dev); } if (reason & uPD98401_INT_RQA) { unsigned long pools; int i; pools = zin(RQA); EVENT("RQA (0x%08x)\n",pools,0); for (i = 0; pools; i++) { if (pools & 1) { refill_pool(dev,i); zatm_dev->pool_info[i].rqa_count++; } pools >>= 1; } } if (reason & uPD98401_INT_RQU) { unsigned long pools; int i; pools = zin(RQU); printk(KERN_WARNING DEV_LABEL "(itf %d): RQU 0x%08lx\n", dev->number,pools); event_dump(); for (i = 0; pools; i++) { if (pools & 1) { refill_pool(dev,i); zatm_dev->pool_info[i].rqu_count++; } pools >>= 1; } } /* don't handle RD */ if (reason & uPD98401_INT_SPE) printk(KERN_ALERT DEV_LABEL "(itf %d): system parity " "error at 0x%08x\n",dev->number,zin(ADDR)); if (reason & uPD98401_INT_CPE) printk(KERN_ALERT DEV_LABEL "(itf %d): control memory " "parity error at 0x%08x\n",dev->number,zin(ADDR)); if (reason & uPD98401_INT_SBE) { printk(KERN_ALERT DEV_LABEL "(itf %d): system bus " "error at 0x%08x\n",dev->number,zin(ADDR)); event_dump(); } /* don't handle IND */ if (reason & uPD98401_INT_MF) { printk(KERN_CRIT DEV_LABEL "(itf %d): mailbox full " "(0x%x)\n",dev->number,(reason & uPD98401_INT_MF) >> uPD98401_INT_MF_SHIFT); event_dump(); /* @@@ should try to recover */ } if (reason & uPD98401_INT_MM) { if (reason & 1) poll_rx(dev,0); if (reason & 2) poll_rx(dev,1); if (reason & 4) poll_tx(dev,2); if (reason & 8) poll_tx(dev,3); } /* @@@ handle RCRn */ } return IRQ_RETVAL(handled); } /*----------------------------- (E)EPROM access -----------------------------*/ static void eprom_set(struct zatm_dev *zatm_dev, unsigned 
long value, unsigned short cmd) { int error; if ((error = pci_write_config_dword(zatm_dev->pci_dev,cmd,value))) printk(KERN_ERR DEV_LABEL ": PCI write failed (0x%02x)\n", error); } static unsigned long eprom_get(struct zatm_dev *zatm_dev, unsigned short cmd) { unsigned int value; int error; if ((error = pci_read_config_dword(zatm_dev->pci_dev,cmd,&value))) printk(KERN_ERR DEV_LABEL ": PCI read failed (0x%02x)\n", error); return value; } static void eprom_put_bits(struct zatm_dev *zatm_dev, unsigned long data, int bits, unsigned short cmd) { unsigned long value; int i; for (i = bits-1; i >= 0; i--) { value = ZEPROM_CS | (((data >> i) & 1) ? ZEPROM_DI : 0); eprom_set(zatm_dev,value,cmd); eprom_set(zatm_dev,value | ZEPROM_SK,cmd); eprom_set(zatm_dev,value,cmd); } } static void eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte, unsigned short cmd) { int i; *byte = 0; for (i = 8; i; i--) { eprom_set(zatm_dev,ZEPROM_CS,cmd); eprom_set(zatm_dev,ZEPROM_CS | ZEPROM_SK,cmd); *byte <<= 1; if (eprom_get(zatm_dev,cmd) & ZEPROM_DO) *byte |= 1; eprom_set(zatm_dev,ZEPROM_CS,cmd); } } static unsigned char eprom_try_esi(struct atm_dev *dev, unsigned short cmd, int offset, int swap) { unsigned char buf[ZEPROM_SIZE]; struct zatm_dev *zatm_dev; int i; zatm_dev = ZATM_DEV(dev); for (i = 0; i < ZEPROM_SIZE; i += 2) { eprom_set(zatm_dev,ZEPROM_CS,cmd); /* select EPROM */ eprom_put_bits(zatm_dev,ZEPROM_CMD_READ,ZEPROM_CMD_LEN,cmd); eprom_put_bits(zatm_dev,i >> 1,ZEPROM_ADDR_LEN,cmd); eprom_get_byte(zatm_dev,buf+i+swap,cmd); eprom_get_byte(zatm_dev,buf+i+1-swap,cmd); eprom_set(zatm_dev,0,cmd); /* deselect EPROM */ } memcpy(dev->esi,buf+offset,ESI_LEN); return memcmp(dev->esi,"\0\0\0\0\0",ESI_LEN); /* assumes ESI_LEN == 6 */ } static void eprom_get_esi(struct atm_dev *dev) { if (eprom_try_esi(dev,ZEPROM_V1_REG,ZEPROM_V1_ESI_OFF,1)) return; (void) eprom_try_esi(dev,ZEPROM_V2_REG,ZEPROM_V2_ESI_OFF,0); } /*--------------------------------- entries ---------------------------------*/ 
static int zatm_init(struct atm_dev *dev) { struct zatm_dev *zatm_dev; struct pci_dev *pci_dev; unsigned short command; int error,i,last; unsigned long t0,t1,t2; DPRINTK(">zatm_init\n"); zatm_dev = ZATM_DEV(dev); spin_lock_init(&zatm_dev->lock); pci_dev = zatm_dev->pci_dev; zatm_dev->base = pci_resource_start(pci_dev, 0); zatm_dev->irq = pci_dev->irq; if ((error = pci_read_config_word(pci_dev,PCI_COMMAND,&command))) { printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%02x\n", dev->number,error); return -EINVAL; } if ((error = pci_write_config_word(pci_dev,PCI_COMMAND, command | PCI_COMMAND_IO | PCI_COMMAND_MASTER))) { printk(KERN_ERR DEV_LABEL "(itf %d): can't enable IO (0x%02x)" "\n",dev->number,error); return -EIO; } eprom_get_esi(dev); printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d,base=0x%x,irq=%d,", dev->number,pci_dev->revision,zatm_dev->base,zatm_dev->irq); /* reset uPD98401 */ zout(0,SWR); while (!(zin(GSR) & uPD98401_INT_IND)); zout(uPD98401_GMR_ONE /*uPD98401_BURST4*/,GMR); last = MAX_CRAM_SIZE; for (i = last-RAM_INCREMENT; i >= 0; i -= RAM_INCREMENT) { zpokel(zatm_dev,0x55555555,i); if (zpeekl(zatm_dev,i) != 0x55555555) last = i; else { zpokel(zatm_dev,0xAAAAAAAA,i); if (zpeekl(zatm_dev,i) != 0xAAAAAAAA) last = i; else zpokel(zatm_dev,i,i); } } for (i = 0; i < last; i += RAM_INCREMENT) if (zpeekl(zatm_dev,i) != i) break; zatm_dev->mem = i << 2; while (i) zpokel(zatm_dev,0,--i); /* reset again to rebuild memory pointers */ zout(0,SWR); while (!(zin(GSR) & uPD98401_INT_IND)); zout(uPD98401_GMR_ONE | uPD98401_BURST8 | uPD98401_BURST4 | uPD98401_BURST2 | uPD98401_GMR_PM | uPD98401_GMR_DR,GMR); /* TODO: should shrink allocation now */ printk("mem=%dkB,%s (",zatm_dev->mem >> 10,zatm_dev->copper ? "UTP" : "MMF"); for (i = 0; i < ESI_LEN; i++) printk("%02X%s",dev->esi[i],i == ESI_LEN-1 ? 
")\n" : "-"); do { unsigned long flags; spin_lock_irqsave(&zatm_dev->lock, flags); t0 = zpeekl(zatm_dev,uPD98401_TSR); udelay(10); t1 = zpeekl(zatm_dev,uPD98401_TSR); udelay(1010); t2 = zpeekl(zatm_dev,uPD98401_TSR); spin_unlock_irqrestore(&zatm_dev->lock, flags); } while (t0 > t1 || t1 > t2); /* loop if wrapping ... */ zatm_dev->khz = t2-2*t1+t0; printk(KERN_NOTICE DEV_LABEL "(itf %d): uPD98401 %d.%d at %d.%03d " "MHz\n",dev->number, (zin(VER) & uPD98401_MAJOR) >> uPD98401_MAJOR_SHIFT, zin(VER) & uPD98401_MINOR,zatm_dev->khz/1000,zatm_dev->khz % 1000); return uPD98402_init(dev); } static int zatm_start(struct atm_dev *dev) { struct zatm_dev *zatm_dev = ZATM_DEV(dev); struct pci_dev *pdev = zatm_dev->pci_dev; unsigned long curr; int pools,vccs,rx; int error, i, ld; DPRINTK("zatm_start\n"); zatm_dev->rx_map = zatm_dev->tx_map = NULL; for (i = 0; i < NR_MBX; i++) zatm_dev->mbx_start[i] = 0; error = request_irq(zatm_dev->irq, zatm_int, IRQF_SHARED, DEV_LABEL, dev); if (error < 0) { printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n", dev->number,zatm_dev->irq); goto done; } /* define memory regions */ pools = NR_POOLS; if (NR_SHAPERS*SHAPER_SIZE > pools*POOL_SIZE) pools = NR_SHAPERS*SHAPER_SIZE/POOL_SIZE; vccs = (zatm_dev->mem-NR_SHAPERS*SHAPER_SIZE-pools*POOL_SIZE)/ (2*VC_SIZE+RX_SIZE); ld = -1; for (rx = 1; rx < vccs; rx <<= 1) ld++; dev->ci_range.vpi_bits = 0; /* @@@ no VPI for now */ dev->ci_range.vci_bits = ld; dev->link_rate = ATM_OC3_PCR; zatm_dev->chans = vccs; /* ??? 
*/ curr = rx*RX_SIZE/4; DPRINTK("RX pool 0x%08lx\n",curr); zpokel(zatm_dev,curr,uPD98401_PMA); /* receive pool */ zatm_dev->pool_base = curr; curr += pools*POOL_SIZE/4; DPRINTK("Shapers 0x%08lx\n",curr); zpokel(zatm_dev,curr,uPD98401_SMA); /* shapers */ curr += NR_SHAPERS*SHAPER_SIZE/4; DPRINTK("Free 0x%08lx\n",curr); zpokel(zatm_dev,curr,uPD98401_TOS); /* free pool */ printk(KERN_INFO DEV_LABEL "(itf %d): %d shapers, %d pools, %d RX, " "%ld VCs\n",dev->number,NR_SHAPERS,pools,rx, (zatm_dev->mem-curr*4)/VC_SIZE); /* create mailboxes */ for (i = 0; i < NR_MBX; i++) { void *mbx; dma_addr_t mbx_dma; if (!mbx_entries[i]) continue; mbx = dma_alloc_coherent(&pdev->dev, 2 * MBX_SIZE(i), &mbx_dma, GFP_KERNEL); if (!mbx) { error = -ENOMEM; goto out; } /* * Alignment provided by dma_alloc_coherent() isn't enough * for this device. */ if (((unsigned long)mbx ^ mbx_dma) & 0xffff) { printk(KERN_ERR DEV_LABEL "(itf %d): system " "bus incompatible with driver\n", dev->number); dma_free_coherent(&pdev->dev, 2*MBX_SIZE(i), mbx, mbx_dma); error = -ENODEV; goto out; } DPRINTK("mbx@0x%08lx-0x%08lx\n", mbx, mbx + MBX_SIZE(i)); zatm_dev->mbx_start[i] = (unsigned long)mbx; zatm_dev->mbx_dma[i] = mbx_dma; zatm_dev->mbx_end[i] = (zatm_dev->mbx_start[i] + MBX_SIZE(i)) & 0xffff; zout(mbx_dma >> 16, MSH(i)); zout(mbx_dma, MSL(i)); zout(zatm_dev->mbx_end[i], MBA(i)); zout((unsigned long)mbx & 0xffff, MTA(i)); zout((unsigned long)mbx & 0xffff, MWA(i)); } error = start_tx(dev); if (error) goto out; error = start_rx(dev); if (error) goto out_tx; error = dev->phy->start(dev); if (error) goto out_rx; zout(0xffffffff,IMR); /* enable interrupts */ /* enable TX & RX */ zout(zin(GMR) | uPD98401_GMR_SE | uPD98401_GMR_RE,GMR); done: return error; out_rx: kfree(zatm_dev->rx_map); out_tx: kfree(zatm_dev->tx_map); out: while (i-- > 0) { dma_free_coherent(&pdev->dev, 2 * MBX_SIZE(i), (void *)zatm_dev->mbx_start[i], zatm_dev->mbx_dma[i]); } free_irq(zatm_dev->irq, dev); goto done; } static void 
zatm_close(struct atm_vcc *vcc) { DPRINTK(">zatm_close\n"); if (!ZATM_VCC(vcc)) return; clear_bit(ATM_VF_READY,&vcc->flags); close_rx(vcc); EVENT("close_tx\n",0,0); close_tx(vcc); DPRINTK("zatm_close: done waiting\n"); /* deallocate memory */ kfree(ZATM_VCC(vcc)); vcc->dev_data = NULL; clear_bit(ATM_VF_ADDR,&vcc->flags); } static int zatm_open(struct atm_vcc *vcc) { struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; short vpi = vcc->vpi; int vci = vcc->vci; int error; DPRINTK(">zatm_open\n"); zatm_dev = ZATM_DEV(vcc->dev); if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) vcc->dev_data = NULL; if (vci != ATM_VPI_UNSPEC && vpi != ATM_VCI_UNSPEC) set_bit(ATM_VF_ADDR,&vcc->flags); if (vcc->qos.aal != ATM_AAL5) return -EINVAL; /* @@@ AAL0 */ DPRINTK(DEV_LABEL "(itf %d): open %d.%d\n",vcc->dev->number,vcc->vpi, vcc->vci); if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) { zatm_vcc = kmalloc(sizeof(*zatm_vcc), GFP_KERNEL); if (!zatm_vcc) { clear_bit(ATM_VF_ADDR,&vcc->flags); return -ENOMEM; } vcc->dev_data = zatm_vcc; ZATM_VCC(vcc)->tx_chan = 0; /* for zatm_close after open_rx */ if ((error = open_rx_first(vcc))) { zatm_close(vcc); return error; } if ((error = open_tx_first(vcc))) { zatm_close(vcc); return error; } } if (vci == ATM_VPI_UNSPEC || vpi == ATM_VCI_UNSPEC) return 0; if ((error = open_rx_second(vcc))) { zatm_close(vcc); return error; } if ((error = open_tx_second(vcc))) { zatm_close(vcc); return error; } set_bit(ATM_VF_READY,&vcc->flags); return 0; } static int zatm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos,int flags) { printk("Not yet implemented\n"); return -ENOSYS; /* @@@ */ } static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) { struct zatm_dev *zatm_dev; unsigned long flags; zatm_dev = ZATM_DEV(dev); switch (cmd) { case ZATM_GETPOOLZ: if (!capable(CAP_NET_ADMIN)) return -EPERM; /* fall through */ case ZATM_GETPOOL: { struct zatm_pool_info info; int pool; if (get_user(pool, &((struct zatm_pool_req __user *) arg)->pool_num)) return 
-EFAULT; if (pool < 0 || pool > ZATM_LAST_POOL) return -EINVAL; spin_lock_irqsave(&zatm_dev->lock, flags); info = zatm_dev->pool_info[pool]; if (cmd == ZATM_GETPOOLZ) { zatm_dev->pool_info[pool].rqa_count = 0; zatm_dev->pool_info[pool].rqu_count = 0; } spin_unlock_irqrestore(&zatm_dev->lock, flags); return copy_to_user( &((struct zatm_pool_req __user *) arg)->info, &info,sizeof(info)) ? -EFAULT : 0; } case ZATM_SETPOOL: { struct zatm_pool_info info; int pool; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (get_user(pool, &((struct zatm_pool_req __user *) arg)->pool_num)) return -EFAULT; if (pool < 0 || pool > ZATM_LAST_POOL) return -EINVAL; if (copy_from_user(&info, &((struct zatm_pool_req __user *) arg)->info, sizeof(info))) return -EFAULT; if (!info.low_water) info.low_water = zatm_dev-> pool_info[pool].low_water; if (!info.high_water) info.high_water = zatm_dev-> pool_info[pool].high_water; if (!info.next_thres) info.next_thres = zatm_dev-> pool_info[pool].next_thres; if (info.low_water >= info.high_water || info.low_water < 0) return -EINVAL; spin_lock_irqsave(&zatm_dev->lock, flags); zatm_dev->pool_info[pool].low_water = info.low_water; zatm_dev->pool_info[pool].high_water = info.high_water; zatm_dev->pool_info[pool].next_thres = info.next_thres; spin_unlock_irqrestore(&zatm_dev->lock, flags); return 0; } default: if (!dev->phy->ioctl) return -ENOIOCTLCMD; return dev->phy->ioctl(dev,cmd,arg); } } static int zatm_getsockopt(struct atm_vcc *vcc,int level,int optname, void __user *optval,int optlen) { return -EINVAL; } static int zatm_setsockopt(struct atm_vcc *vcc,int level,int optname, void __user *optval,unsigned int optlen) { return -EINVAL; } static int zatm_send(struct atm_vcc *vcc,struct sk_buff *skb) { int error; EVENT(">zatm_send 0x%lx\n",(unsigned long) skb,0); if (!ZATM_VCC(vcc)->tx_chan || !test_bit(ATM_VF_READY,&vcc->flags)) { if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb(skb); return -EINVAL; } if (!skb) { printk(KERN_CRIT "!skb in zatm_send 
?\n"); if (vcc->pop) vcc->pop(vcc,skb); return -EINVAL; } ATM_SKB(skb)->vcc = vcc; error = do_tx(skb); if (error != RING_BUSY) return error; skb_queue_tail(&ZATM_VCC(vcc)->backlog,skb); return 0; } static void zatm_phy_put(struct atm_dev *dev,unsigned char value, unsigned long addr) { struct zatm_dev *zatm_dev; zatm_dev = ZATM_DEV(dev); zwait; zout(value,CER); zout(uPD98401_IND_ACC | uPD98401_IA_B0 | (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR); } static unsigned char zatm_phy_get(struct atm_dev *dev,unsigned long addr) { struct zatm_dev *zatm_dev; zatm_dev = ZATM_DEV(dev); zwait; zout(uPD98401_IND_ACC | uPD98401_IA_B0 | uPD98401_IA_RW | (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR); zwait; return zin(CER) & 0xff; } static const struct atmdev_ops ops = { .open = zatm_open, .close = zatm_close, .ioctl = zatm_ioctl, .getsockopt = zatm_getsockopt, .setsockopt = zatm_setsockopt, .send = zatm_send, .phy_put = zatm_phy_put, .phy_get = zatm_phy_get, .change_qos = zatm_change_qos, }; static int zatm_init_one(struct pci_dev *pci_dev, const struct pci_device_id *ent) { struct atm_dev *dev; struct zatm_dev *zatm_dev; int ret = -ENOMEM; zatm_dev = kmalloc(sizeof(*zatm_dev), GFP_KERNEL); if (!zatm_dev) { printk(KERN_EMERG "%s: memory shortage\n", DEV_LABEL); goto out; } dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &ops, -1, NULL); if (!dev) goto out_free; ret = pci_enable_device(pci_dev); if (ret < 0) goto out_deregister; ret = pci_request_regions(pci_dev, DEV_LABEL); if (ret < 0) goto out_disable; ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)); if (ret < 0) goto out_disable; zatm_dev->pci_dev = pci_dev; dev->dev_data = zatm_dev; zatm_dev->copper = (int)ent->driver_data; if ((ret = zatm_init(dev)) || (ret = zatm_start(dev))) goto out_release; pci_set_drvdata(pci_dev, dev); zatm_dev->more = zatm_boards; zatm_boards = dev; ret = 0; out: return ret; out_release: pci_release_regions(pci_dev); out_disable: pci_disable_device(pci_dev); 
out_deregister: atm_dev_deregister(dev); out_free: kfree(zatm_dev); goto out; } MODULE_LICENSE("GPL"); static struct pci_device_id zatm_pci_tbl[] = { { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1221), ZATM_COPPER }, { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1225), 0 }, { 0, } }; MODULE_DEVICE_TABLE(pci, zatm_pci_tbl); static struct pci_driver zatm_driver = { .name = DEV_LABEL, .id_table = zatm_pci_tbl, .probe = zatm_init_one, }; static int __init zatm_init_module(void) { return pci_register_driver(&zatm_driver); } module_init(zatm_init_module); /* module_exit not defined so not unloadable */
gpl-2.0
hzpeterchen/linux-usb
drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
76
17168
/****************************************************************************** * * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * ******************************************************************************/ #define _RTW_STA_MGT_C_ #include <osdep_service.h> #include <drv_types.h> #include <recv_osdep.h> #include <xmit_osdep.h> #include <mlme_osdep.h> #include <sta_info.h> static void _rtw_init_stainfo(struct sta_info *psta) { _func_enter_; _rtw_memset((u8 *)psta, 0, sizeof (struct sta_info)); _rtw_spinlock_init(&psta->lock); _rtw_init_listhead(&psta->list); _rtw_init_listhead(&psta->hash_list); _rtw_init_queue(&psta->sleep_q); psta->sleepq_len = 0; _rtw_init_sta_xmit_priv(&psta->sta_xmitpriv); _rtw_init_sta_recv_priv(&psta->sta_recvpriv); #ifdef CONFIG_88EU_AP_MODE _rtw_init_listhead(&psta->asoc_list); _rtw_init_listhead(&psta->auth_list); psta->expire_to = 0; psta->flags = 0; psta->capability = 0; psta->bpairwise_key_installed = false; #ifdef CONFIG_88EU_AP_MODE psta->nonerp_set = 0; psta->no_short_slot_time_set = 0; psta->no_short_preamble_set = 0; psta->no_ht_gf_set = 0; psta->no_ht_set = 0; psta->ht_20mhz_set = 0; #endif psta->under_exist_checking = 0; psta->keep_alive_trycnt = 0; #endif /* CONFIG_88EU_AP_MODE */ _func_exit_; } u32 _rtw_init_sta_priv(struct sta_priv *pstapriv) { struct sta_info *psta; s32 
i; _func_enter_; pstapriv->pallocated_stainfo_buf = rtw_zvmalloc(sizeof(struct sta_info) * NUM_STA + 4); if (!pstapriv->pallocated_stainfo_buf) return _FAIL; pstapriv->pstainfo_buf = pstapriv->pallocated_stainfo_buf + 4 - ((size_t)(pstapriv->pallocated_stainfo_buf) & 3); _rtw_init_queue(&pstapriv->free_sta_queue); _rtw_spinlock_init(&pstapriv->sta_hash_lock); pstapriv->asoc_sta_count = 0; _rtw_init_queue(&pstapriv->sleep_q); _rtw_init_queue(&pstapriv->wakeup_q); psta = (struct sta_info *)(pstapriv->pstainfo_buf); for (i = 0; i < NUM_STA; i++) { _rtw_init_stainfo(psta); _rtw_init_listhead(&(pstapriv->sta_hash[i])); rtw_list_insert_tail(&psta->list, get_list_head(&pstapriv->free_sta_queue)); psta++; } #ifdef CONFIG_88EU_AP_MODE pstapriv->sta_dz_bitmap = 0; pstapriv->tim_bitmap = 0; _rtw_init_listhead(&pstapriv->asoc_list); _rtw_init_listhead(&pstapriv->auth_list); _rtw_spinlock_init(&pstapriv->asoc_list_lock); _rtw_spinlock_init(&pstapriv->auth_list_lock); pstapriv->asoc_list_cnt = 0; pstapriv->auth_list_cnt = 0; pstapriv->auth_to = 3; /* 3*2 = 6 sec */ pstapriv->assoc_to = 3; pstapriv->expire_to = 3; /* 3*2 = 6 sec */ pstapriv->max_num_sta = NUM_STA; #endif _func_exit_; return _SUCCESS; } inline int rtw_stainfo_offset(struct sta_priv *stapriv, struct sta_info *sta) { int offset = (((u8 *)sta) - stapriv->pstainfo_buf)/sizeof(struct sta_info); if (!stainfo_offset_valid(offset)) DBG_88E("%s invalid offset(%d), out of range!!!", __func__, offset); return offset; } inline struct sta_info *rtw_get_stainfo_by_offset(struct sta_priv *stapriv, int offset) { if (!stainfo_offset_valid(offset)) DBG_88E("%s invalid offset(%d), out of range!!!", __func__, offset); return (struct sta_info *)(stapriv->pstainfo_buf + offset * sizeof(struct sta_info)); } void _rtw_free_sta_xmit_priv_lock(struct sta_xmit_priv *psta_xmitpriv); void _rtw_free_sta_xmit_priv_lock(struct sta_xmit_priv *psta_xmitpriv) { _func_enter_; _rtw_spinlock_free(&psta_xmitpriv->lock); 
_rtw_spinlock_free(&(psta_xmitpriv->be_q.sta_pending.lock)); _rtw_spinlock_free(&(psta_xmitpriv->bk_q.sta_pending.lock)); _rtw_spinlock_free(&(psta_xmitpriv->vi_q.sta_pending.lock)); _rtw_spinlock_free(&(psta_xmitpriv->vo_q.sta_pending.lock)); _func_exit_; } static void _rtw_free_sta_recv_priv_lock(struct sta_recv_priv *psta_recvpriv) { _func_enter_; _rtw_spinlock_free(&psta_recvpriv->lock); _rtw_spinlock_free(&(psta_recvpriv->defrag_q.lock)); _func_exit_; } void rtw_mfree_stainfo(struct sta_info *psta); void rtw_mfree_stainfo(struct sta_info *psta) { _func_enter_; if (&psta->lock != NULL) _rtw_spinlock_free(&psta->lock); _rtw_free_sta_xmit_priv_lock(&psta->sta_xmitpriv); _rtw_free_sta_recv_priv_lock(&psta->sta_recvpriv); _func_exit_; } /* this function is used to free the memory of lock || sema for all stainfos */ void rtw_mfree_all_stainfo(struct sta_priv *pstapriv); void rtw_mfree_all_stainfo(struct sta_priv *pstapriv) { unsigned long irql; struct list_head *plist, *phead; struct sta_info *psta = NULL; _func_enter_; _enter_critical_bh(&pstapriv->sta_hash_lock, &irql); phead = get_list_head(&pstapriv->free_sta_queue); plist = get_next(phead); while ((rtw_end_of_queue_search(phead, plist)) == false) { psta = LIST_CONTAINOR(plist, struct sta_info , list); plist = get_next(plist); rtw_mfree_stainfo(psta); } _exit_critical_bh(&pstapriv->sta_hash_lock, &irql); _func_exit_; } static void rtw_mfree_sta_priv_lock(struct sta_priv *pstapriv) { #ifdef CONFIG_88EU_AP_MODE struct wlan_acl_pool *pacl_list = &pstapriv->acl_list; #endif rtw_mfree_all_stainfo(pstapriv); /* be done before free sta_hash_lock */ _rtw_spinlock_free(&pstapriv->free_sta_queue.lock); _rtw_spinlock_free(&pstapriv->sta_hash_lock); _rtw_spinlock_free(&pstapriv->wakeup_q.lock); _rtw_spinlock_free(&pstapriv->sleep_q.lock); #ifdef CONFIG_88EU_AP_MODE _rtw_spinlock_free(&pstapriv->asoc_list_lock); _rtw_spinlock_free(&pstapriv->auth_list_lock); _rtw_spinlock_free(&pacl_list->acl_node_q.lock); #endif } u32 
_rtw_free_sta_priv(struct sta_priv *pstapriv) { unsigned long irql; struct list_head *phead, *plist; struct sta_info *psta = NULL; struct recv_reorder_ctrl *preorder_ctrl; int index; _func_enter_; if (pstapriv) { /* delete all reordering_ctrl_timer */ _enter_critical_bh(&pstapriv->sta_hash_lock, &irql); for (index = 0; index < NUM_STA; index++) { phead = &(pstapriv->sta_hash[index]); plist = get_next(phead); while ((rtw_end_of_queue_search(phead, plist)) == false) { int i; psta = LIST_CONTAINOR(plist, struct sta_info , hash_list); plist = get_next(plist); for (i = 0; i < 16; i++) { preorder_ctrl = &psta->recvreorder_ctrl[i]; _cancel_timer_ex(&preorder_ctrl->reordering_ctrl_timer); } } } _exit_critical_bh(&pstapriv->sta_hash_lock, &irql); /*===============================*/ rtw_mfree_sta_priv_lock(pstapriv); if (pstapriv->pallocated_stainfo_buf) rtw_vmfree(pstapriv->pallocated_stainfo_buf, sizeof(struct sta_info)*NUM_STA+4); } _func_exit_; return _SUCCESS; } struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr) { unsigned long irql, irql2; s32 index; struct list_head *phash_list; struct sta_info *psta; struct __queue *pfree_sta_queue; struct recv_reorder_ctrl *preorder_ctrl; int i = 0; u16 wRxSeqInitialValue = 0xffff; _func_enter_; pfree_sta_queue = &pstapriv->free_sta_queue; _enter_critical_bh(&(pfree_sta_queue->lock), &irql); if (_rtw_queue_empty(pfree_sta_queue) == true) { _exit_critical_bh(&(pfree_sta_queue->lock), &irql); psta = NULL; } else { psta = LIST_CONTAINOR(get_next(&pfree_sta_queue->queue), struct sta_info, list); rtw_list_delete(&(psta->list)); _exit_critical_bh(&(pfree_sta_queue->lock), &irql); _rtw_init_stainfo(psta); memcpy(psta->hwaddr, hwaddr, ETH_ALEN); index = wifi_mac_hash(hwaddr); RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_, ("rtw_alloc_stainfo: index=%x", index)); if (index >= NUM_STA) { RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_, ("ERROR => rtw_alloc_stainfo: index >= NUM_STA")); psta = NULL; goto exit; } 
phash_list = &(pstapriv->sta_hash[index]); _enter_critical_bh(&(pstapriv->sta_hash_lock), &irql2); rtw_list_insert_tail(&psta->hash_list, phash_list); pstapriv->asoc_sta_count++; _exit_critical_bh(&(pstapriv->sta_hash_lock), &irql2); /* Commented by Albert 2009/08/13 */ /* For the SMC router, the sequence number of first packet of WPS handshake will be 0. */ /* In this case, this packet will be dropped by recv_decache function if we use the 0x00 as the default value for tid_rxseq variable. */ /* So, we initialize the tid_rxseq variable as the 0xffff. */ for (i = 0; i < 16; i++) memcpy(&psta->sta_recvpriv.rxcache.tid_rxseq[i], &wRxSeqInitialValue, 2); RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_, ("alloc number_%d stainfo with hwaddr = %pM\n", pstapriv->asoc_sta_count , hwaddr)); init_addba_retry_timer(pstapriv->padapter, psta); /* for A-MPDU Rx reordering buffer control */ for (i = 0; i < 16; i++) { preorder_ctrl = &psta->recvreorder_ctrl[i]; preorder_ctrl->padapter = pstapriv->padapter; preorder_ctrl->enable = false; preorder_ctrl->indicate_seq = 0xffff; preorder_ctrl->wend_b = 0xffff; preorder_ctrl->wsize_b = 64;/* 64; */ _rtw_init_queue(&preorder_ctrl->pending_recvframe_queue); rtw_init_recv_timer(preorder_ctrl); } /* init for DM */ psta->rssi_stat.UndecoratedSmoothedPWDB = (-1); psta->rssi_stat.UndecoratedSmoothedCCK = (-1); /* init for the sequence number of received management frame */ psta->RxMgmtFrameSeqNum = 0xffff; } exit: _func_exit_; return psta; } /* using pstapriv->sta_hash_lock to protect */ u32 rtw_free_stainfo(struct adapter *padapter , struct sta_info *psta) { int i; unsigned long irql0; struct __queue *pfree_sta_queue; struct recv_reorder_ctrl *preorder_ctrl; struct sta_xmit_priv *pstaxmitpriv; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct sta_priv *pstapriv = &padapter->stapriv; _func_enter_; if (psta == NULL) goto exit; pfree_sta_queue = &pstapriv->free_sta_queue; pstaxmitpriv = &psta->sta_xmitpriv; 
_enter_critical_bh(&pxmitpriv->lock, &irql0); rtw_free_xmitframe_queue(pxmitpriv, &psta->sleep_q); psta->sleepq_len = 0; rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending); rtw_list_delete(&(pstaxmitpriv->vo_q.tx_pending)); rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending); rtw_list_delete(&(pstaxmitpriv->vi_q.tx_pending)); rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending); rtw_list_delete(&(pstaxmitpriv->bk_q.tx_pending)); rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->be_q.sta_pending); rtw_list_delete(&(pstaxmitpriv->be_q.tx_pending)); _exit_critical_bh(&pxmitpriv->lock, &irql0); rtw_list_delete(&psta->hash_list); RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_, ("\n free number_%d stainfo with hwaddr=0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x\n", pstapriv->asoc_sta_count , psta->hwaddr[0], psta->hwaddr[1], psta->hwaddr[2], psta->hwaddr[3], psta->hwaddr[4], psta->hwaddr[5])); pstapriv->asoc_sta_count--; /* re-init sta_info; 20061114 */ _rtw_init_sta_xmit_priv(&psta->sta_xmitpriv); _rtw_init_sta_recv_priv(&psta->sta_recvpriv); _cancel_timer_ex(&psta->addba_retry_timer); /* for A-MPDU Rx reordering buffer control, cancel reordering_ctrl_timer */ for (i = 0; i < 16; i++) { unsigned long irql; struct list_head *phead, *plist; union recv_frame *prframe; struct __queue *ppending_recvframe_queue; struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue; preorder_ctrl = &psta->recvreorder_ctrl[i]; _cancel_timer_ex(&preorder_ctrl->reordering_ctrl_timer); ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue; _enter_critical_bh(&ppending_recvframe_queue->lock, &irql); phead = get_list_head(ppending_recvframe_queue); plist = get_next(phead); while (!rtw_is_list_empty(phead)) { prframe = LIST_CONTAINOR(plist, union recv_frame, u); plist = get_next(plist); rtw_list_delete(&(prframe->u.hdr.list)); rtw_free_recvframe(prframe, pfree_recv_queue); } 
_exit_critical_bh(&ppending_recvframe_queue->lock, &irql); } if (!(psta->state & WIFI_AP_STATE)) rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, false); #ifdef CONFIG_88EU_AP_MODE _enter_critical_bh(&pstapriv->auth_list_lock, &irql0); if (!rtw_is_list_empty(&psta->auth_list)) { rtw_list_delete(&psta->auth_list); pstapriv->auth_list_cnt--; } _exit_critical_bh(&pstapriv->auth_list_lock, &irql0); psta->expire_to = 0; psta->sleepq_ac_len = 0; psta->qos_info = 0; psta->max_sp_len = 0; psta->uapsd_bk = 0; psta->uapsd_be = 0; psta->uapsd_vi = 0; psta->uapsd_vo = 0; psta->has_legacy_ac = 0; pstapriv->sta_dz_bitmap &= ~BIT(psta->aid); pstapriv->tim_bitmap &= ~BIT(psta->aid); if ((psta->aid > 0) && (pstapriv->sta_aid[psta->aid - 1] == psta)) { pstapriv->sta_aid[psta->aid - 1] = NULL; psta->aid = 0; } psta->under_exist_checking = 0; #endif /* CONFIG_88EU_AP_MODE */ _enter_critical_bh(&(pfree_sta_queue->lock), &irql0); rtw_list_insert_tail(&psta->list, get_list_head(pfree_sta_queue)); _exit_critical_bh(&(pfree_sta_queue->lock), &irql0); exit: _func_exit_; return _SUCCESS; } /* free all stainfo which in sta_hash[all] */ void rtw_free_all_stainfo(struct adapter *padapter) { unsigned long irql; struct list_head *plist, *phead; s32 index; struct sta_info *psta = NULL; struct sta_priv *pstapriv = &padapter->stapriv; struct sta_info *pbcmc_stainfo = rtw_get_bcmc_stainfo(padapter); _func_enter_; if (pstapriv->asoc_sta_count == 1) goto exit; _enter_critical_bh(&pstapriv->sta_hash_lock, &irql); for (index = 0; index < NUM_STA; index++) { phead = &(pstapriv->sta_hash[index]); plist = get_next(phead); while ((!rtw_end_of_queue_search(phead, plist))) { psta = LIST_CONTAINOR(plist, struct sta_info , hash_list); plist = get_next(plist); if (pbcmc_stainfo != psta) rtw_free_stainfo(padapter , psta); } } _exit_critical_bh(&pstapriv->sta_hash_lock, &irql); exit: _func_exit_; } /* any station allocated can be searched by hash list */ struct sta_info *rtw_get_stainfo(struct sta_priv 
*pstapriv, u8 *hwaddr) { unsigned long irql; struct list_head *plist, *phead; struct sta_info *psta = NULL; u32 index; u8 *addr; u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; _func_enter_; if (hwaddr == NULL) return NULL; if (IS_MCAST(hwaddr)) addr = bc_addr; else addr = hwaddr; index = wifi_mac_hash(addr); _enter_critical_bh(&pstapriv->sta_hash_lock, &irql); phead = &(pstapriv->sta_hash[index]); plist = get_next(phead); while ((!rtw_end_of_queue_search(phead, plist))) { psta = LIST_CONTAINOR(plist, struct sta_info, hash_list); if ((_rtw_memcmp(psta->hwaddr, addr, ETH_ALEN)) == true) { /* if found the matched address */ break; } psta = NULL; plist = get_next(plist); } _exit_critical_bh(&pstapriv->sta_hash_lock, &irql); _func_exit_; return psta; } u32 rtw_init_bcmc_stainfo(struct adapter *padapter) { struct sta_info *psta; u32 res = _SUCCESS; unsigned char bcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; struct sta_priv *pstapriv = &padapter->stapriv; _func_enter_; psta = rtw_alloc_stainfo(pstapriv, bcast_addr); if (psta == NULL) { res = _FAIL; RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_, ("rtw_alloc_stainfo fail")); goto exit; } /* default broadcast & multicast use macid 1 */ psta->mac_id = 1; exit: _func_exit_; return res; } struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter) { struct sta_info *psta; struct sta_priv *pstapriv = &padapter->stapriv; u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; _func_enter_; psta = rtw_get_stainfo(pstapriv, bc_addr); _func_exit_; return psta; } u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr) { u8 res = true; #ifdef CONFIG_88EU_AP_MODE unsigned long irql; struct list_head *plist, *phead; struct rtw_wlan_acl_node *paclnode; u8 match = false; struct sta_priv *pstapriv = &padapter->stapriv; struct wlan_acl_pool *pacl_list = &pstapriv->acl_list; struct __queue *pacl_node_q = &pacl_list->acl_node_q; _enter_critical_bh(&(pacl_node_q->lock), &irql); phead = 
get_list_head(pacl_node_q); plist = get_next(phead); while ((!rtw_end_of_queue_search(phead, plist))) { paclnode = LIST_CONTAINOR(plist, struct rtw_wlan_acl_node, list); plist = get_next(plist); if (_rtw_memcmp(paclnode->addr, mac_addr, ETH_ALEN)) { if (paclnode->valid) { match = true; break; } } } _exit_critical_bh(&(pacl_node_q->lock), &irql); if (pacl_list->mode == 1)/* accept unless in deny list */ res = (match) ? false : true; else if (pacl_list->mode == 2)/* deny unless in accept list */ res = (match) ? true : false; else res = true; #endif return res; }
gpl-2.0
wrapped/kernel_htc_m7
drivers/usb/gadget/u_smd.c
332
21940
/* * u_smd.c - utilities for USB gadget serial over smd * * Copyright (c) 2011, Code Aurora Forum. All rights reserved. * * This code also borrows from drivers/usb/gadget/u_serial.c, which is * Copyright (C) 2000 - 2003 Al Borchers (alborchers@steinerpoint.com) * Copyright (C) 2008 David Brownell * Copyright (C) 2008 by Nokia Corporation * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2000 Peter Berger (pberger@brimson.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/termios.h> #include <mach/msm_smd.h> #include <linux/debugfs.h> #include "u_serial.h" #define SMD_RX_QUEUE_SIZE 8 #define SMD_RX_BUF_SIZE 2048 #define SMD_TX_QUEUE_SIZE 8 #define SMD_TX_BUF_SIZE 2048 static struct workqueue_struct *gsmd_wq; #define SMD_N_PORTS 2 #define CH_OPENED 0 #define CH_READY 1 struct smd_port_info { struct smd_channel *ch; char *name; unsigned long flags; }; struct smd_port_info smd_pi[SMD_N_PORTS] = { { .name = "DS", }, { .name = "UNUSED", }, }; struct gsmd_port { unsigned port_num; spinlock_t port_lock; unsigned n_read; struct list_head read_pool; struct list_head read_queue; struct work_struct push; struct list_head write_pool; struct work_struct pull; struct gserial *port_usb; struct smd_port_info *pi; struct delayed_work connect_work; /* At present, smd does not notify * control bit change info from modem */ struct work_struct update_modem_ctrl_sig; #define SMD_ACM_CTRL_DTR 0x01 #define 
SMD_ACM_CTRL_RTS 0x02 unsigned cbits_to_modem; #define SMD_ACM_CTRL_DCD 0x01 #define SMD_ACM_CTRL_DSR 0x02 #define SMD_ACM_CTRL_BRK 0x04 #define SMD_ACM_CTRL_RI 0x08 unsigned cbits_to_laptop; /* pkt counters */ unsigned long nbytes_tomodem; unsigned long nbytes_tolaptop; }; static struct smd_portmaster { struct mutex lock; struct gsmd_port *port; struct platform_driver pdrv; } smd_ports[SMD_N_PORTS]; static unsigned n_smd_ports; static void gsmd_free_req(struct usb_ep *ep, struct usb_request *req) { kfree(req->buf); usb_ep_free_request(ep, req); } static void gsmd_free_requests(struct usb_ep *ep, struct list_head *head) { struct usb_request *req; while (!list_empty(head)) { req = list_entry(head->next, struct usb_request, list); list_del(&req->list); gsmd_free_req(ep, req); } } static struct usb_request * gsmd_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags) { struct usb_request *req; req = usb_ep_alloc_request(ep, flags); if (!req) { pr_err("%s: usb alloc request failed\n", __func__); return 0; } req->length = len; req->buf = kmalloc(len, flags); if (!req->buf) { pr_err("%s: request buf allocation failed\n", __func__); usb_ep_free_request(ep, req); return 0; } return req; } static int gsmd_alloc_requests(struct usb_ep *ep, struct list_head *head, int num, int size, void (*cb)(struct usb_ep *ep, struct usb_request *)) { int i; struct usb_request *req; pr_debug("%s: ep:%p head:%p num:%d size:%d cb:%p", __func__, ep, head, num, size, cb); for (i = 0; i < num; i++) { req = gsmd_alloc_req(ep, size, GFP_ATOMIC); if (!req) { pr_debug("%s: req allocated:%d\n", __func__, i); return list_empty(head) ? 
-ENOMEM : 0; } req->complete = cb; list_add(&req->list, head); } return 0; } static void gsmd_start_rx(struct gsmd_port *port) { struct list_head *pool; struct usb_ep *out; unsigned long flags; int ret; if (!port) { pr_err("%s: port is null\n", __func__); return; } spin_lock_irqsave(&port->port_lock, flags); if (!port->port_usb) { pr_debug("%s: USB disconnected\n", __func__); goto start_rx_end; } pool = &port->read_pool; out = port->port_usb->out; while (test_bit(CH_OPENED, &port->pi->flags) && !list_empty(pool)) { struct usb_request *req; req = list_entry(pool->next, struct usb_request, list); list_del(&req->list); req->length = SMD_RX_BUF_SIZE; spin_unlock_irqrestore(&port->port_lock, flags); ret = usb_ep_queue(out, req, GFP_KERNEL); spin_lock_irqsave(&port->port_lock, flags); if (ret) { pr_err("%s: usb ep out queue failed" "port:%p, port#%d\n", __func__, port, port->port_num); list_add_tail(&req->list, pool); break; } } start_rx_end: spin_unlock_irqrestore(&port->port_lock, flags); } static void gsmd_rx_push(struct work_struct *w) { struct gsmd_port *port = container_of(w, struct gsmd_port, push); struct smd_port_info *pi = port->pi; struct list_head *q; pr_debug("%s: port:%p port#%d", __func__, port, port->port_num); spin_lock_irq(&port->port_lock); q = &port->read_queue; while (pi->ch && !list_empty(q)) { struct usb_request *req; int avail; req = list_first_entry(q, struct usb_request, list); switch (req->status) { case -ESHUTDOWN: pr_debug("%s: req status shutdown portno#%d port:%p\n", __func__, port->port_num, port); goto rx_push_end; default: pr_warning("%s: port:%p port#%d" " Unexpected Rx Status:%d\n", __func__, port, port->port_num, req->status); case 0: /* normal completion */ break; } avail = smd_write_avail(pi->ch); if (!avail) goto rx_push_end; if (req->actual) { char *packet = req->buf; unsigned size = req->actual; unsigned n; int count; n = port->n_read; if (n) { packet += n; size -= n; } count = smd_write(pi->ch, packet, size); if (count < 0) { 
pr_err("%s: smd write failed err:%d\n", __func__, count); goto rx_push_end; } if (count != size) { port->n_read += count; goto rx_push_end; } port->nbytes_tomodem += count; } port->n_read = 0; list_move(&req->list, &port->read_pool); } rx_push_end: spin_unlock_irq(&port->port_lock); gsmd_start_rx(port); } static void gsmd_read_pending(struct gsmd_port *port) { int avail; if (!port || !port->pi->ch) return; /* passing null buffer discards the data */ while ((avail = smd_read_avail(port->pi->ch))) smd_read(port->pi->ch, 0, avail); return; } static void gsmd_tx_pull(struct work_struct *w) { struct gsmd_port *port = container_of(w, struct gsmd_port, pull); struct list_head *pool = &port->write_pool; struct smd_port_info *pi = port->pi; struct usb_ep *in; pr_debug("%s: port:%p port#%d pool:%p\n", __func__, port, port->port_num, pool); spin_lock_irq(&port->port_lock); if (!port->port_usb) { pr_debug("%s: usb is disconnected\n", __func__); spin_unlock_irq(&port->port_lock); gsmd_read_pending(port); return; } in = port->port_usb->in; while (pi->ch && !list_empty(pool)) { struct usb_request *req; int avail; int ret; avail = smd_read_avail(pi->ch); if (!avail) break; avail = avail > SMD_TX_BUF_SIZE ? 
SMD_TX_BUF_SIZE : avail; req = list_entry(pool->next, struct usb_request, list); list_del(&req->list); req->length = smd_read(pi->ch, req->buf, avail); spin_unlock_irq(&port->port_lock); ret = usb_ep_queue(in, req, GFP_KERNEL); spin_lock_irq(&port->port_lock); if (ret) { pr_err("%s: usb ep out queue failed" "port:%p, port#%d err:%d\n", __func__, port, port->port_num, ret); /* could be usb disconnected */ if (!port->port_usb) gsmd_free_req(in, req); else list_add(&req->list, pool); goto tx_pull_end; } port->nbytes_tolaptop += req->length; } tx_pull_end: /* TBD: Check how code behaves on USB bus suspend */ if (port->port_usb && smd_read_avail(port->pi->ch) && !list_empty(pool)) queue_work(gsmd_wq, &port->pull); spin_unlock_irq(&port->port_lock); return; } static void gsmd_read_complete(struct usb_ep *ep, struct usb_request *req) { struct gsmd_port *port = ep->driver_data; pr_debug("%s: ep:%p port:%p\n", __func__, ep, port); if (!port) { pr_err("%s: port is null\n", __func__); return; } spin_lock(&port->port_lock); if (!test_bit(CH_OPENED, &port->pi->flags) || req->status == -ESHUTDOWN) { spin_unlock(&port->port_lock); gsmd_free_req(ep, req); return; } list_add_tail(&req->list, &port->read_queue); queue_work(gsmd_wq, &port->push); spin_unlock(&port->port_lock); return; } static void gsmd_write_complete(struct usb_ep *ep, struct usb_request *req) { struct gsmd_port *port = ep->driver_data; pr_debug("%s: ep:%p port:%p\n", __func__, ep, port); if (!port) { pr_err("%s: port is null\n", __func__); return; } spin_lock(&port->port_lock); if (!test_bit(CH_OPENED, &port->pi->flags) || req->status == -ESHUTDOWN) { spin_unlock(&port->port_lock); gsmd_free_req(ep, req); return; } if (req->status) pr_warning("%s: port:%p port#%d unexpected %s status %d\n", __func__, port, port->port_num, ep->name, req->status); list_add(&req->list, &port->write_pool); queue_work(gsmd_wq, &port->pull); spin_unlock(&port->port_lock); return; } static void gsmd_start_io(struct gsmd_port *port) { int 
ret = -ENODEV; pr_debug("%s: port: %p\n", __func__, port); spin_lock(&port->port_lock); if (!port->port_usb) goto start_io_out; smd_tiocmset_from_cb(port->pi->ch, port->cbits_to_modem, ~port->cbits_to_modem); ret = gsmd_alloc_requests(port->port_usb->out, &port->read_pool, SMD_RX_QUEUE_SIZE, SMD_RX_BUF_SIZE, gsmd_read_complete); if (ret) { pr_err("%s: unable to allocate out requests\n", __func__); goto start_io_out; } ret = gsmd_alloc_requests(port->port_usb->in, &port->write_pool, SMD_TX_QUEUE_SIZE, SMD_TX_BUF_SIZE, gsmd_write_complete); if (ret) { gsmd_free_requests(port->port_usb->out, &port->read_pool); pr_err("%s: unable to allocate IN requests\n", __func__); goto start_io_out; } start_io_out: spin_unlock(&port->port_lock); if (ret) return; gsmd_start_rx(port); } static unsigned int convert_uart_sigs_to_acm(unsigned uart_sig) { unsigned int acm_sig = 0; /* should this needs to be in calling functions ??? */ uart_sig &= (TIOCM_RI | TIOCM_CD | TIOCM_DSR); if (uart_sig & TIOCM_RI) acm_sig |= SMD_ACM_CTRL_RI; if (uart_sig & TIOCM_CD) acm_sig |= SMD_ACM_CTRL_DCD; if (uart_sig & TIOCM_DSR) acm_sig |= SMD_ACM_CTRL_DSR; return acm_sig; } static unsigned int convert_acm_sigs_to_uart(unsigned acm_sig) { unsigned int uart_sig = 0; /* should this needs to be in calling functions ??? 
*/ acm_sig &= (SMD_ACM_CTRL_DTR | SMD_ACM_CTRL_RTS); if (acm_sig & SMD_ACM_CTRL_DTR) uart_sig |= TIOCM_DTR; if (acm_sig & SMD_ACM_CTRL_RTS) uart_sig |= TIOCM_RTS; return uart_sig; } static void gsmd_stop_io(struct gsmd_port *port) { struct usb_ep *in; struct usb_ep *out; unsigned long flags; spin_lock_irqsave(&port->port_lock, flags); if (!port->port_usb) { spin_unlock_irqrestore(&port->port_lock, flags); return; } in = port->port_usb->in; out = port->port_usb->out; spin_unlock_irqrestore(&port->port_lock, flags); usb_ep_fifo_flush(in); usb_ep_fifo_flush(out); spin_lock(&port->port_lock); if (port->port_usb) { gsmd_free_requests(out, &port->read_pool); gsmd_free_requests(out, &port->read_queue); gsmd_free_requests(in, &port->write_pool); port->n_read = 0; port->cbits_to_laptop = 0; } if (port->port_usb->send_modem_ctrl_bits) port->port_usb->send_modem_ctrl_bits( port->port_usb, port->cbits_to_laptop); spin_unlock(&port->port_lock); } static void gsmd_notify(void *priv, unsigned event) { struct gsmd_port *port = priv; struct smd_port_info *pi = port->pi; int i; switch (event) { case SMD_EVENT_DATA: pr_debug("%s: Event data\n", __func__); if (smd_read_avail(pi->ch)) queue_work(gsmd_wq, &port->pull); if (smd_write_avail(pi->ch)) queue_work(gsmd_wq, &port->push); break; case SMD_EVENT_OPEN: pr_debug("%s: Event Open\n", __func__); set_bit(CH_OPENED, &pi->flags); gsmd_start_io(port); break; case SMD_EVENT_CLOSE: pr_debug("%s: Event Close\n", __func__); clear_bit(CH_OPENED, &pi->flags); gsmd_stop_io(port); break; case SMD_EVENT_STATUS: i = smd_tiocmget(port->pi->ch); port->cbits_to_laptop = convert_uart_sigs_to_acm(i); if (port->port_usb && port->port_usb->send_modem_ctrl_bits) port->port_usb->send_modem_ctrl_bits(port->port_usb, port->cbits_to_laptop); break; } } static void gsmd_connect_work(struct work_struct *w) { struct gsmd_port *port; struct smd_port_info *pi; int ret; port = container_of(w, struct gsmd_port, connect_work.work); pi = port->pi; pr_debug("%s: port:%p 
port#%d\n", __func__, port, port->port_num); if (!test_bit(CH_READY, &pi->flags)) return; ret = smd_named_open_on_edge(pi->name, SMD_APPS_MODEM, &pi->ch, port, gsmd_notify); if (ret) { if (ret == -EAGAIN) { /* port not ready - retry */ pr_debug("%s: SMD port not ready - rescheduling:%s err:%d\n", __func__, pi->name, ret); queue_delayed_work(gsmd_wq, &port->connect_work, msecs_to_jiffies(250)); } else { pr_err("%s: unable to open smd port:%s err:%d\n", __func__, pi->name, ret); } } } static void gsmd_notify_modem(void *gptr, u8 portno, int ctrl_bits) { struct gsmd_port *port; int temp; struct gserial *gser = gptr; if (portno >= n_smd_ports) { pr_err("%s: invalid portno#%d\n", __func__, portno); return; } if (!gser) { pr_err("%s: gser is null\n", __func__); return; } port = smd_ports[portno].port; temp = convert_acm_sigs_to_uart(ctrl_bits); if (temp == port->cbits_to_modem) return; port->cbits_to_modem = temp; /* usb could send control signal before smd is ready */ if (!test_bit(CH_OPENED, &port->pi->flags)) return; /* if DTR is high, update latest modem info to laptop */ if (port->cbits_to_modem & TIOCM_DTR) { unsigned i; i = smd_tiocmget(port->pi->ch); port->cbits_to_laptop = convert_uart_sigs_to_acm(i); if (gser->send_modem_ctrl_bits) gser->send_modem_ctrl_bits( port->port_usb, port->cbits_to_laptop); } smd_tiocmset(port->pi->ch, port->cbits_to_modem, ~port->cbits_to_modem); } int gsmd_connect(struct gserial *gser, u8 portno) { unsigned long flags; int ret; struct gsmd_port *port; pr_debug("%s: gserial:%p portno:%u\n", __func__, gser, portno); if (portno >= n_smd_ports) { pr_err("%s: Invalid port no#%d", __func__, portno); return -EINVAL; } if (!gser) { pr_err("%s: gser is null\n", __func__); return -EINVAL; } port = smd_ports[portno].port; spin_lock_irqsave(&port->port_lock, flags); port->port_usb = gser; gser->notify_modem = gsmd_notify_modem; port->nbytes_tomodem = 0; port->nbytes_tolaptop = 0; spin_unlock_irqrestore(&port->port_lock, flags); ret = 
usb_ep_enable(gser->in); if (ret) { pr_err("%s: usb_ep_enable failed eptype:IN ep:%p", __func__, gser->in); port->port_usb = 0; return ret; } gser->in->driver_data = port; ret = usb_ep_enable(gser->out); if (ret) { pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p", __func__, gser->out); port->port_usb = 0; gser->in->driver_data = 0; return ret; } gser->out->driver_data = port; queue_delayed_work(gsmd_wq, &port->connect_work, msecs_to_jiffies(0)); return 0; } void gsmd_disconnect(struct gserial *gser, u8 portno) { unsigned long flags; struct gsmd_port *port; pr_debug("%s: gserial:%p portno:%u\n", __func__, gser, portno); if (portno >= n_smd_ports) { pr_err("%s: invalid portno#%d\n", __func__, portno); return; } if (!gser) { pr_err("%s: gser is null\n", __func__); return; } port = smd_ports[portno].port; spin_lock_irqsave(&port->port_lock, flags); port->port_usb = 0; spin_unlock_irqrestore(&port->port_lock, flags); /* disable endpoints, aborting down any active I/O */ usb_ep_disable(gser->out); usb_ep_disable(gser->in); spin_lock_irqsave(&port->port_lock, flags); gsmd_free_requests(gser->out, &port->read_pool); gsmd_free_requests(gser->out, &port->read_queue); gsmd_free_requests(gser->in, &port->write_pool); port->n_read = 0; spin_unlock_irqrestore(&port->port_lock, flags); if (test_and_clear_bit(CH_OPENED, &port->pi->flags)) { /* lower the dtr */ port->cbits_to_modem = 0; smd_tiocmset(port->pi->ch, port->cbits_to_modem, ~port->cbits_to_modem); } if (port->pi->ch) { smd_close(port->pi->ch); port->pi->ch = NULL; } } #define SMD_CH_MAX_LEN 20 static int gsmd_ch_probe(struct platform_device *pdev) { struct gsmd_port *port; struct smd_port_info *pi; int i; unsigned long flags; pr_debug("%s: name:%s\n", __func__, pdev->name); for (i = 0; i < n_smd_ports; i++) { port = smd_ports[i].port; pi = port->pi; if (!strncmp(pi->name, pdev->name, SMD_CH_MAX_LEN)) { set_bit(CH_READY, &pi->flags); spin_lock_irqsave(&port->port_lock, flags); if (port->port_usb) 
queue_delayed_work(gsmd_wq, &port->connect_work, msecs_to_jiffies(0)); spin_unlock_irqrestore(&port->port_lock, flags); break; } } return 0; } static int gsmd_ch_remove(struct platform_device *pdev) { struct gsmd_port *port; struct smd_port_info *pi; int i; pr_debug("%s: name:%s\n", __func__, pdev->name); for (i = 0; i < n_smd_ports; i++) { port = smd_ports[i].port; pi = port->pi; if (!strncmp(pi->name, pdev->name, SMD_CH_MAX_LEN)) { clear_bit(CH_READY, &pi->flags); clear_bit(CH_OPENED, &pi->flags); if (pi->ch) { smd_close(pi->ch); pi->ch = NULL; } break; } } return 0; } static void gsmd_port_free(int portno) { struct gsmd_port *port = smd_ports[portno].port; if (!port) kfree(port); } static int gsmd_port_alloc(int portno, struct usb_cdc_line_coding *coding) { struct gsmd_port *port; struct platform_driver *pdrv; port = kzalloc(sizeof(struct gsmd_port), GFP_KERNEL); if (!port) return -ENOMEM; port->port_num = portno; port->pi = &smd_pi[portno]; spin_lock_init(&port->port_lock); INIT_LIST_HEAD(&port->read_pool); INIT_LIST_HEAD(&port->read_queue); INIT_WORK(&port->push, gsmd_rx_push); INIT_LIST_HEAD(&port->write_pool); INIT_WORK(&port->pull, gsmd_tx_pull); INIT_DELAYED_WORK(&port->connect_work, gsmd_connect_work); smd_ports[portno].port = port; pdrv = &smd_ports[portno].pdrv; pdrv->probe = gsmd_ch_probe; pdrv->remove = gsmd_ch_remove; pdrv->driver.name = port->pi->name; pdrv->driver.owner = THIS_MODULE; platform_driver_register(pdrv); pr_debug("%s: port:%p portno:%d\n", __func__, port, portno); return 0; } #if defined(CONFIG_DEBUG_FS) static ssize_t debug_smd_read_stats(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { struct gsmd_port *port; struct smd_port_info *pi; char *buf; unsigned long flags; int temp = 0; int i; int ret; buf = kzalloc(sizeof(char) * 512, GFP_KERNEL); if (!buf) return -ENOMEM; for (i = 0; i < n_smd_ports; i++) { port = smd_ports[i].port; pi = port->pi; spin_lock_irqsave(&port->port_lock, flags); temp += scnprintf(buf + temp, 
512 - temp, "###PORT:%d###\n" "nbytes_tolaptop: %lu\n" "nbytes_tomodem: %lu\n" "cbits_to_modem: %u\n" "cbits_to_laptop: %u\n" "n_read: %u\n" "smd_read_avail: %d\n" "smd_write_avail: %d\n" "CH_OPENED: %d\n" "CH_READY: %d\n", i, port->nbytes_tolaptop, port->nbytes_tomodem, port->cbits_to_modem, port->cbits_to_laptop, port->n_read, pi->ch ? smd_read_avail(pi->ch) : 0, pi->ch ? smd_write_avail(pi->ch) : 0, test_bit(CH_OPENED, &pi->flags), test_bit(CH_READY, &pi->flags)); spin_unlock_irqrestore(&port->port_lock, flags); } ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp); kfree(buf); return ret; } static ssize_t debug_smd_reset_stats(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct gsmd_port *port; unsigned long flags; int i; for (i = 0; i < n_smd_ports; i++) { port = smd_ports[i].port; spin_lock_irqsave(&port->port_lock, flags); port->nbytes_tolaptop = 0; port->nbytes_tomodem = 0; spin_unlock_irqrestore(&port->port_lock, flags); } return count; } static int debug_smd_open(struct inode *inode, struct file *file) { return 0; } static const struct file_operations debug_gsmd_ops = { .open = debug_smd_open, .read = debug_smd_read_stats, .write = debug_smd_reset_stats, }; static void gsmd_debugfs_init(void) { struct dentry *dent; dent = debugfs_create_dir("usb_gsmd", 0); if (IS_ERR(dent)) return; debugfs_create_file("status", 0444, dent, 0, &debug_gsmd_ops); } #else static void gsmd_debugfs_init(void) {} #endif int gsmd_setup(struct usb_gadget *g, unsigned count) { struct usb_cdc_line_coding coding; int ret; int i; pr_debug("%s: g:%p count: %d\n", __func__, g, count); if (!count || count > SMD_N_PORTS) { pr_err("%s: Invalid num of ports count:%d gadget:%p\n", __func__, count, g); return -EINVAL; } coding.dwDTERate = cpu_to_le32(9600); coding.bCharFormat = 8; coding.bParityType = USB_CDC_NO_PARITY; coding.bDataBits = USB_CDC_1_STOP_BITS; gsmd_wq = create_singlethread_workqueue("k_gsmd"); if (!gsmd_wq) { pr_err("%s: Unable to create 
workqueue gsmd_wq\n", __func__); return -ENOMEM; } for (i = 0; i < count; i++) { mutex_init(&smd_ports[i].lock); n_smd_ports++; ret = gsmd_port_alloc(i, &coding); if (ret) { n_smd_ports--; pr_err("%s: Unable to alloc port:%d\n", __func__, i); goto free_smd_ports; } } gsmd_debugfs_init(); return 0; free_smd_ports: for (i = 0; i < n_smd_ports; i++) gsmd_port_free(i); destroy_workqueue(gsmd_wq); return ret; } void gsmd_cleanup(struct usb_gadget *g, unsigned count) { /* TBD */ }
gpl-2.0
sandymanu/sandy_lettuce_8916
sound/usb/usx2y/usbusx2yaudio.c
1868
29249
/* * US-X2Y AUDIO * Copyright (c) 2002-2004 by Karsten Wiese * * based on * * (Tentative) USB Audio Driver for ALSA * * Main and PCM part * * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de> * * Many codes borrowed from audio.c by * Alan Cox (alan@lxorguk.ukuu.org.uk) * Thomas Sailer (sailer@ife.ee.ethz.ch) * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/moduleparam.h> #include <sound/core.h> #include <sound/info.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include "usx2y.h" #include "usbusx2y.h" #define USX2Y_NRPACKS 4 /* Default value used for nr of packs per urb. 1 to 4 have been tested ok on uhci. To use 3 on ohci, you'd need a patch: look for "0000425-linux-2.6.9-rc4-mm1_ohci-hcd.patch.gz" on "https://bugtrack.alsa-project.org/alsa-bug/bug_view_page.php?bug_id=0000425" . 1, 2 and 4 work out of the box on ohci, if I recall correctly. Bigger is safer operation, smaller gives lower latencies. */ #define USX2Y_NRPACKS_VARIABLE y /* If your system works ok with this module's parameter nrpacks set to 1, you might as well comment this #define out, and thereby produce smaller, faster code. You'd also set USX2Y_NRPACKS to 1 then. 
*/ #ifdef USX2Y_NRPACKS_VARIABLE static int nrpacks = USX2Y_NRPACKS; /* number of packets per urb */ #define nr_of_packs() nrpacks module_param(nrpacks, int, 0444); MODULE_PARM_DESC(nrpacks, "Number of packets per URB."); #else #define nr_of_packs() USX2Y_NRPACKS #endif static int usX2Y_urb_capt_retire(struct snd_usX2Y_substream *subs) { struct urb *urb = subs->completed_urb; struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime; unsigned char *cp; int i, len, lens = 0, hwptr_done = subs->hwptr_done; struct usX2Ydev *usX2Y = subs->usX2Y; for (i = 0; i < nr_of_packs(); i++) { cp = (unsigned char*)urb->transfer_buffer + urb->iso_frame_desc[i].offset; if (urb->iso_frame_desc[i].status) { /* active? hmm, skip this */ snd_printk(KERN_ERR "active frame status %i. " "Most probably some hardware problem.\n", urb->iso_frame_desc[i].status); return urb->iso_frame_desc[i].status; } len = urb->iso_frame_desc[i].actual_length / usX2Y->stride; if (! len) { snd_printd("0 == len ERROR!\n"); continue; } /* copy a data chunk */ if ((hwptr_done + len) > runtime->buffer_size) { int cnt = runtime->buffer_size - hwptr_done; int blen = cnt * usX2Y->stride; memcpy(runtime->dma_area + hwptr_done * usX2Y->stride, cp, blen); memcpy(runtime->dma_area, cp + blen, len * usX2Y->stride - blen); } else { memcpy(runtime->dma_area + hwptr_done * usX2Y->stride, cp, len * usX2Y->stride); } lens += len; if ((hwptr_done += len) >= runtime->buffer_size) hwptr_done -= runtime->buffer_size; } subs->hwptr_done = hwptr_done; subs->transfer_done += lens; /* update the pointer, call callback if necessary */ if (subs->transfer_done >= runtime->period_size) { subs->transfer_done -= runtime->period_size; snd_pcm_period_elapsed(subs->pcm_substream); } return 0; } /* * prepare urb for playback data pipe * * we copy the data directly from the pcm buffer. * the current position to be copied is held in hwptr field. 
* since a urb can handle only a single linear buffer, if the total * transferred area overflows the buffer boundary, we cannot send * it directly from the buffer. thus the data is once copied to * a temporary buffer and urb points to that. */ static int usX2Y_urb_play_prepare(struct snd_usX2Y_substream *subs, struct urb *cap_urb, struct urb *urb) { int count, counts, pack; struct usX2Ydev *usX2Y = subs->usX2Y; struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime; count = 0; for (pack = 0; pack < nr_of_packs(); pack++) { /* calculate the size of a packet */ counts = cap_urb->iso_frame_desc[pack].actual_length / usX2Y->stride; count += counts; if (counts < 43 || counts > 50) { snd_printk(KERN_ERR "should not be here with counts=%i\n", counts); return -EPIPE; } /* set up descriptor */ urb->iso_frame_desc[pack].offset = pack ? urb->iso_frame_desc[pack - 1].offset + urb->iso_frame_desc[pack - 1].length : 0; urb->iso_frame_desc[pack].length = cap_urb->iso_frame_desc[pack].actual_length; } if (atomic_read(&subs->state) >= state_PRERUNNING) if (subs->hwptr + count > runtime->buffer_size) { /* err, the transferred area goes over buffer boundary. * copy the data to the temp buffer. */ int len; len = runtime->buffer_size - subs->hwptr; urb->transfer_buffer = subs->tmpbuf; memcpy(subs->tmpbuf, runtime->dma_area + subs->hwptr * usX2Y->stride, len * usX2Y->stride); memcpy(subs->tmpbuf + len * usX2Y->stride, runtime->dma_area, (count - len) * usX2Y->stride); subs->hwptr += count; subs->hwptr -= runtime->buffer_size; } else { /* set the buffer pointer */ urb->transfer_buffer = runtime->dma_area + subs->hwptr * usX2Y->stride; if ((subs->hwptr += count) >= runtime->buffer_size) subs->hwptr -= runtime->buffer_size; } else urb->transfer_buffer = subs->tmpbuf; urb->transfer_buffer_length = count * usX2Y->stride; return 0; } /* * process after playback data complete * * update the current position and call callback if a period is processed. 
*/ static void usX2Y_urb_play_retire(struct snd_usX2Y_substream *subs, struct urb *urb) { struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime; int len = urb->actual_length / subs->usX2Y->stride; subs->transfer_done += len; subs->hwptr_done += len; if (subs->hwptr_done >= runtime->buffer_size) subs->hwptr_done -= runtime->buffer_size; if (subs->transfer_done >= runtime->period_size) { subs->transfer_done -= runtime->period_size; snd_pcm_period_elapsed(subs->pcm_substream); } } static int usX2Y_urb_submit(struct snd_usX2Y_substream *subs, struct urb *urb, int frame) { int err; if (!urb) return -ENODEV; urb->start_frame = (frame + NRURBS * nr_of_packs()); // let hcd do rollover sanity checks urb->hcpriv = NULL; urb->dev = subs->usX2Y->dev; /* we need to set this at each time */ if ((err = usb_submit_urb(urb, GFP_ATOMIC)) < 0) { snd_printk(KERN_ERR "usb_submit_urb() returned %i\n", err); return err; } return 0; } static inline int usX2Y_usbframe_complete(struct snd_usX2Y_substream *capsubs, struct snd_usX2Y_substream *playbacksubs, int frame) { int err, state; struct urb *urb = playbacksubs->completed_urb; state = atomic_read(&playbacksubs->state); if (NULL != urb) { if (state == state_RUNNING) usX2Y_urb_play_retire(playbacksubs, urb); else if (state >= state_PRERUNNING) atomic_inc(&playbacksubs->state); } else { switch (state) { case state_STARTING1: urb = playbacksubs->urb[0]; atomic_inc(&playbacksubs->state); break; case state_STARTING2: urb = playbacksubs->urb[1]; atomic_inc(&playbacksubs->state); break; } } if (urb) { if ((err = usX2Y_urb_play_prepare(playbacksubs, capsubs->completed_urb, urb)) || (err = usX2Y_urb_submit(playbacksubs, urb, frame))) { return err; } } playbacksubs->completed_urb = NULL; state = atomic_read(&capsubs->state); if (state >= state_PREPARED) { if (state == state_RUNNING) { if ((err = usX2Y_urb_capt_retire(capsubs))) return err; } else if (state >= state_PRERUNNING) atomic_inc(&capsubs->state); if ((err = 
usX2Y_urb_submit(capsubs, capsubs->completed_urb, frame))) return err; } capsubs->completed_urb = NULL; return 0; } static void usX2Y_clients_stop(struct usX2Ydev *usX2Y) { int s, u; for (s = 0; s < 4; s++) { struct snd_usX2Y_substream *subs = usX2Y->subs[s]; if (subs) { snd_printdd("%i %p state=%i\n", s, subs, atomic_read(&subs->state)); atomic_set(&subs->state, state_STOPPED); } } for (s = 0; s < 4; s++) { struct snd_usX2Y_substream *subs = usX2Y->subs[s]; if (subs) { if (atomic_read(&subs->state) >= state_PRERUNNING) { unsigned long flags; snd_pcm_stream_lock_irqsave(subs->pcm_substream, flags); snd_pcm_stop(subs->pcm_substream, SNDRV_PCM_STATE_XRUN); snd_pcm_stream_unlock_irqrestore(subs->pcm_substream, flags); } for (u = 0; u < NRURBS; u++) { struct urb *urb = subs->urb[u]; if (NULL != urb) snd_printdd("%i status=%i start_frame=%i\n", u, urb->status, urb->start_frame); } } } usX2Y->prepare_subs = NULL; wake_up(&usX2Y->prepare_wait_queue); } static void usX2Y_error_urb_status(struct usX2Ydev *usX2Y, struct snd_usX2Y_substream *subs, struct urb *urb) { snd_printk(KERN_ERR "ep=%i stalled with status=%i\n", subs->endpoint, urb->status); urb->status = 0; usX2Y_clients_stop(usX2Y); } static void i_usX2Y_urb_complete(struct urb *urb) { struct snd_usX2Y_substream *subs = urb->context; struct usX2Ydev *usX2Y = subs->usX2Y; if (unlikely(atomic_read(&subs->state) < state_PREPARED)) { snd_printdd("hcd_frame=%i ep=%i%s status=%i start_frame=%i\n", usb_get_current_frame_number(usX2Y->dev), subs->endpoint, usb_pipein(urb->pipe) ? 
"in" : "out", urb->status, urb->start_frame); return; } if (unlikely(urb->status)) { usX2Y_error_urb_status(usX2Y, subs, urb); return; } subs->completed_urb = urb; { struct snd_usX2Y_substream *capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE], *playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK]; if (capsubs->completed_urb && atomic_read(&capsubs->state) >= state_PREPARED && (playbacksubs->completed_urb || atomic_read(&playbacksubs->state) < state_PREPARED)) { if (!usX2Y_usbframe_complete(capsubs, playbacksubs, urb->start_frame)) usX2Y->wait_iso_frame += nr_of_packs(); else { snd_printdd("\n"); usX2Y_clients_stop(usX2Y); } } } } static void usX2Y_urbs_set_complete(struct usX2Ydev * usX2Y, void (*complete)(struct urb *)) { int s, u; for (s = 0; s < 4; s++) { struct snd_usX2Y_substream *subs = usX2Y->subs[s]; if (NULL != subs) for (u = 0; u < NRURBS; u++) { struct urb * urb = subs->urb[u]; if (NULL != urb) urb->complete = complete; } } } static void usX2Y_subs_startup_finish(struct usX2Ydev * usX2Y) { usX2Y_urbs_set_complete(usX2Y, i_usX2Y_urb_complete); usX2Y->prepare_subs = NULL; } static void i_usX2Y_subs_startup(struct urb *urb) { struct snd_usX2Y_substream *subs = urb->context; struct usX2Ydev *usX2Y = subs->usX2Y; struct snd_usX2Y_substream *prepare_subs = usX2Y->prepare_subs; if (NULL != prepare_subs) if (urb->start_frame == prepare_subs->urb[0]->start_frame) { usX2Y_subs_startup_finish(usX2Y); atomic_inc(&prepare_subs->state); wake_up(&usX2Y->prepare_wait_queue); } i_usX2Y_urb_complete(urb); } static void usX2Y_subs_prepare(struct snd_usX2Y_substream *subs) { snd_printdd("usX2Y_substream_prepare(%p) ep=%i urb0=%p urb1=%p\n", subs, subs->endpoint, subs->urb[0], subs->urb[1]); /* reset the pointer */ subs->hwptr = 0; subs->hwptr_done = 0; subs->transfer_done = 0; } static void usX2Y_urb_release(struct urb **urb, int free_tb) { if (*urb) { usb_kill_urb(*urb); if (free_tb) kfree((*urb)->transfer_buffer); usb_free_urb(*urb); *urb = NULL; } } /* * release a 
substreams urbs */ static void usX2Y_urbs_release(struct snd_usX2Y_substream *subs) { int i; snd_printdd("usX2Y_urbs_release() %i\n", subs->endpoint); for (i = 0; i < NRURBS; i++) usX2Y_urb_release(subs->urb + i, subs != subs->usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK]); kfree(subs->tmpbuf); subs->tmpbuf = NULL; } /* * initialize a substream's urbs */ static int usX2Y_urbs_allocate(struct snd_usX2Y_substream *subs) { int i; unsigned int pipe; int is_playback = subs == subs->usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK]; struct usb_device *dev = subs->usX2Y->dev; pipe = is_playback ? usb_sndisocpipe(dev, subs->endpoint) : usb_rcvisocpipe(dev, subs->endpoint); subs->maxpacksize = usb_maxpacket(dev, pipe, is_playback); if (!subs->maxpacksize) return -EINVAL; if (is_playback && NULL == subs->tmpbuf) { /* allocate a temporary buffer for playback */ subs->tmpbuf = kcalloc(nr_of_packs(), subs->maxpacksize, GFP_KERNEL); if (NULL == subs->tmpbuf) { snd_printk(KERN_ERR "cannot malloc tmpbuf\n"); return -ENOMEM; } } /* allocate and initialize data urbs */ for (i = 0; i < NRURBS; i++) { struct urb **purb = subs->urb + i; if (*purb) { usb_kill_urb(*purb); continue; } *purb = usb_alloc_urb(nr_of_packs(), GFP_KERNEL); if (NULL == *purb) { usX2Y_urbs_release(subs); return -ENOMEM; } if (!is_playback && !(*purb)->transfer_buffer) { /* allocate a capture buffer per urb */ (*purb)->transfer_buffer = kmalloc(subs->maxpacksize * nr_of_packs(), GFP_KERNEL); if (NULL == (*purb)->transfer_buffer) { usX2Y_urbs_release(subs); return -ENOMEM; } } (*purb)->dev = dev; (*purb)->pipe = pipe; (*purb)->number_of_packets = nr_of_packs(); (*purb)->context = subs; (*purb)->interval = 1; (*purb)->complete = i_usX2Y_subs_startup; } return 0; } static void usX2Y_subs_startup(struct snd_usX2Y_substream *subs) { struct usX2Ydev *usX2Y = subs->usX2Y; usX2Y->prepare_subs = subs; subs->urb[0]->start_frame = -1; wmb(); usX2Y_urbs_set_complete(usX2Y, i_usX2Y_subs_startup); } static int usX2Y_urbs_start(struct 
snd_usX2Y_substream *subs) { int i, err; struct usX2Ydev *usX2Y = subs->usX2Y; if ((err = usX2Y_urbs_allocate(subs)) < 0) return err; subs->completed_urb = NULL; for (i = 0; i < 4; i++) { struct snd_usX2Y_substream *subs = usX2Y->subs[i]; if (subs != NULL && atomic_read(&subs->state) >= state_PREPARED) goto start; } start: usX2Y_subs_startup(subs); for (i = 0; i < NRURBS; i++) { struct urb *urb = subs->urb[i]; if (usb_pipein(urb->pipe)) { unsigned long pack; if (0 == i) atomic_set(&subs->state, state_STARTING3); urb->dev = usX2Y->dev; for (pack = 0; pack < nr_of_packs(); pack++) { urb->iso_frame_desc[pack].offset = subs->maxpacksize * pack; urb->iso_frame_desc[pack].length = subs->maxpacksize; } urb->transfer_buffer_length = subs->maxpacksize * nr_of_packs(); if ((err = usb_submit_urb(urb, GFP_ATOMIC)) < 0) { snd_printk (KERN_ERR "cannot submit datapipe for urb %d, err = %d\n", i, err); err = -EPIPE; goto cleanup; } else if (i == 0) usX2Y->wait_iso_frame = urb->start_frame; urb->transfer_flags = 0; } else { atomic_set(&subs->state, state_STARTING1); break; } } err = 0; wait_event(usX2Y->prepare_wait_queue, NULL == usX2Y->prepare_subs); if (atomic_read(&subs->state) != state_PREPARED) err = -EPIPE; cleanup: if (err) { usX2Y_subs_startup_finish(usX2Y); usX2Y_clients_stop(usX2Y); // something is completely wroong > stop evrything } return err; } /* * return the current pcm pointer. just return the hwptr_done value. 
*/ static snd_pcm_uframes_t snd_usX2Y_pcm_pointer(struct snd_pcm_substream *substream) { struct snd_usX2Y_substream *subs = substream->runtime->private_data; return subs->hwptr_done; } /* * start/stop substream */ static int snd_usX2Y_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_usX2Y_substream *subs = substream->runtime->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: snd_printdd("snd_usX2Y_pcm_trigger(START)\n"); if (atomic_read(&subs->state) == state_PREPARED && atomic_read(&subs->usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE]->state) >= state_PREPARED) { atomic_set(&subs->state, state_PRERUNNING); } else { snd_printdd("\n"); return -EPIPE; } break; case SNDRV_PCM_TRIGGER_STOP: snd_printdd("snd_usX2Y_pcm_trigger(STOP)\n"); if (atomic_read(&subs->state) >= state_PRERUNNING) atomic_set(&subs->state, state_PREPARED); break; default: return -EINVAL; } return 0; } /* * allocate a buffer, setup samplerate * * so far we use a physically linear buffer although packetize transfer * doesn't need a continuous area. * if sg buffer is supported on the later version of alsa, we'll follow * that. */ static struct s_c2 { char c1, c2; } SetRate44100[] = { { 0x14, 0x08}, // this line sets 44100, well actually a little less { 0x18, 0x40}, // only tascam / frontier design knows the further lines ....... { 0x18, 0x42}, { 0x18, 0x45}, { 0x18, 0x46}, { 0x18, 0x48}, { 0x18, 0x4A}, { 0x18, 0x4C}, { 0x18, 0x4E}, { 0x18, 0x50}, { 0x18, 0x52}, { 0x18, 0x54}, { 0x18, 0x56}, { 0x18, 0x58}, { 0x18, 0x5A}, { 0x18, 0x5C}, { 0x18, 0x5E}, { 0x18, 0x60}, { 0x18, 0x62}, { 0x18, 0x64}, { 0x18, 0x66}, { 0x18, 0x68}, { 0x18, 0x6A}, { 0x18, 0x6C}, { 0x18, 0x6E}, { 0x18, 0x70}, { 0x18, 0x72}, { 0x18, 0x74}, { 0x18, 0x76}, { 0x18, 0x78}, { 0x18, 0x7A}, { 0x18, 0x7C}, { 0x18, 0x7E} }; static struct s_c2 SetRate48000[] = { { 0x14, 0x09}, // this line sets 48000, well actually a little less { 0x18, 0x40}, // only tascam / frontier design knows the further lines ....... 
{ 0x18, 0x42}, { 0x18, 0x45}, { 0x18, 0x46}, { 0x18, 0x48}, { 0x18, 0x4A}, { 0x18, 0x4C}, { 0x18, 0x4E}, { 0x18, 0x50}, { 0x18, 0x52}, { 0x18, 0x54}, { 0x18, 0x56}, { 0x18, 0x58}, { 0x18, 0x5A}, { 0x18, 0x5C}, { 0x18, 0x5E}, { 0x18, 0x60}, { 0x18, 0x62}, { 0x18, 0x64}, { 0x18, 0x66}, { 0x18, 0x68}, { 0x18, 0x6A}, { 0x18, 0x6C}, { 0x18, 0x6E}, { 0x18, 0x70}, { 0x18, 0x73}, { 0x18, 0x74}, { 0x18, 0x76}, { 0x18, 0x78}, { 0x18, 0x7A}, { 0x18, 0x7C}, { 0x18, 0x7E} }; #define NOOF_SETRATE_URBS ARRAY_SIZE(SetRate48000) static void i_usX2Y_04Int(struct urb *urb) { struct usX2Ydev *usX2Y = urb->context; if (urb->status) snd_printk(KERN_ERR "snd_usX2Y_04Int() urb->status=%i\n", urb->status); if (0 == --usX2Y->US04->len) wake_up(&usX2Y->In04WaitQueue); } static int usX2Y_rate_set(struct usX2Ydev *usX2Y, int rate) { int err = 0, i; struct snd_usX2Y_urbSeq *us = NULL; int *usbdata = NULL; struct s_c2 *ra = rate == 48000 ? SetRate48000 : SetRate44100; if (usX2Y->rate != rate) { us = kzalloc(sizeof(*us) + sizeof(struct urb*) * NOOF_SETRATE_URBS, GFP_KERNEL); if (NULL == us) { err = -ENOMEM; goto cleanup; } usbdata = kmalloc(sizeof(int) * NOOF_SETRATE_URBS, GFP_KERNEL); if (NULL == usbdata) { err = -ENOMEM; goto cleanup; } for (i = 0; i < NOOF_SETRATE_URBS; ++i) { if (NULL == (us->urb[i] = usb_alloc_urb(0, GFP_KERNEL))) { err = -ENOMEM; goto cleanup; } ((char*)(usbdata + i))[0] = ra[i].c1; ((char*)(usbdata + i))[1] = ra[i].c2; usb_fill_bulk_urb(us->urb[i], usX2Y->dev, usb_sndbulkpipe(usX2Y->dev, 4), usbdata + i, 2, i_usX2Y_04Int, usX2Y); #ifdef OLD_USB us->urb[i]->transfer_flags = USB_QUEUE_BULK; #endif } us->submitted = 0; us->len = NOOF_SETRATE_URBS; usX2Y->US04 = us; wait_event_timeout(usX2Y->In04WaitQueue, 0 == us->len, HZ); usX2Y->US04 = NULL; if (us->len) err = -ENODEV; cleanup: if (us) { us->submitted = 2*NOOF_SETRATE_URBS; for (i = 0; i < NOOF_SETRATE_URBS; ++i) { struct urb *urb = us->urb[i]; if (urb->status) { if (!err) err = -ENODEV; usb_kill_urb(urb); } 
usb_free_urb(urb); } usX2Y->US04 = NULL; kfree(usbdata); kfree(us); if (!err) usX2Y->rate = rate; } } return err; } static int usX2Y_format_set(struct usX2Ydev *usX2Y, snd_pcm_format_t format) { int alternate, err; struct list_head* p; if (format == SNDRV_PCM_FORMAT_S24_3LE) { alternate = 2; usX2Y->stride = 6; } else { alternate = 1; usX2Y->stride = 4; } list_for_each(p, &usX2Y->midi_list) { snd_usbmidi_input_stop(p); } usb_kill_urb(usX2Y->In04urb); if ((err = usb_set_interface(usX2Y->dev, 0, alternate))) { snd_printk(KERN_ERR "usb_set_interface error \n"); return err; } usX2Y->In04urb->dev = usX2Y->dev; err = usb_submit_urb(usX2Y->In04urb, GFP_KERNEL); list_for_each(p, &usX2Y->midi_list) { snd_usbmidi_input_start(p); } usX2Y->format = format; usX2Y->rate = 0; return err; } static int snd_usX2Y_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { int err = 0; unsigned int rate = params_rate(hw_params); snd_pcm_format_t format = params_format(hw_params); struct snd_card *card = substream->pstr->pcm->card; struct list_head *list; snd_printdd("snd_usX2Y_hw_params(%p, %p)\n", substream, hw_params); // all pcm substreams off one usX2Y have to operate at the same rate & format list_for_each(list, &card->devices) { struct snd_device *dev; struct snd_pcm *pcm; int s; dev = snd_device(list); if (dev->type != SNDRV_DEV_PCM) continue; pcm = dev->device_data; for (s = 0; s < 2; ++s) { struct snd_pcm_substream *test_substream; test_substream = pcm->streams[s].substream; if (test_substream && test_substream != substream && test_substream->runtime && ((test_substream->runtime->format && test_substream->runtime->format != format) || (test_substream->runtime->rate && test_substream->runtime->rate != rate))) return -EINVAL; } } if (0 > (err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)))) { snd_printk(KERN_ERR "snd_pcm_lib_malloc_pages(%p, %i) returned %i\n", substream, params_buffer_bytes(hw_params), err); return err; } 
return 0; } /* * free the buffer */ static int snd_usX2Y_pcm_hw_free(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_usX2Y_substream *subs = runtime->private_data; mutex_lock(&subs->usX2Y->prepare_mutex); snd_printdd("snd_usX2Y_hw_free(%p)\n", substream); if (SNDRV_PCM_STREAM_PLAYBACK == substream->stream) { struct snd_usX2Y_substream *cap_subs = subs->usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE]; atomic_set(&subs->state, state_STOPPED); usX2Y_urbs_release(subs); if (!cap_subs->pcm_substream || !cap_subs->pcm_substream->runtime || !cap_subs->pcm_substream->runtime->status || cap_subs->pcm_substream->runtime->status->state < SNDRV_PCM_STATE_PREPARED) { atomic_set(&cap_subs->state, state_STOPPED); usX2Y_urbs_release(cap_subs); } } else { struct snd_usX2Y_substream *playback_subs = subs->usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK]; if (atomic_read(&playback_subs->state) < state_PREPARED) { atomic_set(&subs->state, state_STOPPED); usX2Y_urbs_release(subs); } } mutex_unlock(&subs->usX2Y->prepare_mutex); return snd_pcm_lib_free_pages(substream); } /* * prepare callback * * set format and initialize urbs */ static int snd_usX2Y_pcm_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_usX2Y_substream *subs = runtime->private_data; struct usX2Ydev *usX2Y = subs->usX2Y; struct snd_usX2Y_substream *capsubs = subs->usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE]; int err = 0; snd_printdd("snd_usX2Y_pcm_prepare(%p)\n", substream); mutex_lock(&usX2Y->prepare_mutex); usX2Y_subs_prepare(subs); // Start hardware streams // SyncStream first.... if (atomic_read(&capsubs->state) < state_PREPARED) { if (usX2Y->format != runtime->format) if ((err = usX2Y_format_set(usX2Y, runtime->format)) < 0) goto up_prepare_mutex; if (usX2Y->rate != runtime->rate) if ((err = usX2Y_rate_set(usX2Y, runtime->rate)) < 0) goto up_prepare_mutex; snd_printdd("starting capture pipe for %s\n", subs == capsubs ? 
"self" : "playpipe"); if (0 > (err = usX2Y_urbs_start(capsubs))) goto up_prepare_mutex; } if (subs != capsubs && atomic_read(&subs->state) < state_PREPARED) err = usX2Y_urbs_start(subs); up_prepare_mutex: mutex_unlock(&usX2Y->prepare_mutex); return err; } static struct snd_pcm_hardware snd_usX2Y_2c = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BATCH), .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE, .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000, .rate_min = 44100, .rate_max = 48000, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = (2*128*1024), .period_bytes_min = 64, .period_bytes_max = (128*1024), .periods_min = 2, .periods_max = 1024, .fifo_size = 0 }; static int snd_usX2Y_pcm_open(struct snd_pcm_substream *substream) { struct snd_usX2Y_substream *subs = ((struct snd_usX2Y_substream **) snd_pcm_substream_chip(substream))[substream->stream]; struct snd_pcm_runtime *runtime = substream->runtime; if (subs->usX2Y->chip_status & USX2Y_STAT_CHIP_MMAP_PCM_URBS) return -EBUSY; runtime->hw = snd_usX2Y_2c; runtime->private_data = subs; subs->pcm_substream = substream; snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_TIME, 1000, 200000); return 0; } static int snd_usX2Y_pcm_close(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_usX2Y_substream *subs = runtime->private_data; subs->pcm_substream = NULL; return 0; } static struct snd_pcm_ops snd_usX2Y_pcm_ops = { .open = snd_usX2Y_pcm_open, .close = snd_usX2Y_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_usX2Y_pcm_hw_params, .hw_free = snd_usX2Y_pcm_hw_free, .prepare = snd_usX2Y_pcm_prepare, .trigger = snd_usX2Y_pcm_trigger, .pointer = snd_usX2Y_pcm_pointer, }; /* * free a usb stream instance */ static void usX2Y_audio_stream_free(struct snd_usX2Y_substream **usX2Y_substream) { 
kfree(usX2Y_substream[SNDRV_PCM_STREAM_PLAYBACK]); usX2Y_substream[SNDRV_PCM_STREAM_PLAYBACK] = NULL; kfree(usX2Y_substream[SNDRV_PCM_STREAM_CAPTURE]); usX2Y_substream[SNDRV_PCM_STREAM_CAPTURE] = NULL; } static void snd_usX2Y_pcm_private_free(struct snd_pcm *pcm) { struct snd_usX2Y_substream **usX2Y_stream = pcm->private_data; if (usX2Y_stream) usX2Y_audio_stream_free(usX2Y_stream); } static int usX2Y_audio_stream_new(struct snd_card *card, int playback_endpoint, int capture_endpoint) { struct snd_pcm *pcm; int err, i; struct snd_usX2Y_substream **usX2Y_substream = usX2Y(card)->subs + 2 * usX2Y(card)->pcm_devs; for (i = playback_endpoint ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE; i <= SNDRV_PCM_STREAM_CAPTURE; ++i) { usX2Y_substream[i] = kzalloc(sizeof(struct snd_usX2Y_substream), GFP_KERNEL); if (NULL == usX2Y_substream[i]) { snd_printk(KERN_ERR "cannot malloc\n"); return -ENOMEM; } usX2Y_substream[i]->usX2Y = usX2Y(card); } if (playback_endpoint) usX2Y_substream[SNDRV_PCM_STREAM_PLAYBACK]->endpoint = playback_endpoint; usX2Y_substream[SNDRV_PCM_STREAM_CAPTURE]->endpoint = capture_endpoint; err = snd_pcm_new(card, NAME_ALLCAPS" Audio", usX2Y(card)->pcm_devs, playback_endpoint ? 
1 : 0, 1, &pcm); if (err < 0) { usX2Y_audio_stream_free(usX2Y_substream); return err; } if (playback_endpoint) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_usX2Y_pcm_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_usX2Y_pcm_ops); pcm->private_data = usX2Y_substream; pcm->private_free = snd_usX2Y_pcm_private_free; pcm->info_flags = 0; sprintf(pcm->name, NAME_ALLCAPS" Audio #%d", usX2Y(card)->pcm_devs); if ((playback_endpoint && 0 > (err = snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream, SNDRV_DMA_TYPE_CONTINUOUS, snd_dma_continuous_data(GFP_KERNEL), 64*1024, 128*1024))) || 0 > (err = snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream, SNDRV_DMA_TYPE_CONTINUOUS, snd_dma_continuous_data(GFP_KERNEL), 64*1024, 128*1024))) { snd_usX2Y_pcm_private_free(pcm); return err; } usX2Y(card)->pcm_devs++; return 0; } /* * create a chip instance and set its names. */ int usX2Y_audio_create(struct snd_card *card) { int err = 0; INIT_LIST_HEAD(&usX2Y(card)->pcm_list); if (0 > (err = usX2Y_audio_stream_new(card, 0xA, 0x8))) return err; if (le16_to_cpu(usX2Y(card)->dev->descriptor.idProduct) == USB_ID_US428) if (0 > (err = usX2Y_audio_stream_new(card, 0, 0xA))) return err; if (le16_to_cpu(usX2Y(card)->dev->descriptor.idProduct) != USB_ID_US122) err = usX2Y_rate_set(usX2Y(card), 44100); // Lets us428 recognize output-volume settings, disturbs us122. return err; }
gpl-2.0
jyizheng/net-next-nuse-old
drivers/video/fbdev/skeletonfb.c
1868
36774
/*
 *  linux/drivers/video/skeletonfb.c -- Skeleton for a frame buffer device
 *
 *  Modified to new api Jan 2001 by James Simmons (jsimmons@transvirtual.com)
 *
 *  Created 28 Dec 1997 by Geert Uytterhoeven
 *
 *
 *  I have started rewriting this driver as an example of the upcoming new API
 *  The primary goal is to remove the console code from fbdev and place it
 *  into fbcon.c. This reduces the code and makes writing a new fbdev driver
 *  easy since the author doesn't need to worry about console internals. It
 *  also allows the ability to run fbdev without a console/tty system on top
 *  of it.
 *
 *  First the roles of struct fb_info and struct display have changed. Struct
 *  display will go away. The way the new framebuffer console code will
 *  work is that it will act to translate data about the tty/console in
 *  struct vc_data to data in a device independent way in struct fb_info. Then
 *  various functions in struct fb_ops will be called to store the device
 *  dependent state in the par field in struct fb_info and to change the
 *  hardware to that state. This allows a very clean separation of the fbdev
 *  layer from the console layer. It also allows one to use fbdev on its own
 *  which is a bonus for embedded devices. The reason this approach works is
 *  for each framebuffer device when used as a tty/console device is allocated
 *  a set of virtual terminals to it. Only one virtual terminal can be active
 *  per framebuffer device. We already have all the data we need in struct
 *  vc_data so why store a bunch of colormaps and other fbdev specific data
 *  per virtual terminal.
 *
 *  As you can see doing this makes the con parameter pretty much useless
 *  for struct fb_ops functions, as it should be. Also having struct
 *  fb_var_screeninfo and other data in fb_info pretty much eliminates the
 *  need for get_fix and get_var. Once all drivers use the fix, var, and cmap
 *  fbcon can be written around these fields. This will also eliminate the
 *  need to regenerate struct fb_var_screeninfo, struct fb_fix_screeninfo
 *  and struct fb_cmap every time get_var, get_fix, get_cmap functions are
 *  called as many drivers do now.
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License. See the file COPYING in the main directory of this archive for
 *  more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/pci.h>

    /*
     *  This is just simple sample code.
     *
     *  No warranty that it actually compiles.
     *  Even less warranty that it actually works :-)
     */

/*
 * Driver data
 */
static char *mode_option;

/*
 *  If your driver supports multiple boards, you should make the
 *  below data types arrays, or allocate them dynamically (using kmalloc()).
 */

/*
 * This structure defines the hardware state of the graphics card. Normally
 * you place this in a header file in linux/include/video. This file usually
 * also includes register information. That allows other driver subsystems
 * and userland applications the ability to use the same header file to
 * avoid duplicate work and easy porting of software.
 */
struct xxx_par;

/*
 * Here we define the default structs fb_fix_screeninfo and fb_var_screeninfo
 * if we don't use modedb. If we do use modedb see xxxfb_init how to use it
 * to get a fb_var_screeninfo. Otherwise define a default var as well.
 */
static struct fb_fix_screeninfo xxxfb_fix = {
	.id =		"FB's name",
	.type =		FB_TYPE_PACKED_PIXELS,
	.visual =	FB_VISUAL_PSEUDOCOLOR,
	.xpanstep =	1,
	.ypanstep =	1,
	.ywrapstep =	1,
	.accel =	FB_ACCEL_NONE,
};

/*
 * Modern graphical hardware not only supports pipelines but some
 * also support multiple monitors where each display can have
 * its own unique data. In this case each display could be
 * represented by a separate framebuffer device thus a separate
 * struct fb_info. Now the struct xxx_par represents the graphics
 * hardware state thus only one exist per card. In this case the
 * struct xxx_par for each graphics card would be shared between
 * every struct fb_info that represents a framebuffer on that card.
 * This allows when one display changes it video resolution (info->var)
 * the other displays know instantly. Each display can always be
 * aware of the entire hardware state that affects it because they share
 * the same xxx_par struct. The other side of the coin is multiple
 * graphics cards that pass data around until it is finally displayed
 * on one monitor. Such examples are the voodoo 1 cards and high end
 * NUMA graphics servers. For this case we have a bunch of pars, each
 * one that represents a graphics state, that belong to one struct
 * fb_info. There you would want to have *par point to an array of device
 * states and have each struct fb_ops function deal with all those
 * states. I hope this covers every possible hardware design. If not
 * feel free to send your ideas at jsimmons@users.sf.net
 */

/*
 * If your driver supports multiple boards or it supports multiple
 * framebuffers, you should make these arrays, or allocate them
 * dynamically using framebuffer_alloc() and free them with
 * framebuffer_release().
 */
static struct fb_info info;

/*
 * Each one represents the state of the hardware. Most hardware have
 * just one hardware state. These here represent the default state(s).
 */
static struct xxx_par __initdata current_par;

int xxxfb_init(void);

/**
 * xxxfb_open - Optional function. Called when the framebuffer is
 *		first accessed.
 * @info: frame buffer structure that represents a single frame buffer
 * @user: tell us if the userland (value=1) or the console is accessing
 *        the framebuffer.
 *
 * This function is the first function called in the framebuffer api.
 * Usually you don't need to provide this function.
The case where it
 * is used is to change from a text mode hardware state to a graphics
 * mode state.
 *
 * Returns negative errno on error, or zero on success.
 */
static int xxxfb_open(struct fb_info *info, int user)
{
	return 0;
}

/**
 * xxxfb_release - Optional function. Called when the framebuffer
 *		   device is closed.
 * @info: frame buffer structure that represents a single frame buffer
 * @user: tell us if the userland (value=1) or the console is accessing
 *        the framebuffer.
 *
 * This function is called when we close /dev/fb or the framebuffer
 * console system is released. Usually you don't need this function.
 * The case where it is usually used is to go from a graphics state
 * to a text mode state.
 *
 * Returns negative errno on error, or zero on success.
 */
static int xxxfb_release(struct fb_info *info, int user)
{
	return 0;
}

/**
 * xxxfb_check_var - Optional function. Validates a var passed in.
 * @var: frame buffer variable screen structure
 * @info: frame buffer structure that represents a single frame buffer
 *
 * Checks to see if the hardware supports the state requested by
 * var passed in. This function does not alter the hardware state!!!
 * This means the data stored in struct fb_info and struct xxx_par do
 * not change. This includes the var inside of struct fb_info.
 * Do NOT change these. This function can be called on its own if we
 * intend to only test a mode and not actually set it. The stuff in
 * modedb.c is an example of this. If the var passed in is slightly
 * off by what the hardware can support then we alter the var PASSED in
 * to what we can do.
 *
 * For values that are off, this function must round them _up_ to the
 * next value that is supported by the hardware. If the value is
 * greater than the highest value supported by the hardware, then this
 * function must return -EINVAL.
 *
 * Exception to the above rule: Some drivers have a fixed mode, ie,
 * the hardware is already set at boot up, and cannot be changed. In
 * this case, it is more acceptable that this function just return
 * a copy of the currently working var (info->var). Better is to not
 * implement this function, as the upper layer will do the copying
 * of the current var for you.
 *
 * Note: This is the only function where the contents of var can be
 * freely adjusted after the driver has been registered. If you find
 * that you have code outside of this function that alters the content
 * of var, then you are doing something wrong. Note also that the
 * contents of info->var must be left untouched at all times after
 * driver registration.
 *
 * Returns negative errno on error, or zero on success.
 */
static int xxxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	/* ... */
	return 0;
}

/**
 * xxxfb_set_par - Optional function. Alters the hardware state.
 * @info: frame buffer structure that represents a single frame buffer
 *
 * Using the fb_var_screeninfo in fb_info we set the resolution of
 * this particular framebuffer. This function alters the par AND the
 * fb_fix_screeninfo stored in fb_info. It does not alter var in
 * fb_info since we are using that data. This means we depend on the
 * data in var inside fb_info to be supported by the hardware.
 *
 * This function is also used to recover/restore the hardware to a
 * known working state.
 *
 * xxxfb_check_var is always called before xxxfb_set_par to ensure that
 * the contents of var is always valid.
 *
 * Again if you can't change the resolution you don't need this function.
 *
 * However, even if your hardware does not support mode changing,
 * a set_par might be needed to at least initialize the hardware to
 * a known working state, especially if it came back from another
 * process that also modifies the same hardware, such as X.
*
 * If this is the case, a combination such as the following should work:
 *
 * static int xxxfb_check_var(struct fb_var_screeninfo *var,
 *                            struct fb_info *info)
 * {
 *	*var = info->var;
 *	return 0;
 * }
 *
 * static int xxxfb_set_par(struct fb_info *info)
 * {
 *	init your hardware here
 * }
 *
 * Returns negative errno on error, or zero on success.
 */
static int xxxfb_set_par(struct fb_info *info)
{
	struct xxx_par *par = info->par;
	/* ... */
	return 0;
}

/**
 * xxxfb_setcolreg - Optional function. Sets a color register.
 * @regno: Which register in the CLUT we are programming
 * @red: The red value which can be up to 16 bits wide
 * @green: The green value which can be up to 16 bits wide
 * @blue:  The blue value which can be up to 16 bits wide.
 * @transp: If supported, the alpha value which can be up to 16 bits wide.
 * @info: frame buffer info structure
 *
 * Set a single color register. The values supplied have a 16 bit
 * magnitude which needs to be scaled in this function for the hardware.
 * Things to take into consideration are how many color registers, if
 * any, are supported with the current color visual. With truecolor mode
 * no color palettes are supported. Here a pseudo palette is created
 * which we store the value in pseudo_palette in struct fb_info. For
 * pseudocolor mode we have a limited color palette. To deal with this
 * we can program what color is displayed for a particular pixel value.
 * DirectColor is similar in that we can program each color field. If
 * we have a static colormap we don't need to implement this function.
 *
 * Returns negative errno on error, or zero on success.
 */
static int xxxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
			   unsigned blue, unsigned transp, struct fb_info *info)
{
	if (regno >= 256)	/* no. of hw registers */
		return -EINVAL;
	/*
	 * Program hardware... do anything you want with transp
	 */

	/* grayscale works only partially under directcolor */
	if (info->var.grayscale) {
		/* grayscale = 0.30*R + 0.59*G + 0.11*B */
		red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
	}

	/* Directcolor:
	 *   var->{color}.offset contains start of bitfield
	 *   var->{color}.length contains length of bitfield
	 *   {hardwarespecific} contains width of DAC
	 *   pseudo_palette[X] is programmed to (X << red.offset) |
	 *                                      (X << green.offset) |
	 *                                      (X << blue.offset)
	 *   RAMDAC[X] is programmed to (red, green, blue)
	 *   color depth = SUM(var->{color}.length)
	 *
	 * Pseudocolor:
	 *    var->{color}.offset is 0 unless the palette index takes less than
	 *                        bits_per_pixel bits and is stored in the upper
	 *                        bits of the pixel value
	 *    var->{color}.length is set so that 1 << length is the number of
	 *                        available palette entries
	 *    pseudo_palette is not used
	 *    RAMDAC[X] is programmed to (red, green, blue)
	 *    color depth = var->{color}.length
	 *
	 * Static pseudocolor:
	 *    same as Pseudocolor, but the RAMDAC is not programmed (read-only)
	 *
	 * Mono01/Mono10:
	 *    Has only 2 values, black on white or white on black (fg on bg),
	 *    var->{color}.offset is 0
	 *    white = (1 << var->{color}.length) - 1, black = 0
	 *    pseudo_palette is not used
	 *    RAMDAC does not exist
	 *    color depth is always 2
	 *
	 * Truecolor:
	 *    does not use RAMDAC (usually has 3 of them).
	 *    var->{color}.offset contains start of bitfield
	 *    var->{color}.length contains length of bitfield
	 *    pseudo_palette is programmed to (red << red.offset) |
	 *                                    (green << green.offset) |
	 *                                    (blue << blue.offset) |
	 *                                    (transp << transp.offset)
	 *    RAMDAC does not exist
	 *    color depth = SUM(var->{color}.length})
	 *
	 * The color depth is used by fbcon for choosing the logo and also
	 * for color palette transformation if color depth < 4
	 *
	 * As can be seen from the above, the field bits_per_pixel is _NOT_
	 * a criteria for describing the color visual.
	 *
	 * A common mistake is assuming that bits_per_pixel <= 8 is pseudocolor,
	 * and higher than that, true/directcolor. This is incorrect, one needs
	 * to look at the fix->visual.
	 *
	 * Another common mistake is using bits_per_pixel to calculate the color
	 * depth. The bits_per_pixel field does not directly translate to color
	 * depth. You have to compute for the color depth (using the color
	 * bitfields) and fix->visual as seen above.
	 */

	/*
	 * This is the point where the color is converted to something that
	 * is acceptable by the hardware.
	 */
#define CNVT_TOHW(val,width) ((((val)<<(width))+0x7FFF-(val))>>16)
	red = CNVT_TOHW(red, info->var.red.length);
	green = CNVT_TOHW(green, info->var.green.length);
	blue = CNVT_TOHW(blue, info->var.blue.length);
	transp = CNVT_TOHW(transp, info->var.transp.length);
#undef CNVT_TOHW

	/*
	 * This is the point where the function feeds the color to the hardware
	 * palette after converting the colors to something acceptable by
	 * the hardware. Note, only FB_VISUAL_DIRECTCOLOR and
	 * FB_VISUAL_PSEUDOCOLOR visuals need to write to the hardware palette.
	 * If you have code that writes to the hardware CLUT, and it's not
	 * any of the above visuals, then you are doing something wrong.
	 */
	if (info->fix.visual == FB_VISUAL_DIRECTCOLOR ||
	    info->fix.visual == FB_VISUAL_TRUECOLOR)
		write_{red|green|blue|transp}_to_clut();

	/* This is the point where you need to fill up the contents of
	 * info->pseudo_palette. This structure is used _only_ by fbcon, thus
	 * it only contains 16 entries to match the number of colors supported
	 * by the console. The pseudo_palette is used only if the visual is
	 * in directcolor or truecolor mode. With other visuals, the
	 * pseudo_palette is not used. (This might change in the future.)
	 *
	 * The contents of the pseudo_palette is in raw pixel format. Ie, each
	 * entry can be written directly to the framebuffer without any conversion.
	 * The pseudo_palette is (void *). However, if using the generic
	 * drawing functions (cfb_imageblit, cfb_fillrect), the pseudo_palette
	 * must be cast to (u32 *) _regardless_ of the bits per pixel. If the
	 * driver is using its own drawing functions, then it can use whatever
	 * size it wants.
	 */
	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
		u32 v;

		if (regno >= 16)
			return -EINVAL;

		v = (red << info->var.red.offset) |
		    (green << info->var.green.offset) |
		    (blue << info->var.blue.offset) |
		    (transp << info->var.transp.offset);

		((u32*)(info->pseudo_palette))[regno] = v;
	}

	/* ... */
	return 0;
}

/**
 * xxxfb_pan_display - NOT a required function. Pans the display.
 * @var: frame buffer variable screen structure
 * @info: frame buffer structure that represents a single frame buffer
 *
 * Pan (or wrap, depending on the `vmode' field) the display using the
 * `xoffset' and `yoffset' fields of the `var' structure.
 * If the values don't fit, return -EINVAL.
 *
 * Returns negative errno on error, or zero on success.
 */
static int xxxfb_pan_display(struct fb_var_screeninfo *var,
			     struct fb_info *info)
{
	/*
	 * If your hardware does not support panning, _do_ _not_ implement this
	 * function. Creating a dummy function will just confuse user apps.
	 */

	/*
	 * Note that even if this function is fully functional, a setting of
	 * 0 in both xpanstep and ypanstep means that this function will never
	 * get called.
	 */

	/* ... */
	return 0;
}

/**
 * xxxfb_blank - NOT a required function. Blanks the display.
 * @blank_mode: the blank mode we want.
 * @info: frame buffer structure that represents a single frame buffer
 *
 * Blank the screen if blank_mode != FB_BLANK_UNBLANK, else unblank.
 * Return 0 if blanking succeeded, != 0 if un-/blanking failed due to
 * e.g. a video mode which doesn't support it.
 *
 * Implements VESA suspend and powerdown modes on hardware that supports
 * disabling hsync/vsync:
 *
 *   FB_BLANK_NORMAL = display is blanked, syncs are on.
 *   FB_BLANK_HSYNC_SUSPEND = hsync off
 *   FB_BLANK_VSYNC_SUSPEND = vsync off
 *   FB_BLANK_POWERDOWN = hsync and vsync off
 *
 * If implementing this function, at least support FB_BLANK_UNBLANK.
* Return !0 for any modes that are unimplemented.
 *
 */
static int xxxfb_blank(int blank_mode, struct fb_info *info)
{
	/* ... */
	return 0;
}

/* ------------ Accelerated Functions --------------------- */

/*
 * We provide our own functions if we have hardware acceleration
 * or non packed pixel format layouts. If we have no hardware
 * acceleration, we can use a generic unaccelerated function. If using
 * a pack pixel format just use the functions in cfb_*.c. Each file
 * has one of the three different accel functions we support.
 */

/**
 * xxxfb_fillrect - REQUIRED function. Can use generic routines if
 *		    non accelerated hardware and packed pixel based.
 *		    Draws a rectangle on the screen.
 *
 * @info: frame buffer structure that represents a single frame buffer
 * @region: The structure representing the rectangular region we
 *	    wish to draw to.
 *
 * This drawing operation places/removes a rectangle on the screen
 * depending on the rastering operation with the value of color which
 * is in the current color depth format.
 */
void xxxfb_fillrect(struct fb_info *p, const struct fb_fillrect *region)
{
	/* Meaning of struct fb_fillrect
	 *
	 * @dx: The x and y coordinates of the upper left hand corner of the
	 * @dy: area we want to draw to.
	 * @width: How wide the rectangle is we want to draw.
	 * @height: How tall the rectangle is we want to draw.
	 * @color:  The color to fill in the rectangle with.
	 * @rop: The raster operation. We can draw the rectangle with a COPY
	 *       or XOR which provides erasing effect.
	 */
}

/**
 * xxxfb_copyarea - REQUIRED function. Can use generic routines if
 *		    non accelerated hardware and packed pixel based.
 *		    Copies one area of the screen to another area.
 *
 * @info: frame buffer structure that represents a single frame buffer
 * @area: Structure providing the data to copy the framebuffer contents
 *	  from one region to another.
 *
 * This drawing operation copies a rectangular area from one area of the
 * screen to another area.
 */
void xxxfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
{
	/*
	 * @dx: The x and y coordinates of the upper left hand corner of the
	 * @dy: destination area on the screen.
	 * @width: How wide the rectangle is we want to copy.
	 * @height: How tall the rectangle is we want to copy.
	 * @sx: The x and y coordinates of the upper left hand corner of the
	 * @sy: source area on the screen.
	 */
}

/**
 * xxxfb_imageblit - REQUIRED function. Can use generic routines if
 *		     non accelerated hardware and packed pixel based.
 *		     Copies an image from system memory to the screen.
 *
 * @info: frame buffer structure that represents a single frame buffer
 * @image: structure defining the image.
 *
 * This drawing operation draws an image on the screen. It can be a
 * mono image (needed for font handling) or a color image (needed for
 * tux).
 */
void xxxfb_imageblit(struct fb_info *p, const struct fb_image *image)
{
	/*
	 * @dx: The x and y coordinates of the upper left hand corner of the
	 * @dy: destination area to place the image on the screen.
	 * @width: How wide the image is we want to copy.
	 * @height: How tall the image is we want to copy.
	 * @fg_color: For mono bitmap images this is color data for
	 * @bg_color: the foreground and background of the image to
	 *	      write directly to the framebuffer.
	 * @depth: How many bits represent a single pixel for this image.
	 * @data: The actual data used to construct the image on the display.
	 * @cmap: The colormap used for color images.
	 */

	/*
	 * The generic function, cfb_imageblit, expects that the bitmap
	 * scanlines are padded to the next byte. Most hardware accelerators
	 * may require padding to the next u16 or the next u32. If that is
	 * the case, the driver can specify this by setting
	 * info->pixmap.scan_align = 2 or 4. See a more comprehensive
	 * description of the pixmap below.
	 */
}

/**
 * xxxfb_cursor - OPTIONAL. If your hardware lacks support
 *		  for a cursor, leave this field NULL.
 *
 * @info: frame buffer structure that represents a single frame buffer
 * @cursor: structure defining the cursor to draw.
 *
 * This operation is used to set or alter the properties of the
 * cursor.
 *
 * Returns negative errno on error, or zero on success.
 */
int xxxfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
	/* NOTE(review): declared int but has no return statement — confirm
	 * against the upper layer whether a status should be returned here.
	 */
	/*
	 * @set:    Which fields we are altering in struct fb_cursor
	 * @enable: Disable or enable the cursor
	 * @rop:    The bit operation we want to do.
	 * @mask:   This is the cursor mask bitmap.
	 * @dest:   An image of the area we are going to display the cursor.
	 *	    Used internally by the driver.
	 * @hot:    The hot spot.
	 * @image:  The actual data for the cursor image.
	 *
	 * NOTES ON FLAGS (cursor->set):
	 *
	 * FB_CUR_SETIMAGE - the cursor image has changed (cursor->image.data)
	 * FB_CUR_SETPOS   - the cursor position has changed (cursor->image.dx|dy)
	 * FB_CUR_SETHOT   - the cursor hot spot has changed (cursor->hot.dx|dy)
	 * FB_CUR_SETCMAP  - the cursor colors has changed (cursor->fg_color|bg_color)
	 * FB_CUR_SETSHAPE - the cursor bitmask has changed (cursor->mask)
	 * FB_CUR_SETSIZE  - the cursor size has changed (cursor->width|height)
	 * FB_CUR_SETALL   - everything has changed
	 *
	 * NOTES ON ROPs (cursor->rop, Raster Operation)
	 *
	 * ROP_XOR  - cursor->image.data XOR cursor->mask
	 * ROP_COPY - cursor->image.data AND cursor->mask
	 *
	 * OTHER NOTES:
	 *
	 * - fbcon only supports a 2-color cursor (cursor->image.depth = 1)
	 * - The fb_cursor structure, @cursor, _will_ always contain valid
	 *   fields, whether any particular bitfields in cursor->set is set
	 *   or not.
	 */
}

/**
 * xxxfb_rotate - NOT a required function. If your hardware
 *		  supports rotation the whole screen then
 *		  you would provide a hook for this.
 *
 * @info: frame buffer structure that represents a single frame buffer
 * @angle: The angle we rotate the screen.
 *
 * This operation is used to rotate the screen.
*/ void xxxfb_rotate(struct fb_info *info, int angle) { /* Will be deprecated */ } /** * xxxfb_sync - NOT a required function. Normally the accel engine * for a graphics card take a specific amount of time. * Often we have to wait for the accelerator to finish * its operation before we can write to the framebuffer * so we can have consistent display output. * * @info: frame buffer structure that represents a single frame buffer * * If the driver has implemented its own hardware-based drawing function, * implementing this function is highly recommended. */ int xxxfb_sync(struct fb_info *info) { return 0; } /* * Frame buffer operations */ static struct fb_ops xxxfb_ops = { .owner = THIS_MODULE, .fb_open = xxxfb_open, .fb_read = xxxfb_read, .fb_write = xxxfb_write, .fb_release = xxxfb_release, .fb_check_var = xxxfb_check_var, .fb_set_par = xxxfb_set_par, .fb_setcolreg = xxxfb_setcolreg, .fb_blank = xxxfb_blank, .fb_pan_display = xxxfb_pan_display, .fb_fillrect = xxxfb_fillrect, /* Needed !!! */ .fb_copyarea = xxxfb_copyarea, /* Needed !!! */ .fb_imageblit = xxxfb_imageblit, /* Needed !!! */ .fb_cursor = xxxfb_cursor, /* Optional !!! */ .fb_rotate = xxxfb_rotate, .fb_sync = xxxfb_sync, .fb_ioctl = xxxfb_ioctl, .fb_mmap = xxxfb_mmap, }; /* ------------------------------------------------------------------------- */ /* * Initialization */ /* static int __init xxfb_probe (struct platform_device *pdev) -- for platform devs */ static int xxxfb_probe(struct pci_dev *dev, const struct pci_device_id *ent) { struct fb_info *info; struct xxx_par *par; struct device *device = &dev->dev; /* or &pdev->dev */ int cmap_len, retval; /* * Dynamically allocate info and par */ info = framebuffer_alloc(sizeof(struct xxx_par), device); if (!info) { /* goto error path */ } par = info->par; /* * Here we set the screen_base to the virtual memory address * for the framebuffer. 
Usually we obtain the resource address * from the bus layer and then translate it to virtual memory * space via ioremap. Consult ioport.h. */ info->screen_base = framebuffer_virtual_memory; info->fbops = &xxxfb_ops; info->fix = xxxfb_fix; info->pseudo_palette = pseudo_palette; /* The pseudopalette is an * 16-member array */ /* * Set up flags to indicate what sort of acceleration your * driver can provide (pan/wrap/copyarea/etc.) and whether it * is a module -- see FBINFO_* in include/linux/fb.h * * If your hardware can support any of the hardware accelerated functions * fbcon performance will improve if info->flags is set properly. * * FBINFO_HWACCEL_COPYAREA - hardware moves * FBINFO_HWACCEL_FILLRECT - hardware fills * FBINFO_HWACCEL_IMAGEBLIT - hardware mono->color expansion * FBINFO_HWACCEL_YPAN - hardware can pan display in y-axis * FBINFO_HWACCEL_YWRAP - hardware can wrap display in y-axis * FBINFO_HWACCEL_DISABLED - supports hardware accels, but disabled * FBINFO_READS_FAST - if set, prefer moves over mono->color expansion * FBINFO_MISC_TILEBLITTING - hardware can do tile blits * * NOTE: These are for fbcon use only. */ info->flags = FBINFO_DEFAULT; /********************* This stage is optional ******************************/ /* * The struct pixmap is a scratch pad for the drawing functions. This * is where the monochrome bitmap is constructed by the higher layers * and then passed to the accelerator. For drivers that uses * cfb_imageblit, you can skip this part. For those that have a more * rigorous requirement, this stage is needed */ /* PIXMAP_SIZE should be small enough to optimize drawing, but not * large enough that memory is wasted. A safe size is * (max_xres * max_font_height/8). max_xres is driver dependent, * max_font_height is 32. 
*/ info->pixmap.addr = kmalloc(PIXMAP_SIZE, GFP_KERNEL); if (!info->pixmap.addr) { /* goto error */ } info->pixmap.size = PIXMAP_SIZE; /* * FB_PIXMAP_SYSTEM - memory is in system ram * FB_PIXMAP_IO - memory is iomapped * FB_PIXMAP_SYNC - if set, will call fb_sync() per access to pixmap, * usually if FB_PIXMAP_IO is set. * * Currently, FB_PIXMAP_IO is unimplemented. */ info->pixmap.flags = FB_PIXMAP_SYSTEM; /* * scan_align is the number of padding for each scanline. It is in bytes. * Thus for accelerators that need padding to the next u32, put 4 here. */ info->pixmap.scan_align = 4; /* * buf_align is the amount to be padded for the buffer. For example, * the i810fb needs a scan_align of 2 but expects it to be fed with * dwords, so a buf_align = 4 is required. */ info->pixmap.buf_align = 4; /* access_align is how many bits can be accessed from the framebuffer * ie. some epson cards allow 16-bit access only. Most drivers will * be safe with u32 here. * * NOTE: This field is currently unused. */ info->pixmap.access_align = 32; /***************************** End optional stage ***************************/ /* * This should give a reasonable default video mode. The following is * done when we can set a video mode. */ if (!mode_option) mode_option = "640x480@60"; retval = fb_find_mode(&info->var, info, mode_option, NULL, 0, NULL, 8); if (!retval || retval == 4) return -EINVAL; /* This has to be done! */ if (fb_alloc_cmap(&info->cmap, cmap_len, 0)) return -ENOMEM; /* * The following is done in the case of having hardware with a static * mode. If we are setting the mode ourselves we don't call this. */ info->var = xxxfb_var; /* * For drivers that can... */ xxxfb_check_var(&info->var, info); /* * Does a call to fb_set_par() before register_framebuffer needed? This * will depend on you and the hardware. If you are sure that your driver * is the only device in the system, a call to fb_set_par() is safe. * * Hardware in x86 systems has a VGA core. 
Calling set_par() at this * point will corrupt the VGA console, so it might be safer to skip a * call to set_par here and just allow fbcon to do it for you. */ /* xxxfb_set_par(info); */ if (register_framebuffer(info) < 0) { fb_dealloc_cmap(&info->cmap); return -EINVAL; } fb_info(info, "%s frame buffer device\n", info->fix.id); pci_set_drvdata(dev, info); /* or platform_set_drvdata(pdev, info) */ return 0; } /* * Cleanup */ /* static void xxxfb_remove(struct platform_device *pdev) */ static void xxxfb_remove(struct pci_dev *dev) { struct fb_info *info = pci_get_drvdata(dev); /* or platform_get_drvdata(pdev); */ if (info) { unregister_framebuffer(info); fb_dealloc_cmap(&info->cmap); /* ... */ framebuffer_release(info); } } #ifdef CONFIG_PCI #ifdef CONFIG_PM /** * xxxfb_suspend - Optional but recommended function. Suspend the device. * @dev: PCI device * @msg: the suspend event code. * * See Documentation/power/devices.txt for more information */ static int xxxfb_suspend(struct pci_dev *dev, pm_message_t msg) { struct fb_info *info = pci_get_drvdata(dev); struct xxxfb_par *par = info->par; /* suspend here */ return 0; } /** * xxxfb_resume - Optional but recommended function. Resume the device. 
* @dev: PCI device * * See Documentation/power/devices.txt for more information */ static int xxxfb_resume(struct pci_dev *dev) { struct fb_info *info = pci_get_drvdata(dev); struct xxxfb_par *par = info->par; /* resume here */ return 0; } #else #define xxxfb_suspend NULL #define xxxfb_resume NULL #endif /* CONFIG_PM */ static struct pci_device_id xxxfb_id_table[] = { { PCI_VENDOR_ID_XXX, PCI_DEVICE_ID_XXX, PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY << 16, PCI_CLASS_MASK, 0 }, { 0, } }; /* For PCI drivers */ static struct pci_driver xxxfb_driver = { .name = "xxxfb", .id_table = xxxfb_id_table, .probe = xxxfb_probe, .remove = xxxfb_remove, .suspend = xxxfb_suspend, /* optional but recommended */ .resume = xxxfb_resume, /* optional but recommended */ }; MODULE_DEVICE_TABLE(pci, xxxfb_id_table); int __init xxxfb_init(void) { /* * For kernel boot options (in 'video=xxxfb:<options>' format) */ #ifndef MODULE char *option = NULL; if (fb_get_options("xxxfb", &option)) return -ENODEV; xxxfb_setup(option); #endif return pci_register_driver(&xxxfb_driver); } static void __exit xxxfb_exit(void) { pci_unregister_driver(&xxxfb_driver); } #else /* non PCI, platform drivers */ #include <linux/platform_device.h> /* for platform devices */ #ifdef CONFIG_PM /** * xxxfb_suspend - Optional but recommended function. Suspend the device. * @dev: platform device * @msg: the suspend event code. * * See Documentation/power/devices.txt for more information */ static int xxxfb_suspend(struct platform_device *dev, pm_message_t msg) { struct fb_info *info = platform_get_drvdata(dev); struct xxxfb_par *par = info->par; /* suspend here */ return 0; } /** * xxxfb_resume - Optional but recommended function. Resume the device. 
* @dev: platform device * * See Documentation/power/devices.txt for more information */ static int xxxfb_resume(struct platform_dev *dev) { struct fb_info *info = platform_get_drvdata(dev); struct xxxfb_par *par = info->par; /* resume here */ return 0; } #else #define xxxfb_suspend NULL #define xxxfb_resume NULL #endif /* CONFIG_PM */ static struct platform_device_driver xxxfb_driver = { .probe = xxxfb_probe, .remove = xxxfb_remove, .suspend = xxxfb_suspend, /* optional but recommended */ .resume = xxxfb_resume, /* optional but recommended */ .driver = { .name = "xxxfb", }, }; static struct platform_device *xxxfb_device; #ifndef MODULE /* * Setup */ /* * Only necessary if your driver takes special options, * otherwise we fall back on the generic fb_setup(). */ int __init xxxfb_setup(char *options) { /* Parse user specified options (`video=xxxfb:') */ } #endif /* MODULE */ static int __init xxxfb_init(void) { int ret; /* * For kernel boot options (in 'video=xxxfb:<options>' format) */ #ifndef MODULE char *option = NULL; if (fb_get_options("xxxfb", &option)) return -ENODEV; xxxfb_setup(option); #endif ret = platform_driver_register(&xxxfb_driver); if (!ret) { xxxfb_device = platform_device_register_simple("xxxfb", 0, NULL, 0); if (IS_ERR(xxxfb_device)) { platform_driver_unregister(&xxxfb_driver); ret = PTR_ERR(xxxfb_device); } } return ret; } static void __exit xxxfb_exit(void) { platform_device_unregister(xxxfb_device); platform_driver_unregister(&xxxfb_driver); } #endif /* CONFIG_PCI */ /* ------------------------------------------------------------------------- */ /* * Modularization */ module_init(xxxfb_init); module_exit(xxxfb_exit); MODULE_LICENSE("GPL");
gpl-2.0
blazingwolf/fireball-ics-3.0.8-2.17.605.2
drivers/bcma/host_pci.c
2380
4923
/* * Broadcom specific AMBA * PCI Host * * Licensed under the GNU/GPL. See COPYING for details. */ #include "bcma_private.h" #include <linux/slab.h> #include <linux/bcma/bcma.h> #include <linux/pci.h> static void bcma_host_pci_switch_core(struct bcma_device *core) { pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN, core->addr); pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN2, core->wrap); core->bus->mapped_core = core; pr_debug("Switched to core: 0x%X\n", core->id.id); } static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset) { if (core->bus->mapped_core != core) bcma_host_pci_switch_core(core); return ioread8(core->bus->mmio + offset); } static u16 bcma_host_pci_read16(struct bcma_device *core, u16 offset) { if (core->bus->mapped_core != core) bcma_host_pci_switch_core(core); return ioread16(core->bus->mmio + offset); } static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset) { if (core->bus->mapped_core != core) bcma_host_pci_switch_core(core); return ioread32(core->bus->mmio + offset); } static void bcma_host_pci_write8(struct bcma_device *core, u16 offset, u8 value) { if (core->bus->mapped_core != core) bcma_host_pci_switch_core(core); iowrite8(value, core->bus->mmio + offset); } static void bcma_host_pci_write16(struct bcma_device *core, u16 offset, u16 value) { if (core->bus->mapped_core != core) bcma_host_pci_switch_core(core); iowrite16(value, core->bus->mmio + offset); } static void bcma_host_pci_write32(struct bcma_device *core, u16 offset, u32 value) { if (core->bus->mapped_core != core) bcma_host_pci_switch_core(core); iowrite32(value, core->bus->mmio + offset); } static u32 bcma_host_pci_aread32(struct bcma_device *core, u16 offset) { if (core->bus->mapped_core != core) bcma_host_pci_switch_core(core); return ioread32(core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset); } static void bcma_host_pci_awrite32(struct bcma_device *core, u16 offset, u32 value) { if (core->bus->mapped_core != core) 
bcma_host_pci_switch_core(core); iowrite32(value, core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset); } const struct bcma_host_ops bcma_host_pci_ops = { .read8 = bcma_host_pci_read8, .read16 = bcma_host_pci_read16, .read32 = bcma_host_pci_read32, .write8 = bcma_host_pci_write8, .write16 = bcma_host_pci_write16, .write32 = bcma_host_pci_write32, .aread32 = bcma_host_pci_aread32, .awrite32 = bcma_host_pci_awrite32, }; static int bcma_host_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct bcma_bus *bus; int err = -ENOMEM; const char *name; u32 val; /* Alloc */ bus = kzalloc(sizeof(*bus), GFP_KERNEL); if (!bus) goto out; /* Basic PCI configuration */ err = pci_enable_device(dev); if (err) goto err_kfree_bus; name = dev_name(&dev->dev); if (dev->driver && dev->driver->name) name = dev->driver->name; err = pci_request_regions(dev, name); if (err) goto err_pci_disable; pci_set_master(dev); /* Disable the RETRY_TIMEOUT register (0x41) to keep * PCI Tx retries from interfering with C3 CPU state */ pci_read_config_dword(dev, 0x40, &val); if ((val & 0x0000ff00) != 0) pci_write_config_dword(dev, 0x40, val & 0xffff00ff); /* SSB needed additional powering up, do we have any AMBA PCI cards? 
*/ if (!pci_is_pcie(dev)) pr_err("PCI card detected, report problems.\n"); /* Map MMIO */ err = -ENOMEM; bus->mmio = pci_iomap(dev, 0, ~0UL); if (!bus->mmio) goto err_pci_release_regions; /* Host specific */ bus->host_pci = dev; bus->hosttype = BCMA_HOSTTYPE_PCI; bus->ops = &bcma_host_pci_ops; /* Register */ err = bcma_bus_register(bus); if (err) goto err_pci_unmap_mmio; pci_set_drvdata(dev, bus); out: return err; err_pci_unmap_mmio: pci_iounmap(dev, bus->mmio); err_pci_release_regions: pci_release_regions(dev); err_pci_disable: pci_disable_device(dev); err_kfree_bus: kfree(bus); return err; } static void bcma_host_pci_remove(struct pci_dev *dev) { struct bcma_bus *bus = pci_get_drvdata(dev); bcma_bus_unregister(bus); pci_iounmap(dev, bus->mmio); pci_release_regions(dev); pci_disable_device(dev); kfree(bus); pci_set_drvdata(dev, NULL); } static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = { { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, { 0, }, }; MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl); static struct pci_driver bcma_pci_bridge_driver = { .name = "bcma-pci-bridge", .id_table = bcma_pci_bridge_tbl, .probe = bcma_host_pci_probe, .remove = bcma_host_pci_remove, }; int __init bcma_host_pci_init(void) { return pci_register_driver(&bcma_pci_bridge_driver); } void __exit bcma_host_pci_exit(void) { pci_unregister_driver(&bcma_pci_bridge_driver); }
gpl-2.0
ea4862/boeffla
drivers/staging/comedi/drivers/addi-data/addi_common.c
2380
58899
/** @verbatim Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module. ADDI-DATA GmbH Dieselstrasse 3 D-77833 Ottersweier Tel: +19(0)7223/9493-0 Fax: +49(0)7223/9493-92 http://www.addi-data.com info@addi-data.com This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA You should also find the complete GPL in the COPYING file accompanying this source code. @endverbatim */ /* +-----------------------------------------------------------------------+ | (C) ADDI-DATA GmbH Dieselstrasse 3 D-77833 Ottersweier | +-----------------------------------------------------------------------+ | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com | | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com | +-----------------------------------------------------------------------+ | Project : ADDI DATA | Compiler : GCC | | Modulname : addi_common.c | Version : 2.96 | +-------------------------------+---------------------------------------+ | Author : | Date : | +-----------------------------------------------------------------------+ | Description : ADDI COMMON Main Module | +-----------------------------------------------------------------------+ | CONFIG OPTIONS | | option[0] - PCI bus number - if bus number and slot number are 0, | | then driver search for first unused card | | option[1] - PCI slot number | | | | option[2] = 0 - DMA ENABLE | | = 1 
- DMA DISABLE | +----------+-----------+------------------------------------------------+ */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/timex.h> #include <linux/timer.h> #include <linux/pci.h> #include <linux/gfp.h> #include "../../comedidev.h" #include <asm/io.h> #if defined(CONFIG_APCI_1710) || defined(CONFIG_APCI_3200) || defined(CONFIG_APCI_3300) #include <asm/i387.h> #endif #include "../comedi_fc.h" #include "addi_common.h" #include "addi_amcc_s5933.h" #ifndef ADDIDATA_DRIVER_NAME #define ADDIDATA_DRIVER_NAME "addi_common" #endif /* Update-0.7.57->0.7.68MODULE_AUTHOR("ADDI-DATA GmbH <info@addi-data.com>"); */ /* Update-0.7.57->0.7.68MODULE_DESCRIPTION("Comedi ADDI-DATA module"); */ /* Update-0.7.57->0.7.68MODULE_LICENSE("GPL"); */ #define devpriv ((struct addi_private *)dev->private) #define this_board ((const struct addi_board *)dev->board_ptr) #if defined(CONFIG_APCI_1710) || defined(CONFIG_APCI_3200) || defined(CONFIG_APCI_3300) /* BYTE b_SaveFPUReg [94]; */ void fpu_begin(void) { /* asm ("fstenv b_SaveFPUReg"); */ kernel_fpu_begin(); } void fpu_end(void) { /* asm ("frstor b_SaveFPUReg"); */ kernel_fpu_end(); } #endif #include "addi_eeprom.c" #if (defined (CONFIG_APCI_3120) || defined (CONFIG_APCI_3001)) #include "hwdrv_apci3120.c" #endif #ifdef CONFIG_APCI_1032 #include "hwdrv_apci1032.c" #endif #ifdef CONFIG_APCI_1516 #include "hwdrv_apci1516.c" #endif #ifdef CONFIG_APCI_2016 #include "hwdrv_apci2016.c" #endif #ifdef CONFIG_APCI_2032 #include "hwdrv_apci2032.c" #endif #ifdef CONFIG_APCI_2200 #include "hwdrv_apci2200.c" #endif #ifdef CONFIG_APCI_1564 #include "hwdrv_apci1564.c" #endif #ifdef CONFIG_APCI_1500 #include "hwdrv_apci1500.c" #endif #ifdef CONFIG_APCI_3501 #include "hwdrv_apci3501.c" #endif #ifdef CONFIG_APCI_035 #include "hwdrv_apci035.c" #endif #if (defined 
(CONFIG_APCI_3200) || defined (CONFIG_APCI_3300)) #include "hwdrv_apci3200.c" #endif #ifdef CONFIG_APCI_1710 #include "hwdrv_APCI1710.c" #endif #ifdef CONFIG_APCI_16XX #include "hwdrv_apci16xx.c" #endif #ifdef CONFIG_APCI_3XXX #include "hwdrv_apci3xxx.c" #endif #ifndef COMEDI_SUBD_TTLIO #define COMEDI_SUBD_TTLIO 11 /* Digital Input Output But TTL */ #endif static DEFINE_PCI_DEVICE_TABLE(addi_apci_tbl) = { #ifdef CONFIG_APCI_3120 {APCI3120_BOARD_VENDOR_ID, 0x818D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_1032 {APCI1032_BOARD_VENDOR_ID, 0x1003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_1516 {APCI1516_BOARD_VENDOR_ID, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_2016 {APCI2016_BOARD_VENDOR_ID, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_2032 {APCI2032_BOARD_VENDOR_ID, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_2200 {APCI2200_BOARD_VENDOR_ID, 0x1005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_1564 {APCI1564_BOARD_VENDOR_ID, 0x1006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_1500 {APCI1500_BOARD_VENDOR_ID, 0x80fc, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_3001 {APCI3120_BOARD_VENDOR_ID, 0x828D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_3501 {APCI3501_BOARD_VENDOR_ID, 0x3001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_035 {APCI035_BOARD_VENDOR_ID, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_3200 {APCI3200_BOARD_VENDOR_ID, 0x3000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_3300 {APCI3200_BOARD_VENDOR_ID, 0x3007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_1710 {APCI1710_BOARD_VENDOR_ID, APCI1710_BOARD_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_16XX {0x15B8, 0x1009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x100A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_3XXX {0x15B8, 0x3010, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x300F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x300E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3015, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3016, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3017, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3018, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x301A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x301B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x301C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x301D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x301E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x301F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3023, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x300B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif {0} }; MODULE_DEVICE_TABLE(pci, addi_apci_tbl); static const struct addi_board boardtypes[] = { #ifdef CONFIG_APCI_3120 {"apci3120", APCI3120_BOARD_VENDOR_ID, 0x818D, AMCC_OP_REG_SIZE, APCI3120_ADDRESS_RANGE, 8, 0, ADDIDATA_NO_EEPROM, NULL, 16, 8, 16, 8, 0xffff, 0x3fff, &range_apci3120_ai, &range_apci3120_ao, 4, 4, 0x0f, 0, NULL, 1, 1, 1, 10000, 100000, v_APCI3120_Interrupt, i_APCI3120_Reset, i_APCI3120_InsnConfigAnalogInput, i_APCI3120_InsnReadAnalogInput, NULL, NULL, i_APCI3120_CommandTestAnalogInput, i_APCI3120_CommandAnalogInput, i_APCI3120_StopCyclicAcquisition, NULL, i_APCI3120_InsnWriteAnalogOutput, NULL, NULL, i_APCI3120_InsnReadDigitalInput, NULL, i_APCI3120_InsnBitsDigitalInput, i_APCI3120_InsnConfigDigitalOutput, 
i_APCI3120_InsnWriteDigitalOutput, i_APCI3120_InsnBitsDigitalOutput, NULL, i_APCI3120_InsnConfigTimer, i_APCI3120_InsnWriteTimer, i_APCI3120_InsnReadTimer, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_1032 {"apci1032", APCI1032_BOARD_VENDOR_ID, 0x1003, 4, APCI1032_ADDRESS_RANGE, 0, 0, ADDIDATA_EEPROM, ADDIDATA_93C76, 0, 0, 0, 0, 0, 0, NULL, NULL, 32, 0, 0, 0, NULL, 0, 0, 0, 0, 0, v_APCI1032_Interrupt, i_APCI1032_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI1032_ConfigDigitalInput, i_APCI1032_Read1DigitalInput, NULL, i_APCI1032_ReadMoreDigitalInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_1516 {"apci1516", APCI1516_BOARD_VENDOR_ID, 0x1001, 128, APCI1516_ADDRESS_RANGE, 32, 0, ADDIDATA_EEPROM, ADDIDATA_S5920, 0, 0, 0, 0, 0, 0, NULL, NULL, 8, 8, 0, 0, NULL, 0, 1, 0, 0, 0, NULL, i_APCI1516_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI1516_Read1DigitalInput, NULL, i_APCI1516_ReadMoreDigitalInput, i_APCI1516_ConfigDigitalOutput, i_APCI1516_WriteDigitalOutput, i_APCI1516_ReadDigitalOutput, NULL, i_APCI1516_ConfigWatchdog, i_APCI1516_StartStopWriteWatchdog, i_APCI1516_ReadWatchdog, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_2016 {"apci2016", APCI2016_BOARD_VENDOR_ID, 0x1002, 128, APCI2016_ADDRESS_RANGE, 32, 0, ADDIDATA_EEPROM, ADDIDATA_S5920, 0, 0, 0, 0, 0, 0, NULL, NULL, 0, 16, 0, 0, NULL, 0, 1, 0, 0, 0, NULL, i_APCI2016_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI2016_ConfigDigitalOutput, i_APCI2016_WriteDigitalOutput, i_APCI2016_BitsDigitalOutput, NULL, i_APCI2016_ConfigWatchdog, i_APCI2016_StartStopWriteWatchdog, i_APCI2016_ReadWatchdog, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_2032 {"apci2032", APCI2032_BOARD_VENDOR_ID, 0x1004, 4, APCI2032_ADDRESS_RANGE, 0, 0, ADDIDATA_EEPROM, ADDIDATA_93C76, 0, 0, 0, 0, 0, 0, NULL, NULL, 0, 32, 0xffffffff, 0, NULL, 0, 1, 
0, 0, 0, v_APCI2032_Interrupt, i_APCI2032_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI2032_ConfigDigitalOutput, i_APCI2032_WriteDigitalOutput, i_APCI2032_ReadDigitalOutput, i_APCI2032_ReadInterruptStatus, i_APCI2032_ConfigWatchdog, i_APCI2032_StartStopWriteWatchdog, i_APCI2032_ReadWatchdog, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_2200 {"apci2200", APCI2200_BOARD_VENDOR_ID, 0x1005, 4, APCI2200_ADDRESS_RANGE, 0, 0, ADDIDATA_EEPROM, ADDIDATA_93C76, 0, 0, 0, 0, 0, 0, NULL, NULL, 8, 16, 0, 0, NULL, 0, 1, 0, 0, 0, NULL, i_APCI2200_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI2200_Read1DigitalInput, NULL, i_APCI2200_ReadMoreDigitalInput, i_APCI2200_ConfigDigitalOutput, i_APCI2200_WriteDigitalOutput, i_APCI2200_ReadDigitalOutput, NULL, i_APCI2200_ConfigWatchdog, i_APCI2200_StartStopWriteWatchdog, i_APCI2200_ReadWatchdog, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_1564 {"apci1564", APCI1564_BOARD_VENDOR_ID, 0x1006, 128, APCI1564_ADDRESS_RANGE, 0, 0, ADDIDATA_EEPROM, ADDIDATA_93C76, 0, 0, 0, 0, 0, 0, NULL, NULL, 32, 32, 0xffffffff, 0, NULL, 0, 1, 0, 0, 0, v_APCI1564_Interrupt, i_APCI1564_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI1564_ConfigDigitalInput, i_APCI1564_Read1DigitalInput, NULL, i_APCI1564_ReadMoreDigitalInput, i_APCI1564_ConfigDigitalOutput, i_APCI1564_WriteDigitalOutput, i_APCI1564_ReadDigitalOutput, i_APCI1564_ReadInterruptStatus, i_APCI1564_ConfigTimerCounterWatchdog, i_APCI1564_StartStopWriteTimerCounterWatchdog, i_APCI1564_ReadTimerCounterWatchdog, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_1500 {"apci1500", APCI1500_BOARD_VENDOR_ID, 0x80fc, 128, APCI1500_ADDRESS_RANGE, 4, 0, ADDIDATA_NO_EEPROM, NULL, 0, 0, 0, 0, 0, 0, NULL, NULL, 16, 16, 0xffff, 0, NULL, 0, 1, 0, 0, 0, v_APCI1500_Interrupt, i_APCI1500_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 
i_APCI1500_ConfigDigitalInputEvent, i_APCI1500_Initialisation, i_APCI1500_StartStopInputEvent, i_APCI1500_ReadMoreDigitalInput, i_APCI1500_ConfigDigitalOutputErrorInterrupt, i_APCI1500_WriteDigitalOutput, i_APCI1500_ConfigureInterrupt, NULL, i_APCI1500_ConfigCounterTimerWatchdog, i_APCI1500_StartStopTriggerTimerCounterWatchdog, i_APCI1500_ReadInterruptMask, i_APCI1500_ReadCounterTimerWatchdog, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_3001 {"apci3001", APCI3120_BOARD_VENDOR_ID, 0x828D, AMCC_OP_REG_SIZE, APCI3120_ADDRESS_RANGE, 8, 0, ADDIDATA_NO_EEPROM, NULL, 16, 8, 16, 0, 0xfff, 0, &range_apci3120_ai, NULL, 4, 4, 0x0f, 0, NULL, 1, 1, 1, 10000, 100000, v_APCI3120_Interrupt, i_APCI3120_Reset, i_APCI3120_InsnConfigAnalogInput, i_APCI3120_InsnReadAnalogInput, NULL, NULL, i_APCI3120_CommandTestAnalogInput, i_APCI3120_CommandAnalogInput, i_APCI3120_StopCyclicAcquisition, NULL, NULL, NULL, NULL, i_APCI3120_InsnReadDigitalInput, NULL, i_APCI3120_InsnBitsDigitalInput, i_APCI3120_InsnConfigDigitalOutput, i_APCI3120_InsnWriteDigitalOutput, i_APCI3120_InsnBitsDigitalOutput, NULL, i_APCI3120_InsnConfigTimer, i_APCI3120_InsnWriteTimer, i_APCI3120_InsnReadTimer, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_3501 {"apci3501", APCI3501_BOARD_VENDOR_ID, 0x3001, 64, APCI3501_ADDRESS_RANGE, 0, 0, ADDIDATA_EEPROM, ADDIDATA_S5933, 0, 0, 0, 8, 0, 16383, NULL, &range_apci3501_ao, 2, 2, 0x3, 0, NULL, 0, 1, 0, 0, 0, v_APCI3501_Interrupt, i_APCI3501_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3501_ConfigAnalogOutput, i_APCI3501_WriteAnalogOutput, NULL, NULL, NULL, NULL, i_APCI3501_ReadDigitalInput, i_APCI3501_ConfigDigitalOutput, i_APCI3501_WriteDigitalOutput, i_APCI3501_ReadDigitalOutput, NULL, i_APCI3501_ConfigTimerCounterWatchdog, i_APCI3501_StartStopWriteTimerCounterWatchdog, i_APCI3501_ReadTimerCounterWatchdog, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_035 {"apci035", APCI035_BOARD_VENDOR_ID, 0x0300, 127, APCI035_ADDRESS_RANGE, 0, 0, 1, 
ADDIDATA_S5920, 16, 8, 16, 0, 0xff, 0, &range_apci035_ai, NULL, 0, 0, 0, 0, NULL, 0, 1, 0, 10000, 100000, v_APCI035_Interrupt, i_APCI035_Reset, i_APCI035_ConfigAnalogInput, i_APCI035_ReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI035_ConfigTimerWatchdog, i_APCI035_StartStopWriteTimerWatchdog, i_APCI035_ReadTimerWatchdog, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_3200 {"apci3200", APCI3200_BOARD_VENDOR_ID, 0x3000, 128, 256, 4, 4, ADDIDATA_EEPROM, ADDIDATA_S5920, 16, 8, 16, 0, 0x3ffff, 0, &range_apci3200_ai, NULL, 4, 4, 0, 0, NULL, 0, 0, 0, 10000, 100000, v_APCI3200_Interrupt, i_APCI3200_Reset, i_APCI3200_ConfigAnalogInput, i_APCI3200_ReadAnalogInput, i_APCI3200_InsnWriteReleaseAnalogInput, i_APCI3200_InsnBits_AnalogInput_Test, i_APCI3200_CommandTestAnalogInput, i_APCI3200_CommandAnalogInput, i_APCI3200_StopCyclicAcquisition, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3200_ReadDigitalInput, i_APCI3200_ConfigDigitalOutput, i_APCI3200_WriteDigitalOutput, i_APCI3200_ReadDigitalOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_3300 /* Begin JK .20.10.2004 = APCI-3300 integration */ {"apci3300", APCI3200_BOARD_VENDOR_ID, 0x3007, 128, 256, 4, 4, ADDIDATA_EEPROM, ADDIDATA_S5920, 0, 8, 8, 0, 0x3ffff, 0, &range_apci3300_ai, NULL, 4, 4, 0, 0, NULL, 0, 0, 0, 10000, 100000, v_APCI3200_Interrupt, i_APCI3200_Reset, i_APCI3200_ConfigAnalogInput, i_APCI3200_ReadAnalogInput, i_APCI3200_InsnWriteReleaseAnalogInput, i_APCI3200_InsnBits_AnalogInput_Test, i_APCI3200_CommandTestAnalogInput, i_APCI3200_CommandAnalogInput, i_APCI3200_StopCyclicAcquisition, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3200_ReadDigitalInput, i_APCI3200_ConfigDigitalOutput, i_APCI3200_WriteDigitalOutput, i_APCI3200_ReadDigitalOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_1710 {"apci1710", APCI1710_BOARD_VENDOR_ID, APCI1710_BOARD_DEVICE_ID, 128, 8, 
256, 0, ADDIDATA_NO_EEPROM, NULL, 0, 0, 0, 0, 0, 0, NULL, NULL, 0, 0, 0, 0, NULL, 0, 0, 0, 0, 0, v_APCI1710_Interrupt, i_APCI1710_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_16XX {"apci1648", 0x15B8, 0x1009, 128, 0, 0, 0, ADDIDATA_NO_EEPROM, NULL, 0, 0, 0, 0, 0, 0, NULL, NULL, 0, 0, 0, 48, &range_apci16xx_ttl, 0, 0, 0, 0, 0, NULL, i_APCI16XX_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI16XX_InsnConfigInitTTLIO, i_APCI16XX_InsnBitsReadTTLIO, i_APCI16XX_InsnReadTTLIOAllPortValue, i_APCI16XX_InsnBitsWriteTTLIO}, {"apci1696", 0x15B8, 0x100A, 128, 0, 0, 0, ADDIDATA_NO_EEPROM, NULL, 0, 0, 0, 0, 0, 0, NULL, NULL, 0, 0, 0, 96, &range_apci16xx_ttl, 0, 0, 0, 0, 0, NULL, i_APCI16XX_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI16XX_InsnConfigInitTTLIO, i_APCI16XX_InsnBitsReadTTLIO, i_APCI16XX_InsnReadTTLIOAllPortValue, i_APCI16XX_InsnBitsWriteTTLIO}, #endif #ifdef CONFIG_APCI_3XXX {"apci3000-16", 0x15B8, 0x3010, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 16, 8, 16, 0, 4095, 0, &range_apci3XXX_ai, NULL, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3000-8", 0x15B8, 0x300F, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 8, 4, 8, 0, 4095, 0, &range_apci3XXX_ai, NULL, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, 
i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3000-4", 0x15B8, 0x300E, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 4, 2, 4, 0, 4095, 0, &range_apci3XXX_ai, NULL, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3006-16", 0x15B8, 0x3013, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 16, 8, 16, 0, 65535, 0, &range_apci3XXX_ai, NULL, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3006-8", 0x15B8, 0x3014, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 8, 4, 8, 0, 65535, 0, &range_apci3XXX_ai, NULL, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3006-4", 0x15B8, 0x3015, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 4, 2, 4, 0, 65535, 0, 
&range_apci3XXX_ai, NULL, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3010-16", 0x15B8, 0x3016, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 16, 8, 16, 0, 4095, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3010-8", 0x15B8, 0x3017, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 8, 4, 8, 0, 4095, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3010-4", 0x15B8, 0x3018, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 4, 2, 4, 0, 4095, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, 
i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3016-16", 0x15B8, 0x3019, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 16, 8, 16, 0, 65535, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3016-8", 0x15B8, 0x301A, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 8, 4, 8, 0, 65535, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3016-4", 0x15B8, 0x301B, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 4, 2, 4, 0, 65535, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, 
i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3100-16-4", 0x15B8, 0x301C, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 16, 8, 16, 4, 4095, 4095, &range_apci3XXX_ai, &range_apci3XXX_ao, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3100-8-4", 0x15B8, 0x301D, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 8, 4, 8, 4, 4095, 4095, &range_apci3XXX_ai, &range_apci3XXX_ao, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3106-16-4", 0x15B8, 0x301E, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 16, 8, 16, 4, 65535, 4095, &range_apci3XXX_ai, &range_apci3XXX_ao, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, NULL, NULL, NULL, 
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3106-8-4", 0x15B8, 0x301F, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 8, 4, 8, 4, 65535, 4095, &range_apci3XXX_ai, &range_apci3XXX_ao, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3110-16-4", 0x15B8, 0x3020, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 16, 8, 16, 4, 4095, 4095, &range_apci3XXX_ai, &range_apci3XXX_ao, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3110-8-4", 0x15B8, 0x3021, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 8, 4, 8, 4, 4095, 4095, &range_apci3XXX_ai, &range_apci3XXX_ao, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, 
i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3116-16-4", 0x15B8, 0x3022, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 16, 8, 16, 4, 65535, 4095, &range_apci3XXX_ai, &range_apci3XXX_ao, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3116-8-4", 0x15B8, 0x3023, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 8, 4, 8, 4, 65535, 4095, &range_apci3XXX_ai, &range_apci3XXX_ao, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3003", 0x15B8, 0x300B, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 0, 4, 4, 0, 65535, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 0, NULL, 0, 0, 7, 2500, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, 
i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {"apci3002-16", 0x15B8, 0x3002, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 0, 16, 16, 0, 65535, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 0, NULL, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {"apci3002-8", 0x15B8, 0x3003, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 0, 8, 8, 0, 65535, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 0, NULL, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {"apci3002-4", 0x15B8, 0x3004, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 0, 4, 4, 0, 65535, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 0, NULL, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {"apci3500", 0x15B8, 0x3024, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 0, 0, 0, 4, 0, 4095, NULL, &range_apci3XXX_ao, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 0, 0, 0, 
v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, #endif }; #define n_boardtypes (sizeof(boardtypes)/sizeof(struct addi_board)) static struct comedi_driver driver_addi = { .driver_name = ADDIDATA_DRIVER_NAME, .module = THIS_MODULE, .attach = i_ADDI_Attach, .detach = i_ADDI_Detach, .num_names = n_boardtypes, .board_name = &boardtypes[0].pc_DriverName, .offset = sizeof(struct addi_board), }; static int __devinit driver_addi_pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { return comedi_pci_auto_config(dev, driver_addi.driver_name); } static void __devexit driver_addi_pci_remove(struct pci_dev *dev) { comedi_pci_auto_unconfig(dev); } static struct pci_driver driver_addi_pci_driver = { .id_table = addi_apci_tbl, .probe = &driver_addi_pci_probe, .remove = __devexit_p(&driver_addi_pci_remove) }; static int __init driver_addi_init_module(void) { int retval; retval = comedi_driver_register(&driver_addi); if (retval < 0) return retval; driver_addi_pci_driver.name = (char *)driver_addi.driver_name; return pci_register_driver(&driver_addi_pci_driver); } static void __exit driver_addi_cleanup_module(void) { pci_unregister_driver(&driver_addi_pci_driver); comedi_driver_unregister(&driver_addi); } module_init(driver_addi_init_module); module_exit(driver_addi_cleanup_module); /* +----------------------------------------------------------------------------+ | Function name :static int i_ADDI_Attach(struct comedi_device *dev, | | struct comedi_devconfig *it) | | | +----------------------------------------------------------------------------+ | Task :Detects the card. | | Configure the driver for a particular board. 
| | This function does all the initializations and memory | | allocation of data structures for the driver. | +----------------------------------------------------------------------------+ | Input Parameters :struct comedi_device *dev | | struct comedi_devconfig *it | | | +----------------------------------------------------------------------------+ | Return Value : 0 | | | +----------------------------------------------------------------------------+ */ static int i_ADDI_Attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; int ret, pages, i, n_subdevices; unsigned int dw_Dummy; resource_size_t io_addr[5]; unsigned int irq; resource_size_t iobase_a, iobase_main, iobase_addon, iobase_reserved; struct pcilst_struct *card = NULL; unsigned char pci_bus, pci_slot, pci_func; int i_Dma = 0; ret = alloc_private(dev, sizeof(struct addi_private)); if (ret < 0) return -ENOMEM; if (!pci_list_builded) { v_pci_card_list_init(this_board->i_VendorId, 1); /* 1 for displaying the list.. 
*/ pci_list_builded = 1; } /* printk("comedi%d: "ADDIDATA_DRIVER_NAME": board=%s",dev->minor,this_board->pc_DriverName); */ if ((this_board->i_Dma) && (it->options[2] == 0)) { i_Dma = 1; } card = ptr_select_and_alloc_pci_card(this_board->i_VendorId, this_board->i_DeviceId, it->options[0], it->options[1], i_Dma); if (card == NULL) return -EIO; devpriv->allocated = 1; if ((i_pci_card_data(card, &pci_bus, &pci_slot, &pci_func, &io_addr[0], &irq)) < 0) { i_pci_card_free(card); printk(" - Can't get AMCC data!\n"); return -EIO; } iobase_a = io_addr[0]; iobase_main = io_addr[1]; iobase_addon = io_addr[2]; iobase_reserved = io_addr[3]; printk("\nBus %d: Slot %d: Funct%d\nBase0: 0x%8llx\nBase1: 0x%8llx\nBase2: 0x%8llx\nBase3: 0x%8llx\n", pci_bus, pci_slot, pci_func, (unsigned long long)io_addr[0], (unsigned long long)io_addr[1], (unsigned long long)io_addr[2], (unsigned long long)io_addr[3]); if ((this_board->pc_EepromChip == NULL) || (strcmp(this_board->pc_EepromChip, ADDIDATA_9054) != 0)) { /************************************/ /* Test if more that 1 address used */ /************************************/ if (this_board->i_IorangeBase1 != 0) { dev->iobase = (unsigned long)iobase_main; /* DAQ base address... */ } else { dev->iobase = (unsigned long)iobase_a; /* DAQ base address... */ } dev->board_name = this_board->pc_DriverName; devpriv->amcc = card; devpriv->iobase = (int) dev->iobase; devpriv->i_IobaseAmcc = (int) iobase_a; /* AMCC base address... */ devpriv->i_IobaseAddon = (int) iobase_addon; /* ADD ON base address.... 
*/ devpriv->i_IobaseReserved = (int) iobase_reserved; } else { dev->board_name = this_board->pc_DriverName; dev->iobase = (unsigned long)io_addr[2]; devpriv->amcc = card; devpriv->iobase = (int) io_addr[2]; devpriv->i_IobaseReserved = (int) io_addr[3]; printk("\nioremap begin"); devpriv->dw_AiBase = ioremap(io_addr[3], this_board->i_IorangeBase3); printk("\nioremap end"); } /* Initialize parameters that can be overridden in EEPROM */ devpriv->s_EeParameters.i_NbrAiChannel = this_board->i_NbrAiChannel; devpriv->s_EeParameters.i_NbrAoChannel = this_board->i_NbrAoChannel; devpriv->s_EeParameters.i_AiMaxdata = this_board->i_AiMaxdata; devpriv->s_EeParameters.i_AoMaxdata = this_board->i_AoMaxdata; devpriv->s_EeParameters.i_NbrDiChannel = this_board->i_NbrDiChannel; devpriv->s_EeParameters.i_NbrDoChannel = this_board->i_NbrDoChannel; devpriv->s_EeParameters.i_DoMaxdata = this_board->i_DoMaxdata; devpriv->s_EeParameters.i_Dma = this_board->i_Dma; devpriv->s_EeParameters.i_Timer = this_board->i_Timer; devpriv->s_EeParameters.ui_MinAcquisitiontimeNs = this_board->ui_MinAcquisitiontimeNs; devpriv->s_EeParameters.ui_MinDelaytimeNs = this_board->ui_MinDelaytimeNs; /* ## */ if (irq > 0) { if (request_irq(irq, v_ADDI_Interrupt, IRQF_SHARED, this_board->pc_DriverName, dev) < 0) { printk(", unable to allocate IRQ %u, DISABLING IT", irq); irq = 0; /* Can't use IRQ */ } else { printk("\nirq=%u", irq); } } else { printk(", IRQ disabled"); } printk("\nOption %d %d %d\n", it->options[0], it->options[1], it->options[2]); dev->irq = irq; /* Read eepeom and fill addi_board Structure */ if (this_board->i_PCIEeprom) { printk("\nPCI Eeprom used"); if (!(strcmp(this_board->pc_EepromChip, "S5920"))) { /* Set 3 wait stait */ if (!(strcmp(this_board->pc_DriverName, "apci035"))) { outl(0x80808082, devpriv->i_IobaseAmcc + 0x60); } else { outl(0x83838383, devpriv->i_IobaseAmcc + 0x60); } /* Enable the interrupt for the controller */ dw_Dummy = inl(devpriv->i_IobaseAmcc + 0x38); outl(dw_Dummy | 
0x2000, devpriv->i_IobaseAmcc + 0x38); printk("\nEnable the interrupt for the controller"); } printk("\nRead Eeprom"); i_EepromReadMainHeader(io_addr[0], this_board->pc_EepromChip, dev); } else { printk("\nPCI Eeprom unused"); } if (it->options[2] > 0) { devpriv->us_UseDma = ADDI_DISABLE; } else { devpriv->us_UseDma = ADDI_ENABLE; } if (devpriv->s_EeParameters.i_Dma) { printk("\nDMA used"); if (devpriv->us_UseDma == ADDI_ENABLE) { /* alloc DMA buffers */ devpriv->b_DmaDoubleBuffer = 0; for (i = 0; i < 2; i++) { for (pages = 4; pages >= 0; pages--) { devpriv->ul_DmaBufferVirtual[i] = (void *) __get_free_pages(GFP_KERNEL, pages); if (devpriv->ul_DmaBufferVirtual[i]) break; } if (devpriv->ul_DmaBufferVirtual[i]) { devpriv->ui_DmaBufferPages[i] = pages; devpriv->ui_DmaBufferSize[i] = PAGE_SIZE * pages; devpriv->ui_DmaBufferSamples[i] = devpriv-> ui_DmaBufferSize[i] >> 1; devpriv->ul_DmaBufferHw[i] = virt_to_bus((void *)devpriv-> ul_DmaBufferVirtual[i]); } } if (!devpriv->ul_DmaBufferVirtual[0]) { printk (", Can't allocate DMA buffer, DMA disabled!"); devpriv->us_UseDma = ADDI_DISABLE; } if (devpriv->ul_DmaBufferVirtual[1]) { devpriv->b_DmaDoubleBuffer = 1; } } if ((devpriv->us_UseDma == ADDI_ENABLE)) { printk("\nDMA ENABLED\n"); } else { printk("\nDMA DISABLED\n"); } } if (!strcmp(this_board->pc_DriverName, "apci1710")) { #ifdef CONFIG_APCI_1710 i_ADDI_AttachPCI1710(dev); /* save base address */ devpriv->s_BoardInfos.ui_Address = io_addr[2]; #endif } else { /* Update-0.7.57->0.7.68dev->n_subdevices = 7; */ n_subdevices = 7; ret = alloc_subdevices(dev, n_subdevices); if (ret < 0) return ret; /* Allocate and Initialise AI Subdevice Structures */ s = dev->subdevices + 0; if ((devpriv->s_EeParameters.i_NbrAiChannel) || (this_board->i_NbrAiChannelDiff)) { dev->read_subdev = s; s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND | SDF_DIFF; if (devpriv->s_EeParameters.i_NbrAiChannel) { s->n_chan = devpriv->s_EeParameters.i_NbrAiChannel; 
devpriv->b_SingelDiff = 0; } else { s->n_chan = this_board->i_NbrAiChannelDiff; devpriv->b_SingelDiff = 1; } s->maxdata = devpriv->s_EeParameters.i_AiMaxdata; s->len_chanlist = this_board->i_AiChannelList; s->range_table = this_board->pr_AiRangelist; /* Set the initialisation flag */ devpriv->b_AiInitialisation = 1; s->insn_config = this_board->i_hwdrv_InsnConfigAnalogInput; s->insn_read = this_board->i_hwdrv_InsnReadAnalogInput; s->insn_write = this_board->i_hwdrv_InsnWriteAnalogInput; s->insn_bits = this_board->i_hwdrv_InsnBitsAnalogInput; s->do_cmdtest = this_board->i_hwdrv_CommandTestAnalogInput; s->do_cmd = this_board->i_hwdrv_CommandAnalogInput; s->cancel = this_board->i_hwdrv_CancelAnalogInput; } else { s->type = COMEDI_SUBD_UNUSED; } /* Allocate and Initialise AO Subdevice Structures */ s = dev->subdevices + 1; if (devpriv->s_EeParameters.i_NbrAoChannel) { s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON; s->n_chan = devpriv->s_EeParameters.i_NbrAoChannel; s->maxdata = devpriv->s_EeParameters.i_AoMaxdata; s->len_chanlist = devpriv->s_EeParameters.i_NbrAoChannel; s->range_table = this_board->pr_AoRangelist; s->insn_config = this_board->i_hwdrv_InsnConfigAnalogOutput; s->insn_write = this_board->i_hwdrv_InsnWriteAnalogOutput; } else { s->type = COMEDI_SUBD_UNUSED; } /* Allocate and Initialise DI Subdevice Structures */ s = dev->subdevices + 2; if (devpriv->s_EeParameters.i_NbrDiChannel) { s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_COMMON; s->n_chan = devpriv->s_EeParameters.i_NbrDiChannel; s->maxdata = 1; s->len_chanlist = devpriv->s_EeParameters.i_NbrDiChannel; s->range_table = &range_digital; s->io_bits = 0; /* all bits input */ s->insn_config = this_board->i_hwdrv_InsnConfigDigitalInput; s->insn_read = this_board->i_hwdrv_InsnReadDigitalInput; s->insn_write = this_board->i_hwdrv_InsnWriteDigitalInput; s->insn_bits = this_board->i_hwdrv_InsnBitsDigitalInput; } else { s->type = 
COMEDI_SUBD_UNUSED; } /* Allocate and Initialise DO Subdevice Structures */ s = dev->subdevices + 3; if (devpriv->s_EeParameters.i_NbrDoChannel) { s->type = COMEDI_SUBD_DO; s->subdev_flags = SDF_READABLE | SDF_WRITEABLE | SDF_GROUND | SDF_COMMON; s->n_chan = devpriv->s_EeParameters.i_NbrDoChannel; s->maxdata = devpriv->s_EeParameters.i_DoMaxdata; s->len_chanlist = devpriv->s_EeParameters.i_NbrDoChannel; s->range_table = &range_digital; s->io_bits = 0xf; /* all bits output */ s->insn_config = this_board->i_hwdrv_InsnConfigDigitalOutput; /* for digital output memory.. */ s->insn_write = this_board->i_hwdrv_InsnWriteDigitalOutput; s->insn_bits = this_board->i_hwdrv_InsnBitsDigitalOutput; s->insn_read = this_board->i_hwdrv_InsnReadDigitalOutput; } else { s->type = COMEDI_SUBD_UNUSED; } /* Allocate and Initialise Timer Subdevice Structures */ s = dev->subdevices + 4; if (devpriv->s_EeParameters.i_Timer) { s->type = COMEDI_SUBD_TIMER; s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON; s->n_chan = 1; s->maxdata = 0; s->len_chanlist = 1; s->range_table = &range_digital; s->insn_write = this_board->i_hwdrv_InsnWriteTimer; s->insn_read = this_board->i_hwdrv_InsnReadTimer; s->insn_config = this_board->i_hwdrv_InsnConfigTimer; s->insn_bits = this_board->i_hwdrv_InsnBitsTimer; } else { s->type = COMEDI_SUBD_UNUSED; } /* Allocate and Initialise TTL */ s = dev->subdevices + 5; if (this_board->i_NbrTTLChannel) { s->type = COMEDI_SUBD_TTLIO; s->subdev_flags = SDF_WRITEABLE | SDF_READABLE | SDF_GROUND | SDF_COMMON; s->n_chan = this_board->i_NbrTTLChannel; s->maxdata = 1; s->io_bits = 0; /* all bits input */ s->len_chanlist = this_board->i_NbrTTLChannel; s->range_table = &range_digital; s->insn_config = this_board->i_hwdr_ConfigInitTTLIO; s->insn_bits = this_board->i_hwdr_ReadTTLIOBits; s->insn_read = this_board->i_hwdr_ReadTTLIOAllPortValue; s->insn_write = this_board->i_hwdr_WriteTTLIOChlOnOff; } else { s->type = COMEDI_SUBD_UNUSED; } /* EEPROM */ s = dev->subdevices + 6; 
if (this_board->i_PCIEeprom) { s->type = COMEDI_SUBD_MEMORY; s->subdev_flags = SDF_READABLE | SDF_INTERNAL; s->n_chan = 256; s->maxdata = 0xffff; s->insn_read = i_ADDIDATA_InsnReadEeprom; } else { s->type = COMEDI_SUBD_UNUSED; } } printk("\ni_ADDI_Attach end\n"); i_ADDI_Reset(dev); devpriv->b_ValidDriver = 1; return 0; } /* +----------------------------------------------------------------------------+ | Function name : static int i_ADDI_Detach(struct comedi_device *dev) | | | | | +----------------------------------------------------------------------------+ | Task : Deallocates resources of the addi_common driver | | Free the DMA buffers, unregister irq. | | | +----------------------------------------------------------------------------+ | Input Parameters : struct comedi_device *dev | | | | | +----------------------------------------------------------------------------+ | Return Value : 0 | | | +----------------------------------------------------------------------------+ */ static int i_ADDI_Detach(struct comedi_device *dev) { if (dev->private) { if (devpriv->b_ValidDriver) { i_ADDI_Reset(dev); } if (dev->irq) { free_irq(dev->irq, dev); } if ((this_board->pc_EepromChip == NULL) || (strcmp(this_board->pc_EepromChip, ADDIDATA_9054) != 0)) { if (devpriv->allocated) { i_pci_card_free(devpriv->amcc); } if (devpriv->ul_DmaBufferVirtual[0]) { free_pages((unsigned long)devpriv-> ul_DmaBufferVirtual[0], devpriv->ui_DmaBufferPages[0]); } if (devpriv->ul_DmaBufferVirtual[1]) { free_pages((unsigned long)devpriv-> ul_DmaBufferVirtual[1], devpriv->ui_DmaBufferPages[1]); } } else { iounmap(devpriv->dw_AiBase); if (devpriv->allocated) { i_pci_card_free(devpriv->amcc); } } if (pci_list_builded) { /* v_pci_card_list_cleanup(PCI_VENDOR_ID_AMCC); */ v_pci_card_list_cleanup(this_board->i_VendorId); pci_list_builded = 0; } } return 0; } /* +----------------------------------------------------------------------------+ | Function name : static int i_ADDI_Reset(struct comedi_device *dev) 
| | | +----------------------------------------------------------------------------+ | Task : Disables all interrupts, Resets digital output to low, | | Set all analog output to low | | | +----------------------------------------------------------------------------+ | Input Parameters : struct comedi_device *dev | | | | | +----------------------------------------------------------------------------+ | Return Value : 0 | | | +----------------------------------------------------------------------------+ */ static int i_ADDI_Reset(struct comedi_device *dev) { this_board->i_hwdrv_Reset(dev); return 0; } /* Interrupt function */ /* +----------------------------------------------------------------------------+ | Function name : | |static void v_ADDI_Interrupt(int irq, void *d) | | | +----------------------------------------------------------------------------+ | Task : Registerd interrupt routine | | | +----------------------------------------------------------------------------+ | Input Parameters : int irq | | | | | +----------------------------------------------------------------------------+ | Return Value : | | | +----------------------------------------------------------------------------+ */ static irqreturn_t v_ADDI_Interrupt(int irq, void *d) { struct comedi_device *dev = d; this_board->v_hwdrv_Interrupt(irq, d); return IRQ_RETVAL(1); } /* EEPROM Read Function */ /* +----------------------------------------------------------------------------+ | Function name : | |INT i_ADDIDATA_InsnReadEeprom(struct comedi_device *dev,struct comedi_subdevice *s, struct comedi_insn *insn,unsigned int *data) | | +----------------------------------------------------------------------------+ | Task : Read 256 words from EEPROM | | | +----------------------------------------------------------------------------+ | Input Parameters :(struct comedi_device *dev,struct comedi_subdevice *s, struct comedi_insn *insn,unsigned int *data) | | | | | 
+----------------------------------------------------------------------------+ | Return Value : | | | +----------------------------------------------------------------------------+ */ static int i_ADDIDATA_InsnReadEeprom(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned short w_Data; unsigned short w_Address; w_Address = CR_CHAN(insn->chanspec); /* address to be read as 0,1,2,3...255 */ w_Data = w_EepromReadWord(devpriv->i_IobaseAmcc, this_board->pc_EepromChip, 0x100 + (2 * w_Address)); data[0] = w_Data; /* multiplied by 2 bcozinput will be like 0,1,2...255 */ return insn->n; }
gpl-2.0
CyanogenMod/lge-kernel-e400
drivers/infiniband/hw/mthca/mthca_eq.c
2636
25634
/* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/slab.h> #include "mthca_dev.h" #include "mthca_cmd.h" #include "mthca_config_reg.h" enum { MTHCA_NUM_ASYNC_EQE = 0x80, MTHCA_NUM_CMD_EQE = 0x80, MTHCA_NUM_SPARE_EQE = 0x80, MTHCA_EQ_ENTRY_SIZE = 0x20 }; /* * Must be packed because start is 64 bits but only aligned to 32 bits. 
*/ struct mthca_eq_context { __be32 flags; __be64 start; __be32 logsize_usrpage; __be32 tavor_pd; /* reserved for Arbel */ u8 reserved1[3]; u8 intr; __be32 arbel_pd; /* lost_count for Tavor */ __be32 lkey; u32 reserved2[2]; __be32 consumer_index; __be32 producer_index; u32 reserved3[4]; } __attribute__((packed)); #define MTHCA_EQ_STATUS_OK ( 0 << 28) #define MTHCA_EQ_STATUS_OVERFLOW ( 9 << 28) #define MTHCA_EQ_STATUS_WRITE_FAIL (10 << 28) #define MTHCA_EQ_OWNER_SW ( 0 << 24) #define MTHCA_EQ_OWNER_HW ( 1 << 24) #define MTHCA_EQ_FLAG_TR ( 1 << 18) #define MTHCA_EQ_FLAG_OI ( 1 << 17) #define MTHCA_EQ_STATE_ARMED ( 1 << 8) #define MTHCA_EQ_STATE_FIRED ( 2 << 8) #define MTHCA_EQ_STATE_ALWAYS_ARMED ( 3 << 8) #define MTHCA_EQ_STATE_ARBEL ( 8 << 8) enum { MTHCA_EVENT_TYPE_COMP = 0x00, MTHCA_EVENT_TYPE_PATH_MIG = 0x01, MTHCA_EVENT_TYPE_COMM_EST = 0x02, MTHCA_EVENT_TYPE_SQ_DRAINED = 0x03, MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE = 0x13, MTHCA_EVENT_TYPE_SRQ_LIMIT = 0x14, MTHCA_EVENT_TYPE_CQ_ERROR = 0x04, MTHCA_EVENT_TYPE_WQ_CATAS_ERROR = 0x05, MTHCA_EVENT_TYPE_EEC_CATAS_ERROR = 0x06, MTHCA_EVENT_TYPE_PATH_MIG_FAILED = 0x07, MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10, MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11, MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12, MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR = 0x08, MTHCA_EVENT_TYPE_PORT_CHANGE = 0x09, MTHCA_EVENT_TYPE_EQ_OVERFLOW = 0x0f, MTHCA_EVENT_TYPE_ECC_DETECT = 0x0e, MTHCA_EVENT_TYPE_CMD = 0x0a }; #define MTHCA_ASYNC_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_PATH_MIG) | \ (1ULL << MTHCA_EVENT_TYPE_COMM_EST) | \ (1ULL << MTHCA_EVENT_TYPE_SQ_DRAINED) | \ (1ULL << MTHCA_EVENT_TYPE_CQ_ERROR) | \ (1ULL << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR) | \ (1ULL << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR) | \ (1ULL << MTHCA_EVENT_TYPE_PATH_MIG_FAILED) | \ (1ULL << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \ (1ULL << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR) | \ (1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR) | \ (1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE) | \ (1ULL << MTHCA_EVENT_TYPE_ECC_DETECT)) 
#define MTHCA_SRQ_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR) | \ (1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE) | \ (1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT)) #define MTHCA_CMD_EVENT_MASK (1ULL << MTHCA_EVENT_TYPE_CMD) #define MTHCA_EQ_DB_INC_CI (1 << 24) #define MTHCA_EQ_DB_REQ_NOT (2 << 24) #define MTHCA_EQ_DB_DISARM_CQ (3 << 24) #define MTHCA_EQ_DB_SET_CI (4 << 24) #define MTHCA_EQ_DB_ALWAYS_ARM (5 << 24) struct mthca_eqe { u8 reserved1; u8 type; u8 reserved2; u8 subtype; union { u32 raw[6]; struct { __be32 cqn; } __attribute__((packed)) comp; struct { u16 reserved1; __be16 token; u32 reserved2; u8 reserved3[3]; u8 status; __be64 out_param; } __attribute__((packed)) cmd; struct { __be32 qpn; } __attribute__((packed)) qp; struct { __be32 srqn; } __attribute__((packed)) srq; struct { __be32 cqn; u32 reserved1; u8 reserved2[3]; u8 syndrome; } __attribute__((packed)) cq_err; struct { u32 reserved1[2]; __be32 port; } __attribute__((packed)) port_change; } event; u8 reserved3[3]; u8 owner; } __attribute__((packed)); #define MTHCA_EQ_ENTRY_OWNER_SW (0 << 7) #define MTHCA_EQ_ENTRY_OWNER_HW (1 << 7) static inline u64 async_mask(struct mthca_dev *dev) { return dev->mthca_flags & MTHCA_FLAG_SRQ ? MTHCA_ASYNC_EVENT_MASK | MTHCA_SRQ_EVENT_MASK : MTHCA_ASYNC_EVENT_MASK; } static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) { /* * This barrier makes sure that all updates to ownership bits * done by set_eqe_hw() hit memory before the consumer index * is updated. set_eq_ci() allows the HCA to possibly write * more EQ entries, and we want to avoid the exceedingly * unlikely possibility of the HCA writing an entry and then * having set_eqe_hw() overwrite the owner field. 
*/ wmb(); mthca_write64(MTHCA_EQ_DB_SET_CI | eq->eqn, ci & (eq->nent - 1), dev->kar + MTHCA_EQ_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); } static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) { /* See comment in tavor_set_eq_ci() above. */ wmb(); __raw_writel((__force u32) cpu_to_be32(ci), dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8); /* We still want ordering, just not swabbing, so add a barrier */ mb(); } static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) { if (mthca_is_memfree(dev)) arbel_set_eq_ci(dev, eq, ci); else tavor_set_eq_ci(dev, eq, ci); } static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn) { mthca_write64(MTHCA_EQ_DB_REQ_NOT | eqn, 0, dev->kar + MTHCA_EQ_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); } static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask) { writel(eqn_mask, dev->eq_regs.arbel.eq_arm); } static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn) { if (!mthca_is_memfree(dev)) { mthca_write64(MTHCA_EQ_DB_DISARM_CQ | eqn, cqn, dev->kar + MTHCA_EQ_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); } } static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry) { unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE; return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE; } static inline struct mthca_eqe *next_eqe_sw(struct mthca_eq *eq) { struct mthca_eqe *eqe; eqe = get_eqe(eq, eq->cons_index); return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe; } static inline void set_eqe_hw(struct mthca_eqe *eqe) { eqe->owner = MTHCA_EQ_ENTRY_OWNER_HW; } static void port_change(struct mthca_dev *dev, int port, int active) { struct ib_event record; mthca_dbg(dev, "Port change to %s for port %d\n", active ? "active" : "down", port); record.device = &dev->ib_dev; record.event = active ? 
IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; record.element.port_num = port; ib_dispatch_event(&record); } static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq) { struct mthca_eqe *eqe; int disarm_cqn; int eqes_found = 0; int set_ci = 0; while ((eqe = next_eqe_sw(eq))) { /* * Make sure we read EQ entry contents after we've * checked the ownership bit. */ rmb(); switch (eqe->type) { case MTHCA_EVENT_TYPE_COMP: disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff; disarm_cq(dev, eq->eqn, disarm_cqn); mthca_cq_completion(dev, disarm_cqn); break; case MTHCA_EVENT_TYPE_PATH_MIG: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, IB_EVENT_PATH_MIG); break; case MTHCA_EVENT_TYPE_COMM_EST: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, IB_EVENT_COMM_EST); break; case MTHCA_EVENT_TYPE_SQ_DRAINED: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, IB_EVENT_SQ_DRAINED); break; case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, IB_EVENT_QP_LAST_WQE_REACHED); break; case MTHCA_EVENT_TYPE_SRQ_LIMIT: mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff, IB_EVENT_SRQ_LIMIT_REACHED); break; case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, IB_EVENT_QP_FATAL); break; case MTHCA_EVENT_TYPE_PATH_MIG_FAILED: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, IB_EVENT_PATH_MIG_ERR); break; case MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, IB_EVENT_QP_REQ_ERR); break; case MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, IB_EVENT_QP_ACCESS_ERR); break; case MTHCA_EVENT_TYPE_CMD: mthca_cmd_event(dev, be16_to_cpu(eqe->event.cmd.token), eqe->event.cmd.status, be64_to_cpu(eqe->event.cmd.out_param)); break; case MTHCA_EVENT_TYPE_PORT_CHANGE: port_change(dev, (be32_to_cpu(eqe->event.port_change.port) >> 28) & 3, 
eqe->subtype == 0x4); break; case MTHCA_EVENT_TYPE_CQ_ERROR: mthca_warn(dev, "CQ %s on CQN %06x\n", eqe->event.cq_err.syndrome == 1 ? "overrun" : "access violation", be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff); mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn), IB_EVENT_CQ_ERR); break; case MTHCA_EVENT_TYPE_EQ_OVERFLOW: mthca_warn(dev, "EQ overrun on EQN %d\n", eq->eqn); break; case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR: case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR: case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR: case MTHCA_EVENT_TYPE_ECC_DETECT: default: mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n", eqe->type, eqe->subtype, eq->eqn); break; }; set_eqe_hw(eqe); ++eq->cons_index; eqes_found = 1; ++set_ci; /* * The HCA will think the queue has overflowed if we * don't tell it we've been processing events. We * create our EQs with MTHCA_NUM_SPARE_EQE extra * entries, so we must update our consumer index at * least that often. */ if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) { /* * Conditional on hca_type is OK here because * this is a rare case, not the fast path. */ set_eq_ci(dev, eq, eq->cons_index); set_ci = 0; } } /* * Rely on caller to set consumer index so that we don't have * to test hca_type in our interrupt handling fast path. 
*/ return eqes_found; } static irqreturn_t mthca_tavor_interrupt(int irq, void *dev_ptr) { struct mthca_dev *dev = dev_ptr; u32 ecr; int i; if (dev->eq_table.clr_mask) writel(dev->eq_table.clr_mask, dev->eq_table.clr_int); ecr = readl(dev->eq_regs.tavor.ecr_base + 4); if (!ecr) return IRQ_NONE; writel(ecr, dev->eq_regs.tavor.ecr_base + MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4); for (i = 0; i < MTHCA_NUM_EQ; ++i) if (ecr & dev->eq_table.eq[i].eqn_mask) { if (mthca_eq_int(dev, &dev->eq_table.eq[i])) tavor_set_eq_ci(dev, &dev->eq_table.eq[i], dev->eq_table.eq[i].cons_index); tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn); } return IRQ_HANDLED; } static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr) { struct mthca_eq *eq = eq_ptr; struct mthca_dev *dev = eq->dev; mthca_eq_int(dev, eq); tavor_set_eq_ci(dev, eq, eq->cons_index); tavor_eq_req_not(dev, eq->eqn); /* MSI-X vectors always belong to us */ return IRQ_HANDLED; } static irqreturn_t mthca_arbel_interrupt(int irq, void *dev_ptr) { struct mthca_dev *dev = dev_ptr; int work = 0; int i; if (dev->eq_table.clr_mask) writel(dev->eq_table.clr_mask, dev->eq_table.clr_int); for (i = 0; i < MTHCA_NUM_EQ; ++i) if (mthca_eq_int(dev, &dev->eq_table.eq[i])) { work = 1; arbel_set_eq_ci(dev, &dev->eq_table.eq[i], dev->eq_table.eq[i].cons_index); } arbel_eq_req_not(dev, dev->eq_table.arm_mask); return IRQ_RETVAL(work); } static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr) { struct mthca_eq *eq = eq_ptr; struct mthca_dev *dev = eq->dev; mthca_eq_int(dev, eq); arbel_set_eq_ci(dev, eq, eq->cons_index); arbel_eq_req_not(dev, eq->eqn_mask); /* MSI-X vectors always belong to us */ return IRQ_HANDLED; } static int mthca_create_eq(struct mthca_dev *dev, int nent, u8 intr, struct mthca_eq *eq) { int npages; u64 *dma_list = NULL; dma_addr_t t; struct mthca_mailbox *mailbox; struct mthca_eq_context *eq_context; int err = -ENOMEM; int i; u8 status; eq->dev = dev; eq->nent = roundup_pow_of_two(max(nent, 2)); 
npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE; eq->page_list = kmalloc(npages * sizeof *eq->page_list, GFP_KERNEL); if (!eq->page_list) goto err_out; for (i = 0; i < npages; ++i) eq->page_list[i].buf = NULL; dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); if (!dma_list) goto err_out_free; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) goto err_out_free; eq_context = mailbox->buf; for (i = 0; i < npages; ++i) { eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, &t, GFP_KERNEL); if (!eq->page_list[i].buf) goto err_out_free_pages; dma_list[i] = t; dma_unmap_addr_set(&eq->page_list[i], mapping, t); clear_page(eq->page_list[i].buf); } for (i = 0; i < eq->nent; ++i) set_eqe_hw(get_eqe(eq, i)); eq->eqn = mthca_alloc(&dev->eq_table.alloc); if (eq->eqn == -1) goto err_out_free_pages; err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num, dma_list, PAGE_SHIFT, npages, 0, npages * PAGE_SIZE, MTHCA_MPT_FLAG_LOCAL_WRITE | MTHCA_MPT_FLAG_LOCAL_READ, &eq->mr); if (err) goto err_out_free_eq; memset(eq_context, 0, sizeof *eq_context); eq_context->flags = cpu_to_be32(MTHCA_EQ_STATUS_OK | MTHCA_EQ_OWNER_HW | MTHCA_EQ_STATE_ARMED | MTHCA_EQ_FLAG_TR); if (mthca_is_memfree(dev)) eq_context->flags |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL); eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24); if (mthca_is_memfree(dev)) { eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num); } else { eq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index); eq_context->tavor_pd = cpu_to_be32(dev->driver_pd.pd_num); } eq_context->intr = intr; eq_context->lkey = cpu_to_be32(eq->mr.ibmr.lkey); err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status); if (err) { mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err); goto err_out_free_mr; } if (status) { mthca_warn(dev, "SW2HW_EQ returned status 0x%02x\n", status); err = -EINVAL; goto err_out_free_mr; } kfree(dma_list); mthca_free_mailbox(dev, mailbox); eq->eqn_mask = 
swab32(1 << eq->eqn); eq->cons_index = 0; dev->eq_table.arm_mask |= eq->eqn_mask; mthca_dbg(dev, "Allocated EQ %d with %d entries\n", eq->eqn, eq->nent); return err; err_out_free_mr: mthca_free_mr(dev, &eq->mr); err_out_free_eq: mthca_free(&dev->eq_table.alloc, eq->eqn); err_out_free_pages: for (i = 0; i < npages; ++i) if (eq->page_list[i].buf) dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, eq->page_list[i].buf, dma_unmap_addr(&eq->page_list[i], mapping)); mthca_free_mailbox(dev, mailbox); err_out_free: kfree(eq->page_list); kfree(dma_list); err_out: return err; } static void mthca_free_eq(struct mthca_dev *dev, struct mthca_eq *eq) { struct mthca_mailbox *mailbox; int err; u8 status; int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) / PAGE_SIZE; int i; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return; err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status); if (err) mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err); if (status) mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n", status); dev->eq_table.arm_mask &= ~eq->eqn_mask; if (0) { mthca_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn); for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) { if (i % 4 == 0) printk("[%02x] ", i * 4); printk(" %08x", be32_to_cpup(mailbox->buf + i * 4)); if ((i + 1) % 4 == 0) printk("\n"); } } mthca_free_mr(dev, &eq->mr); for (i = 0; i < npages; ++i) pci_free_consistent(dev->pdev, PAGE_SIZE, eq->page_list[i].buf, dma_unmap_addr(&eq->page_list[i], mapping)); kfree(eq->page_list); mthca_free_mailbox(dev, mailbox); } static void mthca_free_irqs(struct mthca_dev *dev) { int i; if (dev->eq_table.have_irq) free_irq(dev->pdev->irq, dev); for (i = 0; i < MTHCA_NUM_EQ; ++i) if (dev->eq_table.eq[i].have_irq) { free_irq(dev->eq_table.eq[i].msi_x_vector, dev->eq_table.eq + i); dev->eq_table.eq[i].have_irq = 0; } } static int mthca_map_reg(struct mthca_dev *dev, unsigned long offset, unsigned long size, void __iomem **map) { phys_addr_t base = 
pci_resource_start(dev->pdev, 0); *map = ioremap(base + offset, size); if (!*map) return -ENOMEM; return 0; } static int mthca_map_eq_regs(struct mthca_dev *dev) { if (mthca_is_memfree(dev)) { /* * We assume that the EQ arm and EQ set CI registers * fall within the first BAR. We can't trust the * values firmware gives us, since those addresses are * valid on the HCA's side of the PCI bus but not * necessarily the host side. */ if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) & dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE, &dev->clr_base)) { mthca_err(dev, "Couldn't map interrupt clear register, " "aborting.\n"); return -ENOMEM; } /* * Add 4 because we limit ourselves to EQs 0 ... 31, * so we only need the low word of the register. */ if (mthca_map_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) & dev->fw.arbel.eq_arm_base) + 4, 4, &dev->eq_regs.arbel.eq_arm)) { mthca_err(dev, "Couldn't map EQ arm register, aborting.\n"); iounmap(dev->clr_base); return -ENOMEM; } if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) & dev->fw.arbel.eq_set_ci_base, MTHCA_EQ_SET_CI_SIZE, &dev->eq_regs.arbel.eq_set_ci_base)) { mthca_err(dev, "Couldn't map EQ CI register, aborting.\n"); iounmap(dev->eq_regs.arbel.eq_arm); iounmap(dev->clr_base); return -ENOMEM; } } else { if (mthca_map_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE, &dev->clr_base)) { mthca_err(dev, "Couldn't map interrupt clear register, " "aborting.\n"); return -ENOMEM; } if (mthca_map_reg(dev, MTHCA_ECR_BASE, MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE, &dev->eq_regs.tavor.ecr_base)) { mthca_err(dev, "Couldn't map ecr register, " "aborting.\n"); iounmap(dev->clr_base); return -ENOMEM; } } return 0; } static void mthca_unmap_eq_regs(struct mthca_dev *dev) { if (mthca_is_memfree(dev)) { iounmap(dev->eq_regs.arbel.eq_set_ci_base); iounmap(dev->eq_regs.arbel.eq_arm); iounmap(dev->clr_base); } else { iounmap(dev->eq_regs.tavor.ecr_base); iounmap(dev->clr_base); } } int mthca_map_eq_icm(struct mthca_dev *dev, 
u64 icm_virt) { int ret; u8 status; /* * We assume that mapping one page is enough for the whole EQ * context table. This is fine with all current HCAs, because * we only use 32 EQs and each EQ uses 32 bytes of context * memory, or 1 KB total. */ dev->eq_table.icm_virt = icm_virt; dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER); if (!dev->eq_table.icm_page) return -ENOMEM; dev->eq_table.icm_dma = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) { __free_page(dev->eq_table.icm_page); return -ENOMEM; } ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt, &status); if (!ret && status) ret = -EINVAL; if (ret) { pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); __free_page(dev->eq_table.icm_page); } return ret; } void mthca_unmap_eq_icm(struct mthca_dev *dev) { u8 status; mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1, &status); pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); __free_page(dev->eq_table.icm_page); } int mthca_init_eq_table(struct mthca_dev *dev) { int err; u8 status; u8 intr; int i; err = mthca_alloc_init(&dev->eq_table.alloc, dev->limits.num_eqs, dev->limits.num_eqs - 1, dev->limits.reserved_eqs); if (err) return err; err = mthca_map_eq_regs(dev); if (err) goto err_out_free; if (dev->mthca_flags & MTHCA_FLAG_MSI_X) { dev->eq_table.clr_mask = 0; } else { dev->eq_table.clr_mask = swab32(1 << (dev->eq_table.inta_pin & 31)); dev->eq_table.clr_int = dev->clr_base + (dev->eq_table.inta_pin < 32 ? 4 : 0); } dev->eq_table.arm_mask = 0; intr = dev->eq_table.inta_pin; err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE, (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr, &dev->eq_table.eq[MTHCA_EQ_COMP]); if (err) goto err_out_unmap; err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE, (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 
129 : intr, &dev->eq_table.eq[MTHCA_EQ_ASYNC]); if (err) goto err_out_comp; err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE, (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr, &dev->eq_table.eq[MTHCA_EQ_CMD]); if (err) goto err_out_async; if (dev->mthca_flags & MTHCA_FLAG_MSI_X) { static const char *eq_name[] = { [MTHCA_EQ_COMP] = DRV_NAME "-comp", [MTHCA_EQ_ASYNC] = DRV_NAME "-async", [MTHCA_EQ_CMD] = DRV_NAME "-cmd" }; for (i = 0; i < MTHCA_NUM_EQ; ++i) { snprintf(dev->eq_table.eq[i].irq_name, IB_DEVICE_NAME_MAX, "%s@pci:%s", eq_name[i], pci_name(dev->pdev)); err = request_irq(dev->eq_table.eq[i].msi_x_vector, mthca_is_memfree(dev) ? mthca_arbel_msi_x_interrupt : mthca_tavor_msi_x_interrupt, 0, dev->eq_table.eq[i].irq_name, dev->eq_table.eq + i); if (err) goto err_out_cmd; dev->eq_table.eq[i].have_irq = 1; } } else { snprintf(dev->eq_table.eq[0].irq_name, IB_DEVICE_NAME_MAX, DRV_NAME "@pci:%s", pci_name(dev->pdev)); err = request_irq(dev->pdev->irq, mthca_is_memfree(dev) ? 
mthca_arbel_interrupt : mthca_tavor_interrupt, IRQF_SHARED, dev->eq_table.eq[0].irq_name, dev); if (err) goto err_out_cmd; dev->eq_table.have_irq = 1; } err = mthca_MAP_EQ(dev, async_mask(dev), 0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status); if (err) mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err); if (status) mthca_warn(dev, "MAP_EQ for async EQ %d returned status 0x%02x\n", dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status); err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK, 0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status); if (err) mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n", dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err); if (status) mthca_warn(dev, "MAP_EQ for cmd EQ %d returned status 0x%02x\n", dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status); for (i = 0; i < MTHCA_NUM_EQ; ++i) if (mthca_is_memfree(dev)) arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask); else tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn); return 0; err_out_cmd: mthca_free_irqs(dev); mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]); err_out_async: mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]); err_out_comp: mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]); err_out_unmap: mthca_unmap_eq_regs(dev); err_out_free: mthca_alloc_cleanup(&dev->eq_table.alloc); return err; } void mthca_cleanup_eq_table(struct mthca_dev *dev) { u8 status; int i; mthca_free_irqs(dev); mthca_MAP_EQ(dev, async_mask(dev), 1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status); mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK, 1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status); for (i = 0; i < MTHCA_NUM_EQ; ++i) mthca_free_eq(dev, &dev->eq_table.eq[i]); mthca_unmap_eq_regs(dev); mthca_alloc_cleanup(&dev->eq_table.alloc); }
gpl-2.0
omnirom/android_kernel_huawei_angler
arch/arm/mach-at91/board-csb637.c
2636
3520
/* * linux/arch/arm/mach-at91/board-csb637.c * * Copyright (C) 2005 SAN People * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/types.h> #include <linux/init.h> #include <linux/gpio.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/mtd/physmap.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include "at91_aic.h" #include "board.h" #include "generic.h" static void __init csb637_init_early(void) { /* Initialize processor: 3.6864 MHz crystal */ at91_initialize(3686400); } static struct macb_platform_data __initdata csb637_eth_data = { .phy_irq_pin = AT91_PIN_PC0, .is_rmii = 0, }; static struct at91_usbh_data __initdata csb637_usbh_data = { .ports = 2, .vbus_pin = {-EINVAL, -EINVAL}, .overcurrent_pin= {-EINVAL, -EINVAL}, }; static struct at91_udc_data __initdata csb637_udc_data = { .vbus_pin = AT91_PIN_PB28, .pullup_pin = AT91_PIN_PB1, }; #define CSB_FLASH_BASE AT91_CHIPSELECT_0 #define CSB_FLASH_SIZE SZ_16M static struct mtd_partition csb_flash_partitions[] = { { .name = "uMON flash", .offset = 0, .size = MTDPART_SIZ_FULL, .mask_flags = MTD_WRITEABLE, /* read only */ } }; static struct physmap_flash_data csb_flash_data = { .width 
= 2, .parts = csb_flash_partitions, .nr_parts = ARRAY_SIZE(csb_flash_partitions), }; static struct resource csb_flash_resources[] = { { .start = CSB_FLASH_BASE, .end = CSB_FLASH_BASE + CSB_FLASH_SIZE - 1, .flags = IORESOURCE_MEM, } }; static struct platform_device csb_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &csb_flash_data, }, .resource = csb_flash_resources, .num_resources = ARRAY_SIZE(csb_flash_resources), }; static struct gpio_led csb_leds[] = { { /* "d1", red */ .name = "d1", .gpio = AT91_PIN_PB2, .active_low = 1, .default_trigger = "heartbeat", }, }; static void __init csb637_board_init(void) { /* LED(s) */ at91_gpio_leds(csb_leds, ARRAY_SIZE(csb_leds)); /* Serial */ /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); at91_add_device_serial(); /* Ethernet */ at91_add_device_eth(&csb637_eth_data); /* USB Host */ at91_add_device_usbh(&csb637_usbh_data); /* USB Device */ at91_add_device_udc(&csb637_udc_data); /* I2C */ at91_add_device_i2c(NULL, 0); /* SPI */ at91_add_device_spi(NULL, 0); /* NOR flash */ platform_device_register(&csb_flash); } MACHINE_START(CSB637, "Cogent CSB637") /* Maintainer: Bill Gatliff */ .init_time = at91rm9200_timer_init, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = csb637_init_early, .init_irq = at91_init_irq_default, .init_machine = csb637_board_init, MACHINE_END
gpl-2.0
MTDEV-KERNEL/msm_kernel
arch/ia64/sn/kernel/sn2/sn_hwperf.c
2892
23215
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2004-2006 Silicon Graphics, Inc. All rights reserved. * * SGI Altix topology and hardware performance monitoring API. * Mark Goodwin <markgw@sgi.com>. * * Creates /proc/sgi_sn/sn_topology (read-only) to export * info about Altix nodes, routers, CPUs and NumaLink * interconnection/topology. * * Also creates a dynamic misc device named "sn_hwperf" * that supports an ioctl interface to call down into SAL * to discover hw objects, topology and to read/write * memory mapped registers, e.g. for performance monitoring. * The "sn_hwperf" device is registered only after the procfs * file is first opened, i.e. only if/when it's needed. * * This API is used by SGI Performance Co-Pilot and other * tools, see http://oss.sgi.com/projects/pcp */ #include <linux/fs.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/seq_file.h> #include <linux/miscdevice.h> #include <linux/utsname.h> #include <linux/cpumask.h> #include <linux/nodemask.h> #include <linux/smp.h> #include <linux/mutex.h> #include <asm/processor.h> #include <asm/topology.h> #include <asm/uaccess.h> #include <asm/sal.h> #include <asm/sn/io.h> #include <asm/sn/sn_sal.h> #include <asm/sn/module.h> #include <asm/sn/geo.h> #include <asm/sn/sn2/sn_hwperf.h> #include <asm/sn/addrs.h> static void *sn_hwperf_salheap = NULL; static int sn_hwperf_obj_cnt = 0; static nasid_t sn_hwperf_master_nasid = INVALID_NASID; static int sn_hwperf_init(void); static DEFINE_MUTEX(sn_hwperf_init_mutex); #define cnode_possible(n) ((n) < num_cnodes) static int sn_hwperf_enum_objects(int *nobj, struct sn_hwperf_object_info **ret) { int e; u64 sz; struct sn_hwperf_object_info *objbuf = NULL; if ((e = sn_hwperf_init()) < 0) { printk(KERN_ERR "sn_hwperf_init failed: err %d\n", e); goto out; } sz = sn_hwperf_obj_cnt * sizeof(struct 
sn_hwperf_object_info); objbuf = vmalloc(sz); if (objbuf == NULL) { printk("sn_hwperf_enum_objects: vmalloc(%d) failed\n", (int)sz); e = -ENOMEM; goto out; } e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, SN_HWPERF_ENUM_OBJECTS, 0, sz, (u64) objbuf, 0, 0, NULL); if (e != SN_HWPERF_OP_OK) { e = -EINVAL; vfree(objbuf); } out: *nobj = sn_hwperf_obj_cnt; *ret = objbuf; return e; } static int sn_hwperf_location_to_bpos(char *location, int *rack, int *bay, int *slot, int *slab) { char type; /* first scan for an old style geoid string */ if (sscanf(location, "%03d%c%02d#%d", rack, &type, bay, slab) == 4) *slot = 0; else /* scan for a new bladed geoid string */ if (sscanf(location, "%03d%c%02d^%02d#%d", rack, &type, bay, slot, slab) != 5) return -1; /* success */ return 0; } static int sn_hwperf_geoid_to_cnode(char *location) { int cnode; geoid_t geoid; moduleid_t module_id; int rack, bay, slot, slab; int this_rack, this_bay, this_slot, this_slab; if (sn_hwperf_location_to_bpos(location, &rack, &bay, &slot, &slab)) return -1; /* * FIXME: replace with cleaner for_each_XXX macro which addresses * both compute and IO nodes once ACPI3.0 is available. */ for (cnode = 0; cnode < num_cnodes; cnode++) { geoid = cnodeid_get_geoid(cnode); module_id = geo_module(geoid); this_rack = MODULE_GET_RACK(module_id); this_bay = MODULE_GET_BPOS(module_id); this_slot = geo_slot(geoid); this_slab = geo_slab(geoid); if (rack == this_rack && bay == this_bay && slot == this_slot && slab == this_slab) { break; } } return cnode_possible(cnode) ? 
cnode : -1; } static int sn_hwperf_obj_to_cnode(struct sn_hwperf_object_info * obj) { if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj)) BUG(); if (SN_HWPERF_FOREIGN(obj)) return -1; return sn_hwperf_geoid_to_cnode(obj->location); } static int sn_hwperf_generic_ordinal(struct sn_hwperf_object_info *obj, struct sn_hwperf_object_info *objs) { int ordinal; struct sn_hwperf_object_info *p; for (ordinal=0, p=objs; p != obj; p++) { if (SN_HWPERF_FOREIGN(p)) continue; if (SN_HWPERF_SAME_OBJTYPE(p, obj)) ordinal++; } return ordinal; } static const char *slabname_node = "node"; /* SHub asic */ static const char *slabname_ionode = "ionode"; /* TIO asic */ static const char *slabname_router = "router"; /* NL3R or NL4R */ static const char *slabname_other = "other"; /* unknown asic */ static const char *sn_hwperf_get_slabname(struct sn_hwperf_object_info *obj, struct sn_hwperf_object_info *objs, int *ordinal) { int isnode; const char *slabname = slabname_other; if ((isnode = SN_HWPERF_IS_NODE(obj)) || SN_HWPERF_IS_IONODE(obj)) { slabname = isnode ? 
slabname_node : slabname_ionode; *ordinal = sn_hwperf_obj_to_cnode(obj); } else { *ordinal = sn_hwperf_generic_ordinal(obj, objs); if (SN_HWPERF_IS_ROUTER(obj)) slabname = slabname_router; } return slabname; } static void print_pci_topology(struct seq_file *s) { char *p; size_t sz; int e; for (sz = PAGE_SIZE; sz < 16 * PAGE_SIZE; sz += PAGE_SIZE) { if (!(p = kmalloc(sz, GFP_KERNEL))) break; e = ia64_sn_ioif_get_pci_topology(__pa(p), sz); if (e == SALRET_OK) seq_puts(s, p); kfree(p); if (e == SALRET_OK || e == SALRET_NOT_IMPLEMENTED) break; } } static inline int sn_hwperf_has_cpus(cnodeid_t node) { return node < MAX_NUMNODES && node_online(node) && nr_cpus_node(node); } static inline int sn_hwperf_has_mem(cnodeid_t node) { return node < MAX_NUMNODES && node_online(node) && NODE_DATA(node)->node_present_pages; } static struct sn_hwperf_object_info * sn_hwperf_findobj_id(struct sn_hwperf_object_info *objbuf, int nobj, int id) { int i; struct sn_hwperf_object_info *p = objbuf; for (i=0; i < nobj; i++, p++) { if (p->id == id) return p; } return NULL; } static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objbuf, int nobj, cnodeid_t node, cnodeid_t *near_mem_node, cnodeid_t *near_cpu_node) { int e; struct sn_hwperf_object_info *nodeobj = NULL; struct sn_hwperf_object_info *op; struct sn_hwperf_object_info *dest; struct sn_hwperf_object_info *router; struct sn_hwperf_port_info ptdata[16]; int sz, i, j; cnodeid_t c; int found_mem = 0; int found_cpu = 0; if (!cnode_possible(node)) return -EINVAL; if (sn_hwperf_has_cpus(node)) { if (near_cpu_node) *near_cpu_node = node; found_cpu++; } if (sn_hwperf_has_mem(node)) { if (near_mem_node) *near_mem_node = node; found_mem++; } if (found_cpu && found_mem) return 0; /* trivially successful */ /* find the argument node object */ for (i=0, op=objbuf; i < nobj; i++, op++) { if (!SN_HWPERF_IS_NODE(op) && !SN_HWPERF_IS_IONODE(op)) continue; if (node == sn_hwperf_obj_to_cnode(op)) { nodeobj = op; break; } } if 
(!nodeobj) { e = -ENOENT; goto err; } /* get it's interconnect topology */ sz = op->ports * sizeof(struct sn_hwperf_port_info); BUG_ON(sz > sizeof(ptdata)); e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, SN_HWPERF_ENUM_PORTS, nodeobj->id, sz, (u64)&ptdata, 0, 0, NULL); if (e != SN_HWPERF_OP_OK) { e = -EINVAL; goto err; } /* find nearest node with cpus and nearest memory */ for (router=NULL, j=0; j < op->ports; j++) { dest = sn_hwperf_findobj_id(objbuf, nobj, ptdata[j].conn_id); if (dest && SN_HWPERF_IS_ROUTER(dest)) router = dest; if (!dest || SN_HWPERF_FOREIGN(dest) || !SN_HWPERF_IS_NODE(dest) || SN_HWPERF_IS_IONODE(dest)) { continue; } c = sn_hwperf_obj_to_cnode(dest); if (!found_cpu && sn_hwperf_has_cpus(c)) { if (near_cpu_node) *near_cpu_node = c; found_cpu++; } if (!found_mem && sn_hwperf_has_mem(c)) { if (near_mem_node) *near_mem_node = c; found_mem++; } } if (router && (!found_cpu || !found_mem)) { /* search for a node connected to the same router */ sz = router->ports * sizeof(struct sn_hwperf_port_info); BUG_ON(sz > sizeof(ptdata)); e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, SN_HWPERF_ENUM_PORTS, router->id, sz, (u64)&ptdata, 0, 0, NULL); if (e != SN_HWPERF_OP_OK) { e = -EINVAL; goto err; } for (j=0; j < router->ports; j++) { dest = sn_hwperf_findobj_id(objbuf, nobj, ptdata[j].conn_id); if (!dest || dest->id == node || SN_HWPERF_FOREIGN(dest) || !SN_HWPERF_IS_NODE(dest) || SN_HWPERF_IS_IONODE(dest)) { continue; } c = sn_hwperf_obj_to_cnode(dest); if (!found_cpu && sn_hwperf_has_cpus(c)) { if (near_cpu_node) *near_cpu_node = c; found_cpu++; } if (!found_mem && sn_hwperf_has_mem(c)) { if (near_mem_node) *near_mem_node = c; found_mem++; } if (found_cpu && found_mem) break; } } if (!found_cpu || !found_mem) { /* resort to _any_ node with CPUs and memory */ for (i=0, op=objbuf; i < nobj; i++, op++) { if (SN_HWPERF_FOREIGN(op) || SN_HWPERF_IS_IONODE(op) || !SN_HWPERF_IS_NODE(op)) { continue; } c = sn_hwperf_obj_to_cnode(op); if (!found_cpu && 
sn_hwperf_has_cpus(c)) { if (near_cpu_node) *near_cpu_node = c; found_cpu++; } if (!found_mem && sn_hwperf_has_mem(c)) { if (near_mem_node) *near_mem_node = c; found_mem++; } if (found_cpu && found_mem) break; } } if (!found_cpu || !found_mem) e = -ENODATA; err: return e; } static int sn_topology_show(struct seq_file *s, void *d) { int sz; int pt; int e = 0; int i; int j; const char *slabname; int ordinal; char slice; struct cpuinfo_ia64 *c; struct sn_hwperf_port_info *ptdata; struct sn_hwperf_object_info *p; struct sn_hwperf_object_info *obj = d; /* this object */ struct sn_hwperf_object_info *objs = s->private; /* all objects */ u8 shubtype; u8 system_size; u8 sharing_size; u8 partid; u8 coher; u8 nasid_shift; u8 region_size; u16 nasid_mask; int nasid_msb; if (obj == objs) { seq_printf(s, "# sn_topology version 2\n"); seq_printf(s, "# objtype ordinal location partition" " [attribute value [, ...]]\n"); if (ia64_sn_get_sn_info(0, &shubtype, &nasid_mask, &nasid_shift, &system_size, &sharing_size, &partid, &coher, &region_size)) BUG(); for (nasid_msb=63; nasid_msb > 0; nasid_msb--) { if (((u64)nasid_mask << nasid_shift) & (1ULL << nasid_msb)) break; } seq_printf(s, "partition %u %s local " "shubtype %s, " "nasid_mask 0x%016llx, " "nasid_bits %d:%d, " "system_size %d, " "sharing_size %d, " "coherency_domain %d, " "region_size %d\n", partid, utsname()->nodename, shubtype ? "shub2" : "shub1", (u64)nasid_mask << nasid_shift, nasid_msb, nasid_shift, system_size, sharing_size, coher, region_size); print_pci_topology(s); } if (SN_HWPERF_FOREIGN(obj)) { /* private in another partition: not interesting */ return 0; } for (i = 0; i < SN_HWPERF_MAXSTRING && obj->name[i]; i++) { if (obj->name[i] == ' ') obj->name[i] = '_'; } slabname = sn_hwperf_get_slabname(obj, objs, &ordinal); seq_printf(s, "%s %d %s %s asic %s", slabname, ordinal, obj->location, obj->sn_hwp_this_part ? 
"local" : "shared", obj->name); if (ordinal < 0 || (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj))) seq_putc(s, '\n'); else { cnodeid_t near_mem = -1; cnodeid_t near_cpu = -1; seq_printf(s, ", nasid 0x%x", cnodeid_to_nasid(ordinal)); if (sn_hwperf_get_nearest_node_objdata(objs, sn_hwperf_obj_cnt, ordinal, &near_mem, &near_cpu) == 0) { seq_printf(s, ", near_mem_nodeid %d, near_cpu_nodeid %d", near_mem, near_cpu); } if (!SN_HWPERF_IS_IONODE(obj)) { for_each_online_node(i) { seq_printf(s, i ? ":%d" : ", dist %d", node_distance(ordinal, i)); } } seq_putc(s, '\n'); /* * CPUs on this node, if any */ if (!SN_HWPERF_IS_IONODE(obj)) { for_each_cpu_and(i, cpu_online_mask, cpumask_of_node(ordinal)) { slice = 'a' + cpuid_to_slice(i); c = cpu_data(i); seq_printf(s, "cpu %d %s%c local" " freq %luMHz, arch ia64", i, obj->location, slice, c->proc_freq / 1000000); for_each_online_cpu(j) { seq_printf(s, j ? ":%d" : ", dist %d", node_distance( cpu_to_node(i), cpu_to_node(j))); } seq_putc(s, '\n'); } } } if (obj->ports) { /* * numalink ports */ sz = obj->ports * sizeof(struct sn_hwperf_port_info); if ((ptdata = kmalloc(sz, GFP_KERNEL)) == NULL) return -ENOMEM; e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, SN_HWPERF_ENUM_PORTS, obj->id, sz, (u64) ptdata, 0, 0, NULL); if (e != SN_HWPERF_OP_OK) return -EINVAL; for (ordinal=0, p=objs; p != obj; p++) { if (!SN_HWPERF_FOREIGN(p)) ordinal += p->ports; } for (pt = 0; pt < obj->ports; pt++) { for (p = objs, i = 0; i < sn_hwperf_obj_cnt; i++, p++) { if (ptdata[pt].conn_id == p->id) { break; } } seq_printf(s, "numalink %d %s-%d", ordinal+pt, obj->location, ptdata[pt].port); if (i >= sn_hwperf_obj_cnt) { /* no connection */ seq_puts(s, " local endpoint disconnected" ", protocol unknown\n"); continue; } if (obj->sn_hwp_this_part && p->sn_hwp_this_part) /* both ends local to this partition */ seq_puts(s, " local"); else if (SN_HWPERF_FOREIGN(p)) /* both ends of the link in foreign partiton */ seq_puts(s, " foreign"); else /* link straddles a 
partition */ seq_puts(s, " shared"); /* * Unlikely, but strictly should query the LLP config * registers because an NL4R can be configured to run * NL3 protocol, even when not talking to an NL3 router. * Ditto for node-node. */ seq_printf(s, " endpoint %s-%d, protocol %s\n", p->location, ptdata[pt].conn_port, (SN_HWPERF_IS_NL3ROUTER(obj) || SN_HWPERF_IS_NL3ROUTER(p)) ? "LLP3" : "LLP4"); } kfree(ptdata); } return 0; } static void *sn_topology_start(struct seq_file *s, loff_t * pos) { struct sn_hwperf_object_info *objs = s->private; if (*pos < sn_hwperf_obj_cnt) return (void *)(objs + *pos); return NULL; } static void *sn_topology_next(struct seq_file *s, void *v, loff_t * pos) { ++*pos; return sn_topology_start(s, pos); } static void sn_topology_stop(struct seq_file *m, void *v) { return; } /* * /proc/sgi_sn/sn_topology, read-only using seq_file */ static const struct seq_operations sn_topology_seq_ops = { .start = sn_topology_start, .next = sn_topology_next, .stop = sn_topology_stop, .show = sn_topology_show }; struct sn_hwperf_op_info { u64 op; struct sn_hwperf_ioctl_args *a; void *p; int *v0; int ret; }; static void sn_hwperf_call_sal(void *info) { struct sn_hwperf_op_info *op_info = info; int r; r = ia64_sn_hwperf_op(sn_hwperf_master_nasid, op_info->op, op_info->a->arg, op_info->a->sz, (u64) op_info->p, 0, 0, op_info->v0); op_info->ret = r; } static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info) { u32 cpu; u32 use_ipi; int r = 0; cpumask_t save_allowed; cpu = (op_info->a->arg & SN_HWPERF_ARG_CPU_MASK) >> 32; use_ipi = op_info->a->arg & SN_HWPERF_ARG_USE_IPI_MASK; op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK; if (cpu != SN_HWPERF_ARG_ANY_CPU) { if (cpu >= nr_cpu_ids || !cpu_online(cpu)) { r = -EINVAL; goto out; } } if (cpu == SN_HWPERF_ARG_ANY_CPU || cpu == get_cpu()) { /* don't care, or already on correct cpu */ sn_hwperf_call_sal(op_info); } else { if (use_ipi) { /* use an interprocessor interrupt to call SAL */ smp_call_function_single(cpu, 
sn_hwperf_call_sal, op_info, 1); } else { /* migrate the task before calling SAL */ save_allowed = current->cpus_allowed; set_cpus_allowed_ptr(current, cpumask_of(cpu)); sn_hwperf_call_sal(op_info); set_cpus_allowed_ptr(current, &save_allowed); } } r = op_info->ret; out: return r; } /* map SAL hwperf error code to system error code */ static int sn_hwperf_map_err(int hwperf_err) { int e; switch(hwperf_err) { case SN_HWPERF_OP_OK: e = 0; break; case SN_HWPERF_OP_NOMEM: e = -ENOMEM; break; case SN_HWPERF_OP_NO_PERM: e = -EPERM; break; case SN_HWPERF_OP_IO_ERROR: e = -EIO; break; case SN_HWPERF_OP_BUSY: e = -EBUSY; break; case SN_HWPERF_OP_RECONFIGURE: e = -EAGAIN; break; case SN_HWPERF_OP_INVAL: default: e = -EINVAL; break; } return e; } /* * ioctl for "sn_hwperf" misc device */ static long sn_hwperf_ioctl(struct file *fp, u32 op, unsigned long arg) { struct sn_hwperf_ioctl_args a; struct cpuinfo_ia64 *cdata; struct sn_hwperf_object_info *objs; struct sn_hwperf_object_info *cpuobj; struct sn_hwperf_op_info op_info; void *p = NULL; int nobj; char slice; int node; int r; int v0; int i; int j; /* only user requests are allowed here */ if ((op & SN_HWPERF_OP_MASK) < 10) { r = -EINVAL; goto error; } r = copy_from_user(&a, (const void __user *)arg, sizeof(struct sn_hwperf_ioctl_args)); if (r != 0) { r = -EFAULT; goto error; } /* * Allocate memory to hold a kernel copy of the user buffer. The * buffer contents are either copied in or out (or both) of user * space depending on the flags encoded in the requested operation. 
*/ if (a.ptr) { p = vmalloc(a.sz); if (!p) { r = -ENOMEM; goto error; } } if (op & SN_HWPERF_OP_MEM_COPYIN) { r = copy_from_user(p, (const void __user *)a.ptr, a.sz); if (r != 0) { r = -EFAULT; goto error; } } switch (op) { case SN_HWPERF_GET_CPU_INFO: if (a.sz == sizeof(u64)) { /* special case to get size needed */ *(u64 *) p = (u64) num_online_cpus() * sizeof(struct sn_hwperf_object_info); } else if (a.sz < num_online_cpus() * sizeof(struct sn_hwperf_object_info)) { r = -ENOMEM; goto error; } else if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) { int cpuobj_index = 0; memset(p, 0, a.sz); for (i = 0; i < nobj; i++) { if (!SN_HWPERF_IS_NODE(objs + i)) continue; node = sn_hwperf_obj_to_cnode(objs + i); for_each_online_cpu(j) { if (node != cpu_to_node(j)) continue; cpuobj = (struct sn_hwperf_object_info *) p + cpuobj_index++; slice = 'a' + cpuid_to_slice(j); cdata = cpu_data(j); cpuobj->id = j; snprintf(cpuobj->name, sizeof(cpuobj->name), "CPU %luMHz %s", cdata->proc_freq / 1000000, cdata->vendor); snprintf(cpuobj->location, sizeof(cpuobj->location), "%s%c", objs[i].location, slice); } } vfree(objs); } break; case SN_HWPERF_GET_NODE_NASID: if (a.sz != sizeof(u64) || (node = a.arg) < 0 || !cnode_possible(node)) { r = -EINVAL; goto error; } *(u64 *)p = (u64)cnodeid_to_nasid(node); break; case SN_HWPERF_GET_OBJ_NODE: i = a.arg; if (a.sz != sizeof(u64) || i < 0) { r = -EINVAL; goto error; } if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) { if (i >= nobj) { r = -EINVAL; vfree(objs); goto error; } if (objs[i].id != a.arg) { for (i = 0; i < nobj; i++) { if (objs[i].id == a.arg) break; } } if (i == nobj) { r = -EINVAL; vfree(objs); goto error; } if (!SN_HWPERF_IS_NODE(objs + i) && !SN_HWPERF_IS_IONODE(objs + i)) { r = -ENOENT; vfree(objs); goto error; } *(u64 *)p = (u64)sn_hwperf_obj_to_cnode(objs + i); vfree(objs); } break; case SN_HWPERF_GET_MMRS: case SN_HWPERF_SET_MMRS: case SN_HWPERF_OBJECT_DISTANCE: op_info.p = p; op_info.a = &a; op_info.v0 = &v0; op_info.op 
= op; r = sn_hwperf_op_cpu(&op_info); if (r) { r = sn_hwperf_map_err(r); a.v0 = v0; goto error; } break; default: /* all other ops are a direct SAL call */ r = ia64_sn_hwperf_op(sn_hwperf_master_nasid, op, a.arg, a.sz, (u64) p, 0, 0, &v0); if (r) { r = sn_hwperf_map_err(r); goto error; } a.v0 = v0; break; } if (op & SN_HWPERF_OP_MEM_COPYOUT) { r = copy_to_user((void __user *)a.ptr, p, a.sz); if (r != 0) { r = -EFAULT; goto error; } } error: vfree(p); return r; } static const struct file_operations sn_hwperf_fops = { .unlocked_ioctl = sn_hwperf_ioctl, .llseek = noop_llseek, }; static struct miscdevice sn_hwperf_dev = { MISC_DYNAMIC_MINOR, "sn_hwperf", &sn_hwperf_fops }; static int sn_hwperf_init(void) { u64 v; int salr; int e = 0; /* single threaded, once-only initialization */ mutex_lock(&sn_hwperf_init_mutex); if (sn_hwperf_salheap) { mutex_unlock(&sn_hwperf_init_mutex); return e; } /* * The PROM code needs a fixed reference node. For convenience the * same node as the console I/O is used. */ sn_hwperf_master_nasid = (nasid_t) ia64_sn_get_console_nasid(); /* * Request the needed size and install the PROM scratch area. * The PROM keeps various tracking bits in this memory area. 
*/ salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid, (u64) SN_HWPERF_GET_HEAPSIZE, 0, (u64) sizeof(u64), (u64) &v, 0, 0, NULL); if (salr != SN_HWPERF_OP_OK) { e = -EINVAL; goto out; } if ((sn_hwperf_salheap = vmalloc(v)) == NULL) { e = -ENOMEM; goto out; } salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid, SN_HWPERF_INSTALL_HEAP, 0, v, (u64) sn_hwperf_salheap, 0, 0, NULL); if (salr != SN_HWPERF_OP_OK) { e = -EINVAL; goto out; } salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid, SN_HWPERF_OBJECT_COUNT, 0, sizeof(u64), (u64) &v, 0, 0, NULL); if (salr != SN_HWPERF_OP_OK) { e = -EINVAL; goto out; } sn_hwperf_obj_cnt = (int)v; out: if (e < 0 && sn_hwperf_salheap) { vfree(sn_hwperf_salheap); sn_hwperf_salheap = NULL; sn_hwperf_obj_cnt = 0; } mutex_unlock(&sn_hwperf_init_mutex); return e; } int sn_topology_open(struct inode *inode, struct file *file) { int e; struct seq_file *seq; struct sn_hwperf_object_info *objbuf; int nobj; if ((e = sn_hwperf_enum_objects(&nobj, &objbuf)) == 0) { e = seq_open(file, &sn_topology_seq_ops); seq = file->private_data; seq->private = objbuf; } return e; } int sn_topology_release(struct inode *inode, struct file *file) { struct seq_file *seq = file->private_data; vfree(seq->private); return seq_release(inode, file); } int sn_hwperf_get_nearest_node(cnodeid_t node, cnodeid_t *near_mem_node, cnodeid_t *near_cpu_node) { int e; int nobj; struct sn_hwperf_object_info *objbuf; if ((e = sn_hwperf_enum_objects(&nobj, &objbuf)) == 0) { e = sn_hwperf_get_nearest_node_objdata(objbuf, nobj, node, near_mem_node, near_cpu_node); vfree(objbuf); } return e; } static int __devinit sn_hwperf_misc_register_init(void) { int e; if (!ia64_platform_is("sn2")) return 0; sn_hwperf_init(); /* * Register a dynamic misc device for hwperf ioctls. Platforms * supporting hotplug will create /dev/sn_hwperf, else user * can to look up the minor number in /proc/misc. 
*/ if ((e = misc_register(&sn_hwperf_dev)) != 0) { printk(KERN_ERR "sn_hwperf_misc_register_init: failed to " "register misc device for \"%s\"\n", sn_hwperf_dev.name); } return e; } device_initcall(sn_hwperf_misc_register_init); /* after misc_init() */ EXPORT_SYMBOL(sn_hwperf_get_nearest_node);
gpl-2.0
Freack-v/android_kernel_eagle
drivers/xen/xenbus/xenbus_probe.c
4940
18262
/****************************************************************************** * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define DPRINTK(fmt, args...) 
\ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __func__, __LINE__, ##args) #include <linux/kernel.h> #include <linux/err.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/proc_fs.h> #include <linux/notifier.h> #include <linux/kthread.h> #include <linux/mutex.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/module.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/xen/hypervisor.h> #include <xen/xen.h> #include <xen/xenbus.h> #include <xen/events.h> #include <xen/page.h> #include <xen/hvm.h> #include "xenbus_comms.h" #include "xenbus_probe.h" int xen_store_evtchn; EXPORT_SYMBOL_GPL(xen_store_evtchn); struct xenstore_domain_interface *xen_store_interface; EXPORT_SYMBOL_GPL(xen_store_interface); static unsigned long xen_store_mfn; static BLOCKING_NOTIFIER_HEAD(xenstore_chain); /* If something in array of ids matches this device, return it. */ static const struct xenbus_device_id * match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev) { for (; *arr->devicetype != '\0'; arr++) { if (!strcmp(arr->devicetype, dev->devicetype)) return arr; } return NULL; } int xenbus_match(struct device *_dev, struct device_driver *_drv) { struct xenbus_driver *drv = to_xenbus_driver(_drv); if (!drv->ids) return 0; return match_device(drv->ids, to_xenbus_device(_dev)) != NULL; } EXPORT_SYMBOL_GPL(xenbus_match); static void free_otherend_details(struct xenbus_device *dev) { kfree(dev->otherend); dev->otherend = NULL; } static void free_otherend_watch(struct xenbus_device *dev) { if (dev->otherend_watch.node) { unregister_xenbus_watch(&dev->otherend_watch); kfree(dev->otherend_watch.node); dev->otherend_watch.node = NULL; } } static int talk_to_otherend(struct xenbus_device *dev) { struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); free_otherend_watch(dev); free_otherend_details(dev); return drv->read_otherend_details(dev); } static int watch_otherend(struct 
xenbus_device *dev) { struct xen_bus_type *bus = container_of(dev->dev.bus, struct xen_bus_type, bus); return xenbus_watch_pathfmt(dev, &dev->otherend_watch, bus->otherend_changed, "%s/%s", dev->otherend, "state"); } int xenbus_read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node) { int err = xenbus_gather(XBT_NIL, xendev->nodename, id_node, "%i", &xendev->otherend_id, path_node, NULL, &xendev->otherend, NULL); if (err) { xenbus_dev_fatal(xendev, err, "reading other end details from %s", xendev->nodename); return err; } if (strlen(xendev->otherend) == 0 || !xenbus_exists(XBT_NIL, xendev->otherend, "")) { xenbus_dev_fatal(xendev, -ENOENT, "unable to read other end from %s. " "missing or inaccessible.", xendev->nodename); free_otherend_details(xendev); return -ENOENT; } return 0; } EXPORT_SYMBOL_GPL(xenbus_read_otherend_details); void xenbus_otherend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len, int ignore_on_shutdown) { struct xenbus_device *dev = container_of(watch, struct xenbus_device, otherend_watch); struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); enum xenbus_state state; /* Protect us against watches firing on old details when the otherend details change, say immediately after a resume. */ if (!dev->otherend || strncmp(dev->otherend, vec[XS_WATCH_PATH], strlen(dev->otherend))) { dev_dbg(&dev->dev, "Ignoring watch at %s\n", vec[XS_WATCH_PATH]); return; } state = xenbus_read_driver_state(dev->otherend); dev_dbg(&dev->dev, "state is %d, (%s), %s, %s\n", state, xenbus_strstate(state), dev->otherend_watch.node, vec[XS_WATCH_PATH]); /* * Ignore xenbus transitions during shutdown. This prevents us doing * work that can fail e.g., when the rootfs is gone. 
*/ if (system_state > SYSTEM_RUNNING) { if (ignore_on_shutdown && (state == XenbusStateClosing)) xenbus_frontend_closed(dev); return; } if (drv->otherend_changed) drv->otherend_changed(dev, state); } EXPORT_SYMBOL_GPL(xenbus_otherend_changed); int xenbus_dev_probe(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); const struct xenbus_device_id *id; int err; DPRINTK("%s", dev->nodename); if (!drv->probe) { err = -ENODEV; goto fail; } id = match_device(drv->ids, dev); if (!id) { err = -ENODEV; goto fail; } err = talk_to_otherend(dev); if (err) { dev_warn(&dev->dev, "talk_to_otherend on %s failed.\n", dev->nodename); return err; } err = drv->probe(dev, id); if (err) goto fail; err = watch_otherend(dev); if (err) { dev_warn(&dev->dev, "watch_otherend on %s failed.\n", dev->nodename); return err; } return 0; fail: xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename); xenbus_switch_state(dev, XenbusStateClosed); return err; } EXPORT_SYMBOL_GPL(xenbus_dev_probe); int xenbus_dev_remove(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); DPRINTK("%s", dev->nodename); free_otherend_watch(dev); if (drv->remove) drv->remove(dev); free_otherend_details(dev); xenbus_switch_state(dev, XenbusStateClosed); return 0; } EXPORT_SYMBOL_GPL(xenbus_dev_remove); void xenbus_dev_shutdown(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); unsigned long timeout = 5*HZ; DPRINTK("%s", dev->nodename); get_device(&dev->dev); if (dev->state != XenbusStateConnected) { printk(KERN_INFO "%s: %s: %s != Connected, skipping\n", __func__, dev->nodename, xenbus_strstate(dev->state)); goto out; } xenbus_switch_state(dev, XenbusStateClosing); timeout = wait_for_completion_timeout(&dev->down, timeout); if (!timeout) printk(KERN_INFO "%s: %s timeout closing device\n", __func__, dev->nodename); out: 
put_device(&dev->dev); } EXPORT_SYMBOL_GPL(xenbus_dev_shutdown); int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus) { drv->driver.bus = &bus->bus; return driver_register(&drv->driver); } EXPORT_SYMBOL_GPL(xenbus_register_driver_common); void xenbus_unregister_driver(struct xenbus_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL_GPL(xenbus_unregister_driver); struct xb_find_info { struct xenbus_device *dev; const char *nodename; }; static int cmp_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; if (!strcmp(xendev->nodename, info->nodename)) { info->dev = xendev; get_device(dev); return 1; } return 0; } struct xenbus_device *xenbus_device_find(const char *nodename, struct bus_type *bus) { struct xb_find_info info = { .dev = NULL, .nodename = nodename }; bus_for_each_dev(bus, NULL, &info, cmp_dev); return info.dev; } static int cleanup_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; int len = strlen(info->nodename); DPRINTK("%s", info->nodename); /* Match the info->nodename path, or any subdirectory of that path. */ if (strncmp(xendev->nodename, info->nodename, len)) return 0; /* If the node name is longer, ensure it really is a subdirectory. 
*/ if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/')) return 0; info->dev = xendev; get_device(dev); return 1; } static void xenbus_cleanup_devices(const char *path, struct bus_type *bus) { struct xb_find_info info = { .nodename = path }; do { info.dev = NULL; bus_for_each_dev(bus, NULL, &info, cleanup_dev); if (info.dev) { device_unregister(&info.dev->dev); put_device(&info.dev->dev); } } while (info.dev); } static void xenbus_dev_release(struct device *dev) { if (dev) kfree(to_xenbus_device(dev)); } static ssize_t nodename_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename); } static ssize_t devtype_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype); } static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%s:%s\n", dev->bus->name, to_xenbus_device(dev)->devicetype); } struct device_attribute xenbus_dev_attrs[] = { __ATTR_RO(nodename), __ATTR_RO(devtype), __ATTR_RO(modalias), __ATTR_NULL }; EXPORT_SYMBOL_GPL(xenbus_dev_attrs); int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename) { char devname[XEN_BUS_ID_SIZE]; int err; struct xenbus_device *xendev; size_t stringlen; char *tmpstring; enum xenbus_state state = xenbus_read_driver_state(nodename); if (state != XenbusStateInitialising) { /* Device is not new, so ignore it. This can happen if a device is going away after switching to Closed. */ return 0; } stringlen = strlen(nodename) + 1 + strlen(type) + 1; xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL); if (!xendev) return -ENOMEM; xendev->state = XenbusStateInitialising; /* Copy the strings into the extra space. 
*/ tmpstring = (char *)(xendev + 1); strcpy(tmpstring, nodename); xendev->nodename = tmpstring; tmpstring += strlen(tmpstring) + 1; strcpy(tmpstring, type); xendev->devicetype = tmpstring; init_completion(&xendev->down); xendev->dev.bus = &bus->bus; xendev->dev.release = xenbus_dev_release; err = bus->get_bus_id(devname, xendev->nodename); if (err) goto fail; dev_set_name(&xendev->dev, devname); /* Register with generic device framework. */ err = device_register(&xendev->dev); if (err) goto fail; return 0; fail: kfree(xendev); return err; } EXPORT_SYMBOL_GPL(xenbus_probe_node); static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type) { int err = 0; char **dir; unsigned int dir_n = 0; int i; dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = bus->probe(bus, type, dir[i]); if (err) break; } kfree(dir); return err; } int xenbus_probe_devices(struct xen_bus_type *bus) { int err = 0; char **dir; unsigned int i, dir_n; dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = xenbus_probe_device_type(bus, dir[i]); if (err) break; } kfree(dir); return err; } EXPORT_SYMBOL_GPL(xenbus_probe_devices); static unsigned int char_count(const char *str, char c) { unsigned int i, ret = 0; for (i = 0; str[i]; i++) if (str[i] == c) ret++; return ret; } static int strsep_len(const char *str, char c, unsigned int len) { unsigned int i; for (i = 0; str[i]; i++) if (str[i] == c) { if (len == 0) return i; len--; } return (len == 0) ? i : -ERANGE; } void xenbus_dev_changed(const char *node, struct xen_bus_type *bus) { int exists, rootlen; struct xenbus_device *dev; char type[XEN_BUS_ID_SIZE]; const char *p, *root; if (char_count(node, '/') < 2) return; exists = xenbus_exists(XBT_NIL, node, ""); if (!exists) { xenbus_cleanup_devices(node, &bus->bus); return; } /* backend/<type>/... or device/<type>/... 
*/ p = strchr(node, '/') + 1; snprintf(type, XEN_BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p); type[XEN_BUS_ID_SIZE-1] = '\0'; rootlen = strsep_len(node, '/', bus->levels); if (rootlen < 0) return; root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node); if (!root) return; dev = xenbus_device_find(root, &bus->bus); if (!dev) xenbus_probe_node(bus, type, root); else put_device(&dev->dev); kfree(root); } EXPORT_SYMBOL_GPL(xenbus_dev_changed); int xenbus_dev_suspend(struct device *dev) { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev = container_of(dev, struct xenbus_device, dev); DPRINTK("%s", xdev->nodename); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); if (drv->suspend) err = drv->suspend(xdev); if (err) printk(KERN_WARNING "xenbus: suspend %s failed: %i\n", dev_name(dev), err); return 0; } EXPORT_SYMBOL_GPL(xenbus_dev_suspend); int xenbus_dev_resume(struct device *dev) { int err; struct xenbus_driver *drv; struct xenbus_device *xdev = container_of(dev, struct xenbus_device, dev); DPRINTK("%s", xdev->nodename); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); err = talk_to_otherend(xdev); if (err) { printk(KERN_WARNING "xenbus: resume (talk_to_otherend) %s failed: %i\n", dev_name(dev), err); return err; } xdev->state = XenbusStateInitialising; if (drv->resume) { err = drv->resume(xdev); if (err) { printk(KERN_WARNING "xenbus: resume %s failed: %i\n", dev_name(dev), err); return err; } } err = watch_otherend(xdev); if (err) { printk(KERN_WARNING "xenbus_probe: resume (watch_otherend) %s failed: " "%d.\n", dev_name(dev), err); return err; } return 0; } EXPORT_SYMBOL_GPL(xenbus_dev_resume); int xenbus_dev_cancel(struct device *dev) { /* Do nothing */ DPRINTK("cancel"); return 0; } EXPORT_SYMBOL_GPL(xenbus_dev_cancel); /* A flag to determine if xenstored is 'ready' (i.e. 
has started) */ int xenstored_ready; int register_xenstore_notifier(struct notifier_block *nb) { int ret = 0; if (xenstored_ready > 0) ret = nb->notifier_call(nb, 0, NULL); else blocking_notifier_chain_register(&xenstore_chain, nb); return ret; } EXPORT_SYMBOL_GPL(register_xenstore_notifier); void unregister_xenstore_notifier(struct notifier_block *nb) { blocking_notifier_chain_unregister(&xenstore_chain, nb); } EXPORT_SYMBOL_GPL(unregister_xenstore_notifier); void xenbus_probe(struct work_struct *unused) { xenstored_ready = 1; /* Notify others that xenstore is up */ blocking_notifier_call_chain(&xenstore_chain, 0, NULL); } EXPORT_SYMBOL_GPL(xenbus_probe); static int __init xenbus_probe_initcall(void) { if (!xen_domain()) return -ENODEV; if (xen_initial_domain() || xen_hvm_domain()) return 0; xenbus_probe(NULL); return 0; } device_initcall(xenbus_probe_initcall); /* Set up event channel for xenstored which is run as a local process * (this is normally used only in dom0) */ static int __init xenstored_local_init(void) { int err = 0; unsigned long page = 0; struct evtchn_alloc_unbound alloc_unbound; /* Allocate Xenstore page */ page = get_zeroed_page(GFP_KERNEL); if (!page) goto out_err; xen_store_mfn = xen_start_info->store_mfn = pfn_to_mfn(virt_to_phys((void *)page) >> PAGE_SHIFT); /* Next allocate a local port which xenstored can bind to */ alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = DOMID_SELF; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err == -ENOSYS) goto out_err; BUG_ON(err); xen_store_evtchn = xen_start_info->store_evtchn = alloc_unbound.port; return 0; out_err: if (page != 0) free_page(page); return err; } static int __init xenbus_init(void) { int err = 0; if (!xen_domain()) return -ENODEV; xenbus_ring_ops_init(); if (xen_hvm_domain()) { uint64_t v = 0; err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); if (err) goto out_error; xen_store_evtchn = (int)v; err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v); if 
(err) goto out_error; xen_store_mfn = (unsigned long)v; xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE); } else { xen_store_evtchn = xen_start_info->store_evtchn; xen_store_mfn = xen_start_info->store_mfn; if (xen_store_evtchn) xenstored_ready = 1; else { err = xenstored_local_init(); if (err) goto out_error; } xen_store_interface = mfn_to_virt(xen_store_mfn); } /* Initialize the interface to xenstore. */ err = xs_init(); if (err) { printk(KERN_WARNING "XENBUS: Error initializing xenstore comms: %i\n", err); goto out_error; } #ifdef CONFIG_XEN_COMPAT_XENFS /* * Create xenfs mountpoint in /proc for compatibility with * utilities that expect to find "xenbus" under "/proc/xen". */ proc_mkdir("xen", NULL); #endif out_error: return err; } postcore_initcall(xenbus_init); MODULE_LICENSE("GPL");
gpl-2.0
mp3deviant721/boeffla-kernel-cm-bacon-mod
drivers/pci/hotplug/fakephp.c
4940
3787
/* Works like the fakephp driver used to, except a little better. * * - It's possible to remove devices with subordinate busses. * - New PCI devices that appear via any method, not just a fakephp triggered * rescan, will be noticed. * - Devices that are removed via any method, not just a fakephp triggered * removal, will also be noticed. * * Uses nothing from the pci-hotplug subsystem. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/list.h> #include <linux/kobject.h> #include <linux/sysfs.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/slab.h> #include "../pci.h" struct legacy_slot { struct kobject kobj; struct pci_dev *dev; struct list_head list; }; static LIST_HEAD(legacy_list); static ssize_t legacy_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct legacy_slot *slot = container_of(kobj, typeof(*slot), kobj); strcpy(buf, "1\n"); return 2; } static void remove_callback(void *data) { pci_stop_and_remove_bus_device((struct pci_dev *)data); } static ssize_t legacy_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t len) { struct legacy_slot *slot = container_of(kobj, typeof(*slot), kobj); unsigned long val; if (strict_strtoul(buf, 0, &val) < 0) return -EINVAL; if (val) pci_rescan_bus(slot->dev->bus); else sysfs_schedule_callback(&slot->dev->dev.kobj, remove_callback, slot->dev, THIS_MODULE); return len; } static struct attribute *legacy_attrs[] = { &(struct attribute){ .name = "power", .mode = 0644 }, NULL, }; static void legacy_release(struct kobject *kobj) { struct legacy_slot *slot = container_of(kobj, typeof(*slot), kobj); pci_dev_put(slot->dev); kfree(slot); } static struct kobj_type legacy_ktype = { .sysfs_ops = &(const struct sysfs_ops){ .store = legacy_store, .show = legacy_show }, .release = &legacy_release, .default_attrs = legacy_attrs, }; static int legacy_add_slot(struct pci_dev *pdev) { struct legacy_slot *slot = 
kzalloc(sizeof(*slot), GFP_KERNEL); if (!slot) return -ENOMEM; if (kobject_init_and_add(&slot->kobj, &legacy_ktype, &pci_slots_kset->kobj, "%s", dev_name(&pdev->dev))) { dev_warn(&pdev->dev, "Failed to created legacy fake slot\n"); return -EINVAL; } slot->dev = pci_dev_get(pdev); list_add(&slot->list, &legacy_list); return 0; } static int legacy_notify(struct notifier_block *nb, unsigned long action, void *data) { struct pci_dev *pdev = to_pci_dev(data); if (action == BUS_NOTIFY_ADD_DEVICE) { legacy_add_slot(pdev); } else if (action == BUS_NOTIFY_DEL_DEVICE) { struct legacy_slot *slot; list_for_each_entry(slot, &legacy_list, list) if (slot->dev == pdev) goto found; dev_warn(&pdev->dev, "Missing legacy fake slot?"); return -ENODEV; found: kobject_del(&slot->kobj); list_del(&slot->list); kobject_put(&slot->kobj); } return 0; } static struct notifier_block legacy_notifier = { .notifier_call = legacy_notify }; static int __init init_legacy(void) { struct pci_dev *pdev = NULL; /* Add existing devices */ for_each_pci_dev(pdev) legacy_add_slot(pdev); /* Be alerted of any new ones */ bus_register_notifier(&pci_bus_type, &legacy_notifier); return 0; } module_init(init_legacy); static void __exit remove_legacy(void) { struct legacy_slot *slot, *tmp; bus_unregister_notifier(&pci_bus_type, &legacy_notifier); list_for_each_entry_safe(slot, tmp, &legacy_list, list) { list_del(&slot->list); kobject_del(&slot->kobj); kobject_put(&slot->kobj); } } module_exit(remove_legacy); MODULE_AUTHOR("Trent Piepho <xyzzy@speakeasy.org>"); MODULE_DESCRIPTION("Legacy version of the fakephp interface"); MODULE_LICENSE("GPL");
gpl-2.0
DeqingSun/Glass_kernel
drivers/video/w100fb.c
4940
48927
/*
 * linux/drivers/video/w100fb.c
 *
 * Frame Buffer Device for ATI Imageon w100 (Wallaby)
 *
 * Copyright (C) 2002, ATI Corp.
 * Copyright (C) 2004-2006 Richard Purdie
 * Copyright (c) 2005 Ian Molton
 * Copyright (c) 2006 Alberto Mardegan
 *
 * Rewritten for 2.6 by Richard Purdie <rpurdie@rpsys.net>
 *
 * Generic platform support by Ian Molton <spyro@f2s.com>
 * and Richard Purdie <rpurdie@rpsys.net>
 *
 * w32xx support by Ian Molton
 *
 * Hardware acceleration support by Alberto Mardegan
 * <mardy@users.sourceforge.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <video/w100fb.h>
#include "w100fb.h"

/*
 * Prototypes
 */
static void w100_suspend(u32 mode);
static void w100_vsync(void);
static void w100_hw_init(struct w100fb_par*);
static void w100_pwm_setup(struct w100fb_par*);
static void w100_init_clocks(struct w100fb_par*);
static void w100_setup_memory(struct w100fb_par*);
static void w100_init_lcd(struct w100fb_par*);
static void w100_set_dispregs(struct w100fb_par*);
static void w100_update_enable(void);
static void w100_update_disable(void);
static void calc_hsync(struct w100fb_par *par);
static void w100_init_graphic_engine(struct w100fb_par *par);
struct w100_pll_info *w100_get_xtal_table(unsigned int freq) __devinit;

/* Pseudo palette size */
#define MAX_PALETTES 16

#define W100_SUSPEND_EXTMEM 0
#define W100_SUSPEND_ALL 1

#define BITS_PER_PIXEL 16

/* Remapped addresses for base cfg, memmapped regs and the frame buffer itself */
static void *remapped_base;
static void *remapped_regs;
static void *remapped_fbuf;

#define REMAPPED_FB_LEN 0x15ffff

/* This is the offset in the w100's address space we map the current
   framebuffer memory to. We use the position of external memory as
   we can remap internal memory to there if external isn't present. */
#define W100_FB_BASE MEM_EXT_BASE_VALUE


/*
 * Sysfs functions
 */

/* sysfs "flip" read: report the current flip state (0 or 1). */
static ssize_t flip_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct fb_info *info = dev_get_drvdata(dev);
	struct w100fb_par *par=info->par;

	return sprintf(buf, "%d\n",par->flip);
}

/* sysfs "flip" write: any value > 0 enables flipping; the display
 * registers are then reprogrammed and hsync recalculated. */
static ssize_t flip_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned int flip;
	struct fb_info *info = dev_get_drvdata(dev);
	struct w100fb_par *par=info->par;

	flip = simple_strtoul(buf, NULL, 10);

	if (flip > 0)
		par->flip = 1;
	else
		par->flip = 0;

	w100_update_disable();
	w100_set_dispregs(par);
	w100_update_enable();

	calc_hsync(par);

	return count;
}

static DEVICE_ATTR(flip, 0644, flip_show, flip_store);

/* Debug helper (write-only attribute): the written value is parsed as a
 * hex register offset, which is read and dumped via printk.
 * NOTE(review): the offset is not range-checked here, unlike reg_write. */
static ssize_t w100fb_reg_read(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long regs, param;
	regs = simple_strtoul(buf, NULL, 16);
	param = readl(remapped_regs + regs);
	printk("Read Register 0x%08lX: 0x%08lX\n", regs, param);
	return count;
}

static DEVICE_ATTR(reg_read, 0200, NULL, w100fb_reg_read);

/* Debug helper: "offset value" (both hex) writes value to the register;
 * only offsets <= 0x2000 are accepted. */
static ssize_t w100fb_reg_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long regs, param;
	sscanf(buf, "%lx %lx", &regs, &param);
	if (regs <= 0x2000) {
		printk("Write Register 0x%08lX: 0x%08lX\n", regs, param);
		writel(param, remapped_regs + regs);
	}
	return count;
}

static DEVICE_ATTR(reg_write, 0200, NULL, w100fb_reg_write);

/* sysfs "fastpllclk" read: report whether fast PLL mode is selected. */
static ssize_t fastpllclk_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct fb_info *info = dev_get_drvdata(dev);
	struct w100fb_par *par=info->par;

	return sprintf(buf, "%d\n",par->fastpll_mode);
}

/* sysfs "fastpllclk" write: toggle fast PLL mode and reprogram the
 * clocks accordingly. */
static ssize_t fastpllclk_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct fb_info *info = dev_get_drvdata(dev);
	struct w100fb_par *par=info->par;

	if (simple_strtoul(buf, NULL, 10) > 0) {
		par->fastpll_mode=1;
		printk("w100fb: Using fast system clock (if possible)\n");
	} else {
		par->fastpll_mode=0;
		printk("w100fb: Using normal system clock\n");
	}

	w100_init_clocks(par);
	calc_hsync(par);

	return count;
}

static DEVICE_ATTR(fastpllclk, 0644, fastpllclk_show, fastpllclk_store);

/*
 * Some touchscreens need hsync information from the video driver to
 * function correctly. We export it here.
 */
unsigned long w100fb_get_hsynclen(struct device *dev)
{
	struct fb_info *info = dev_get_drvdata(dev);
	struct w100fb_par *par=info->par;

	/* If display is blanked/suspended, hsync isn't active */
	if (par->blanked)
		return 0;
	else
		return par->hsync_len;
}
EXPORT_SYMBOL(w100fb_get_hsynclen);

/* Zero the visible framebuffer (xres * yres at 16bpp). */
static void w100fb_clear_screen(struct w100fb_par *par)
{
	memset_io(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), 0, (par->xres * par->yres * BITS_PER_PIXEL/8));
}

/*
 * Set a palette value from rgb components
 */
static int w100fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
			    u_int trans, struct fb_info *info)
{
	unsigned int val;
	int ret = 1;

	/*
	 * If greyscale is true, then we convert the RGB value
	 * to greyscale no matter what visual we are using.
	 */
	if (info->var.grayscale)
		red = green = blue = (19595 * red + 38470 * green + 7471 * blue) >> 16;

	/*
	 * 16-bit True Colour. We encode the RGB value
	 * according to the RGB bitfield information.
	 */
	if (regno < MAX_PALETTES) {
		u32 *pal = info->pseudo_palette;

		/* Pack 5-6-5: top bits of each 16-bit component. */
		val = (red & 0xf800) | ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11);
		pal[regno] = val;
		ret = 0;
	}
	return ret;
}

/*
 * Blank the display based on value in blank_mode
 */
static int w100fb_blank(int blank_mode, struct fb_info *info)
{
	struct w100fb_par *par = info->par;
	struct w100_tg_info *tg = par->mach->tg;

	switch(blank_mode) {

	case FB_BLANK_NORMAL:         /* Normal blanking */
	case FB_BLANK_VSYNC_SUSPEND:  /* VESA blank (vsync off) */
	case FB_BLANK_HSYNC_SUSPEND:  /* VESA blank (hsync off) */
	case FB_BLANK_POWERDOWN:      /* Poweroff */
		if (par->blanked == 0) {
			if(tg && tg->suspend)
				tg->suspend(par);
			par->blanked = 1;
		}
		break;

	case FB_BLANK_UNBLANK:        /* Unblanking */
		if (par->blanked != 0) {
			if(tg && tg->resume)
				tg->resume(par);
			par->blanked = 0;
		}
		break;
	}
	return 0;
}

/* Busy-wait (up to ~2 s in 1 us steps) until the command FIFO reports at
 * least 'entries' free slots; logs an error on timeout but carries on. */
static void w100_fifo_wait(int entries)
{
	union rbbm_status_u status;
	int i;

	for (i = 0; i < 2000000; i++) {
		status.val = readl(remapped_regs + mmRBBM_STATUS);
		if (status.f.cmdfifo_avail >= entries)
			return;
		udelay(1);
	}
	printk(KERN_ERR "w100fb: FIFO Timeout!\n");
}

/* fb_ops.fb_sync: wait for the graphics engine to go idle.
 * Returns 0 when idle, -EBUSY on timeout. */
static int w100fb_sync(struct fb_info *info)
{
	union rbbm_status_u status;
	int i;

	for (i = 0; i < 2000000; i++) {
		status.val = readl(remapped_regs + mmRBBM_STATUS);
		if (!status.f.gui_active)
			return 0;
		udelay(1);
	}
	printk(KERN_ERR "w100fb: Graphic engine timeout!\n");
	return -EBUSY;
}

/* Program the 2D engine's static state: src/dst pitch and offset, scissor
 * limits, draw direction and the default GUI master control / datatype /
 * mix registers used by fillrect and copyarea. */
static void w100_init_graphic_engine(struct w100fb_par *par)
{
	union dp_gui_master_cntl_u gmc;
	union dp_mix_u dp_mix;
	union dp_datatype_u dp_datatype;
	union dp_cntl_u dp_cntl;

	w100_fifo_wait(4);
	writel(W100_FB_BASE, remapped_regs + mmDST_OFFSET);
	writel(par->xres, remapped_regs + mmDST_PITCH);
	writel(W100_FB_BASE, remapped_regs + mmSRC_OFFSET);
	writel(par->xres, remapped_regs + mmSRC_PITCH);

	w100_fifo_wait(3);
	writel(0, remapped_regs + mmSC_TOP_LEFT);
	writel((par->yres << 16) | par->xres, remapped_regs + mmSC_BOTTOM_RIGHT);
	writel(0x1fff1fff, remapped_regs + mmSRC_SC_BOTTOM_RIGHT);

	w100_fifo_wait(4);
	dp_cntl.val = 0;
	dp_cntl.f.dst_x_dir = 1;
	dp_cntl.f.dst_y_dir = 1;
	dp_cntl.f.src_x_dir = 1;
	dp_cntl.f.src_y_dir = 1;
	dp_cntl.f.dst_major_x = 1;
	dp_cntl.f.src_major_x = 1;
	writel(dp_cntl.val, remapped_regs + mmDP_CNTL);

	gmc.val = 0;
	gmc.f.gmc_src_pitch_offset_cntl = 1;
	gmc.f.gmc_dst_pitch_offset_cntl = 1;
	gmc.f.gmc_src_clipping = 1;
	gmc.f.gmc_dst_clipping = 1;
	gmc.f.gmc_brush_datatype = GMC_BRUSH_NONE;
	gmc.f.gmc_dst_datatype = 3;	/* from DstType_16Bpp_444 */
	gmc.f.gmc_src_datatype = SRC_DATATYPE_EQU_DST;
	gmc.f.gmc_byte_pix_order = 1;
	gmc.f.gmc_default_sel = 0;
	gmc.f.gmc_rop3 = ROP3_SRCCOPY;
	gmc.f.gmc_dp_src_source = DP_SRC_MEM_RECTANGULAR;
	gmc.f.gmc_clr_cmp_fcn_dis = 1;
	gmc.f.gmc_wr_msk_dis = 1;
	gmc.f.gmc_dp_op = DP_OP_ROP;
	writel(gmc.val, remapped_regs + mmDP_GUI_MASTER_CNTL);

	/* Mirror the GMC choices into the datatype and mix registers. */
	dp_datatype.val = dp_mix.val = 0;
	dp_datatype.f.dp_dst_datatype = gmc.f.gmc_dst_datatype;
	dp_datatype.f.dp_brush_datatype = gmc.f.gmc_brush_datatype;
	dp_datatype.f.dp_src2_type = 0;
	dp_datatype.f.dp_src2_datatype = gmc.f.gmc_src_datatype;
	dp_datatype.f.dp_src_datatype = gmc.f.gmc_src_datatype;
	dp_datatype.f.dp_byte_pix_order = gmc.f.gmc_byte_pix_order;
	writel(dp_datatype.val, remapped_regs + mmDP_DATATYPE);

	dp_mix.f.dp_src_source = gmc.f.gmc_dp_src_source;
	dp_mix.f.dp_src2_source = 1;
	dp_mix.f.dp_rop3 = gmc.f.gmc_rop3;
	dp_mix.f.dp_op = gmc.f.gmc_dp_op;
	writel(dp_mix.val, remapped_regs + mmDP_MIX);
}

/* Accelerated solid fill; falls back to cfb_fillrect() when hardware
 * acceleration is disabled. */
static void w100fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	union dp_gui_master_cntl_u gmc;

	if (info->state != FBINFO_STATE_RUNNING)
		return;
	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_fillrect(info, rect);
		return;
	}

	gmc.val = readl(remapped_regs + mmDP_GUI_MASTER_CNTL);
	gmc.f.gmc_rop3 = ROP3_PATCOPY;
	gmc.f.gmc_brush_datatype = GMC_BRUSH_SOLID_COLOR;
	w100_fifo_wait(2);
	writel(gmc.val, remapped_regs + mmDP_GUI_MASTER_CNTL);
	writel(rect->color, remapped_regs + mmDP_BRUSH_FRGD_CLR);

	/* Writing DST_WIDTH_HEIGHT kicks off the fill. */
	w100_fifo_wait(2);
	writel((rect->dy << 16) | (rect->dx & 0xffff), remapped_regs + mmDST_Y_X);
	writel((rect->width << 16) | (rect->height & 0xffff),
	       remapped_regs + mmDST_WIDTH_HEIGHT);
}

/* Accelerated screen-to-screen copy; falls back to cfb_copyarea() when
 * hardware acceleration is disabled. */
static void w100fb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
	u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
	u32 h = area->height, w = area->width;
	union dp_gui_master_cntl_u gmc;

	if (info->state != FBINFO_STATE_RUNNING)
		return;
	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_copyarea(info, area);
		return;
	}

	gmc.val = readl(remapped_regs + mmDP_GUI_MASTER_CNTL);
	gmc.f.gmc_rop3 = ROP3_SRCCOPY;
	gmc.f.gmc_brush_datatype = GMC_BRUSH_NONE;
	w100_fifo_wait(1);
	writel(gmc.val, remapped_regs + mmDP_GUI_MASTER_CNTL);

	w100_fifo_wait(3);
	writel((sy << 16) | (sx & 0xffff), remapped_regs + mmSRC_Y_X);
	writel((dy << 16) | (dx & 0xffff), remapped_regs + mmDST_Y_X);
	writel((w << 16) | (h & 0xffff), remapped_regs + mmDST_WIDTH_HEIGHT);
}

/*
 * Change the resolution by calling the appropriate hardware functions
 */
static void w100fb_activate_var(struct w100fb_par *par)
{
	struct w100_tg_info *tg = par->mach->tg;

	w100_pwm_setup(par);
	w100_setup_memory(par);
	w100_init_clocks(par);
	w100fb_clear_screen(par);
	w100_vsync();

	w100_update_disable();
	w100_init_lcd(par);
	w100_set_dispregs(par);
	w100_update_enable();
	w100_init_graphic_engine(par);

	calc_hsync(par);

	if (!par->blanked && tg && tg->change)
		tg->change(par);
}

/* Select the smallest mode that allows the desired resolution to be
 * displayed. If desired, the x and y parameters can be rounded up to
 * match the selected mode.
 */
/* Both orientations are considered: the second branch matches modes whose
 * x/y are swapped relative to the request (rotated display). */
static struct w100_mode *w100fb_get_mode(struct w100fb_par *par, unsigned int *x, unsigned int *y, int saveval)
{
	struct w100_mode *mode = NULL;
	struct w100_mode *modelist = par->mach->modelist;
	unsigned int best_x = 0xffffffff, best_y = 0xffffffff;
	unsigned int i;

	for (i = 0 ; i < par->mach->num_modes ; i++) {
		if (modelist[i].xres >= *x && modelist[i].yres >= *y &&
				modelist[i].xres < best_x && modelist[i].yres < best_y) {
			best_x = modelist[i].xres;
			best_y = modelist[i].yres;
			mode = &modelist[i];
		} else if(modelist[i].xres >= *y && modelist[i].yres >= *x &&
				modelist[i].xres < best_y && modelist[i].yres < best_x) {
			best_x = modelist[i].yres;
			best_y = modelist[i].xres;
			mode = &modelist[i];
		}
	}

	/* saveval != 0: round the caller's x/y up to the selected mode. */
	if (mode && saveval) {
		*x = best_x;
		*y = best_y;
	}

	return mode;
}

/*
 * w100fb_check_var():
 * Get the video params out of 'var'. If a value doesn't fit, round it up,
 * if it's too big, return -EINVAL.
 */
static int w100fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct w100fb_par *par=info->par;

	if(!w100fb_get_mode(par, &var->xres, &var->yres, 1))
		return -EINVAL;

	/* Reject resolutions that exceed external (if present) or
	 * internal video memory. */
	if (par->mach->mem && ((var->xres*var->yres*BITS_PER_PIXEL/8) > (par->mach->mem->size+1)))
		return -EINVAL;

	if (!par->mach->mem && ((var->xres*var->yres*BITS_PER_PIXEL/8) > (MEM_INT_SIZE+1)))
		return -EINVAL;

	var->xres_virtual = max(var->xres_virtual, var->xres);
	var->yres_virtual = max(var->yres_virtual, var->yres);

	if (var->bits_per_pixel > BITS_PER_PIXEL)
		return -EINVAL;
	else
		var->bits_per_pixel = BITS_PER_PIXEL;

	/* Fixed RGB565 layout. */
	var->red.offset = 11;
	var->red.length = 5;
	var->green.offset = 5;
	var->green.length = 6;
	var->blue.offset = 0;
	var->blue.length = 5;
	var->transp.offset = var->transp.length = 0;

	var->nonstd = 0;
	var->height = -1;
	var->width = -1;
	var->vmode = FB_VMODE_NONINTERLACED;
	var->sync = 0;
	var->pixclock = 0x04;	/* 171521; */

	return 0;
}

/*
 * w100fb_set_par():
 * Set the user defined part of the display for the specified console
 * by looking at the values in info.var
 */
static int w100fb_set_par(struct fb_info *info)
{
	struct w100fb_par *par=info->par;

	if (par->xres != info->var.xres || par->yres != info->var.yres)	{
		par->xres = info->var.xres;
		par->yres = info->var.yres;
		par->mode = w100fb_get_mode(par, &par->xres, &par->yres, 0);

		info->fix.visual = FB_VISUAL_TRUECOLOR;
		info->fix.ypanstep = 0;
		info->fix.ywrapstep = 0;
		info->fix.line_length = par->xres * BITS_PER_PIXEL / 8;

		/* Switch between internal and external video memory
		 * depending on whether the mode fits in internal RAM. */
		mutex_lock(&info->mm_lock);
		if ((par->xres*par->yres*BITS_PER_PIXEL/8) > (MEM_INT_SIZE+1)) {
			par->extmem_active = 1;
			info->fix.smem_len = par->mach->mem->size+1;
		} else {
			par->extmem_active = 0;
			info->fix.smem_len = MEM_INT_SIZE+1;
		}
		mutex_unlock(&info->mm_lock);

		w100fb_activate_var(par);
	}
	return 0;
}

/*
 * Frame buffer operations
 */
static struct fb_ops w100fb_ops = {
	.owner        = THIS_MODULE,
	.fb_check_var = w100fb_check_var,
	.fb_set_par   = w100fb_set_par,
	.fb_setcolreg = w100fb_setcolreg,
	.fb_blank     = w100fb_blank,
	.fb_fillrect  = w100fb_fillrect,
	.fb_copyarea  = w100fb_copyarea,
	.fb_imageblit = cfb_imageblit,
	.fb_sync      = w100fb_sync,
};

#ifdef CONFIG_PM
/* Snapshot video memory into vmalloc'd buffers before suspend.  External
 * memory (if active) and internal memory are saved separately; buffer
 * pointers stay NULL on allocation failure and restore skips them. */
static void w100fb_save_vidmem(struct w100fb_par *par)
{
	int memsize;

	if (par->extmem_active) {
		memsize=par->mach->mem->size;
		par->saved_extmem = vmalloc(memsize);
		if (par->saved_extmem)
			memcpy_fromio(par->saved_extmem, remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), memsize);
	}
	memsize=MEM_INT_SIZE;
	par->saved_intmem = vmalloc(memsize);
	if (par->saved_intmem && par->extmem_active)
		memcpy_fromio(par->saved_intmem, remapped_fbuf + (W100_FB_BASE-MEM_INT_BASE_VALUE), memsize);
	else if (par->saved_intmem)
		memcpy_fromio(par->saved_intmem, remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), memsize);
}

/* Restore the buffers captured by w100fb_save_vidmem() and free them. */
static void w100fb_restore_vidmem(struct w100fb_par *par)
{
	int memsize;

	if (par->extmem_active && par->saved_extmem) {
		memsize=par->mach->mem->size;
		memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_extmem, memsize);
		vfree(par->saved_extmem);
	}
	if (par->saved_intmem) {
		memsize=MEM_INT_SIZE;
		if (par->extmem_active)
			memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_INT_BASE_VALUE), par->saved_intmem, memsize);
		else
			memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_intmem, memsize);
		vfree(par->saved_intmem);
	}
}

static int w100fb_suspend(struct platform_device *dev, pm_message_t state)
{
	struct fb_info *info = platform_get_drvdata(dev);
	struct w100fb_par *par=info->par;
	struct w100_tg_info *tg = par->mach->tg;

	w100fb_save_vidmem(par);
	if(tg && tg->suspend)
		tg->suspend(par);
	w100_suspend(W100_SUSPEND_ALL);
	par->blanked = 1;

	return 0;
}

static int w100fb_resume(struct platform_device *dev)
{
	struct fb_info *info = platform_get_drvdata(dev);
	struct w100fb_par *par=info->par;
	struct w100_tg_info *tg = par->mach->tg;

	/* Full re-init: the chip loses state across suspend. */
	w100_hw_init(par);
	w100fb_activate_var(par);
	w100fb_restore_vidmem(par);
	if(tg && tg->resume)
		tg->resume(par);
	par->blanked = 0;

	return 0;
}
#else
#define w100fb_suspend	NULL
#define w100fb_resume	NULL
#endif


int __devinit w100fb_probe(struct platform_device *pdev)
{
	int err = -EIO;
	struct w100fb_mach_info *inf;
	struct fb_info *info = NULL;
	struct w100fb_par *par;
	struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	unsigned int chip_id;

	if (!mem)
		return -EINVAL;

	/* Remap the chip base address */
	remapped_base = ioremap_nocache(mem->start+W100_CFG_BASE, W100_CFG_LEN);
	if (remapped_base == NULL)
		goto out;

	/* Map the register space */
	remapped_regs = ioremap_nocache(mem->start+W100_REG_BASE, W100_REG_LEN);
	if (remapped_regs == NULL)
		goto out;

	/* Identify the chip */
	printk("Found ");
	chip_id = readl(remapped_regs + mmCHIP_ID);
	switch(chip_id) {
		case CHIP_ID_W100:  printk("w100");  break;
		case CHIP_ID_W3200: printk("w3200"); break;
		case CHIP_ID_W3220: printk("w3220"); break;
		default:
			printk("Unknown imageon chip ID\n");
			err = -ENODEV;
			goto out;
	}
	printk(" at 0x%08lx.\n", (unsigned long) mem->start+W100_CFG_BASE);

	/* Remap the framebuffer */
	remapped_fbuf = ioremap_nocache(mem->start+MEM_WINDOW_BASE, MEM_WINDOW_SIZE);
	if (remapped_fbuf == NULL)
		goto out;

	info=framebuffer_alloc(sizeof(struct w100fb_par), &pdev->dev);
	if (!info) {
		err = -ENOMEM;
		goto out;
	}

	par = info->par;
	platform_set_drvdata(pdev, info);

	inf = pdev->dev.platform_data;
	par->chip_id = chip_id;
	par->mach = inf;
	par->fastpll_mode = 0;
	par->blanked = 0;

	par->pll_table=w100_get_xtal_table(inf->xtal_freq);
	if (!par->pll_table) {
		printk(KERN_ERR "No matching Xtal definition found\n");
		err = -EINVAL;
		goto out;
	}

	info->pseudo_palette = kmalloc(sizeof (u32) * MAX_PALETTES, GFP_KERNEL);
	if (!info->pseudo_palette) {
		err = -ENOMEM;
		goto out;
	}

	info->fbops = &w100fb_ops;
	info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
		FBINFO_HWACCEL_FILLRECT;
	info->node = -1;
	info->screen_base = remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE);
	info->screen_size = REMAPPED_FB_LEN;

	strcpy(info->fix.id, "w100fb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.type_aux = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.smem_start = mem->start+W100_FB_BASE;
	info->fix.mmio_start = mem->start+W100_REG_BASE;
	info->fix.mmio_len = W100_REG_LEN;

	if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
		err = -ENOMEM;
		goto out;
	}

	par->mode = &inf->modelist[0];
	if(inf->init_mode & INIT_MODE_ROTATED) {
		info->var.xres = par->mode->yres;
		info->var.yres = par->mode->xres;
	}
	else {
		info->var.xres = par->mode->xres;
		info->var.yres = par->mode->yres;
	}

	/* NOTE(review): '&=' assigns back into the platform data, clearing
	 * every other init_mode bit (e.g. INIT_MODE_ROTATED) as a side
	 * effect.  The test itself works since ROTATED was already
	 * consumed above, but '&' was likely intended — confirm before
	 * relying on init_mode after probe. */
	if(inf->init_mode &= INIT_MODE_FLIPPED)
		par->flip = 1;
	else
		par->flip = 0;

	info->var.xres_virtual = info->var.xres;
	info->var.yres_virtual = info->var.yres;
	info->var.pixclock = 0x04;	/* 171521; */
	info->var.sync = 0;
	info->var.grayscale = 0;
	info->var.xoffset = info->var.yoffset = 0;
	info->var.accel_flags = 0;
	info->var.activate = FB_ACTIVATE_NOW;

	w100_hw_init(par);

	if (w100fb_check_var(&info->var, info) < 0) {
		err = -EINVAL;
		goto out;
	}

	if (register_framebuffer(info) < 0) {
		err = -EINVAL;
		goto out;
	}

	err = device_create_file(&pdev->dev, &dev_attr_fastpllclk);
	err |= device_create_file(&pdev->dev, &dev_attr_reg_read);
	err |= device_create_file(&pdev->dev, &dev_attr_reg_write);
	err |= device_create_file(&pdev->dev, &dev_attr_flip);

	if (err != 0)
		printk(KERN_WARNING "fb%d: failed to register attributes (%d)\n",
				info->node, err);

	printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id);
	return 0;
out:
	if (info) {
		fb_dealloc_cmap(&info->cmap);
		kfree(info->pseudo_palette);
	}
	if (remapped_fbuf != NULL)
		iounmap(remapped_fbuf);
	if (remapped_regs != NULL)
		iounmap(remapped_regs);
	if (remapped_base != NULL)
		iounmap(remapped_base);
	if (info)
		framebuffer_release(info);
	return err;
}

static int __devexit w100fb_remove(struct platform_device *pdev)
{
	struct fb_info *info = platform_get_drvdata(pdev);
	struct w100fb_par *par=info->par;

	device_remove_file(&pdev->dev, &dev_attr_fastpllclk);
	device_remove_file(&pdev->dev, &dev_attr_reg_read);
	device_remove_file(&pdev->dev, &dev_attr_reg_write);
	device_remove_file(&pdev->dev, &dev_attr_flip);

	unregister_framebuffer(info);

	vfree(par->saved_intmem);
	vfree(par->saved_extmem);
	kfree(info->pseudo_palette);
	fb_dealloc_cmap(&info->cmap);

	iounmap(remapped_base);
	iounmap(remapped_regs);
	iounmap(remapped_fbuf);

	framebuffer_release(info);

	return 0;
}


/* ------------------- chipset specific functions -------------------------- */

/* Pulse the soft-reset bit (0x08) in cfgSTATUS, with settle delays. */
static void w100_soft_reset(void)
{
	u16 val = readw((u16 *) remapped_base + cfgSTATUS);
	writew(val | 0x08, (u16 *) remapped_base + cfgSTATUS);
	udelay(100);
	writew(0x00, (u16 *) remapped_base + cfgSTATUS);
	udelay(100);
}

static void w100_update_disable(void)
{
	union disp_db_buf_cntl_wr_u disp_db_buf_wr_cntl;

	/* Prevent display updates */
	disp_db_buf_wr_cntl.f.db_buf_cntl = 0x1e;
	disp_db_buf_wr_cntl.f.update_db_buf = 0;
	disp_db_buf_wr_cntl.f.en_db_buf = 0;
	writel((u32) (disp_db_buf_wr_cntl.val), remapped_regs + mmDISP_DB_BUF_CNTL);
}

static void w100_update_enable(void)
{
	union disp_db_buf_cntl_wr_u disp_db_buf_wr_cntl;

	/* Enable display updates */
	disp_db_buf_wr_cntl.f.db_buf_cntl = 0x1e;
	disp_db_buf_wr_cntl.f.update_db_buf = 1;
	disp_db_buf_wr_cntl.f.en_db_buf = 1;
	writel((u32) (disp_db_buf_wr_cntl.val), remapped_regs + mmDISP_DB_BUF_CNTL);
}

/* Exported GPIO accessors: port A reads mmGPIO_DATA, anything else
 * reads mmGPIO_DATA2. */
unsigned long w100fb_gpio_read(int port)
{
	unsigned long value;

	if (port==W100_GPIO_PORT_A)
		value = readl(remapped_regs + mmGPIO_DATA);
	else
		value = readl(remapped_regs + mmGPIO_DATA2);

	return value;
}

void w100fb_gpio_write(int port, unsigned long value)
{
	if (port==W100_GPIO_PORT_A)
		writel(value, remapped_regs + mmGPIO_DATA);
	else
		writel(value, remapped_regs + mmGPIO_DATA2);
}
EXPORT_SYMBOL(w100fb_gpio_read);
EXPORT_SYMBOL(w100fb_gpio_write);

/*
 * Initialization of critical w100 hardware
 */
static void w100_hw_init(struct w100fb_par *par)
{
	u32 temp32;
	union cif_cntl_u cif_cntl;
	union intf_cntl_u intf_cntl;
	union cfgreg_base_u cfgreg_base;
	union wrap_top_dir_u wrap_top_dir;
	union cif_read_dbg_u cif_read_dbg;
	union cpu_defaults_u cpu_default;
	union cif_write_dbg_u cif_write_dbg;
	union wrap_start_dir_u wrap_start_dir;
	union cif_io_u cif_io;
	struct w100_gpio_regs *gpio = par->mach->gpio;

	w100_soft_reset();

	/* This is what the fpga_init code does on reset. May be wrong
	   but there is little info available */
	writel(0x31, remapped_regs + mmSCRATCH_UMSK);
	for (temp32 = 0; temp32 < 10000; temp32++)
		readl(remapped_regs + mmSCRATCH_UMSK);
	writel(0x30, remapped_regs + mmSCRATCH_UMSK);

	/* Set up CIF */
	cif_io.val = defCIF_IO;
	writel((u32)(cif_io.val), remapped_regs + mmCIF_IO);

	cif_write_dbg.val = readl(remapped_regs + mmCIF_WRITE_DBG);
	cif_write_dbg.f.dis_packer_ful_during_rbbm_timeout = 0;
	cif_write_dbg.f.en_dword_split_to_rbbm = 1;
	cif_write_dbg.f.dis_timeout_during_rbbm = 1;
	writel((u32) (cif_write_dbg.val), remapped_regs + mmCIF_WRITE_DBG);

	cif_read_dbg.val = readl(remapped_regs + mmCIF_READ_DBG);
	cif_read_dbg.f.dis_rd_same_byte_to_trig_fetch = 1;
	writel((u32) (cif_read_dbg.val), remapped_regs + mmCIF_READ_DBG);

	cif_cntl.val = readl(remapped_regs + mmCIF_CNTL);
	cif_cntl.f.dis_system_bits = 1;
	cif_cntl.f.dis_mr = 1;
	cif_cntl.f.en_wait_to_compensate_dq_prop_dly = 0;
	cif_cntl.f.intb_oe = 1;
	cif_cntl.f.interrupt_active_high = 1;
	writel((u32) (cif_cntl.val), remapped_regs + mmCIF_CNTL);

	/* Setup cfgINTF_CNTL and cfgCPU defaults */
	intf_cntl.val = defINTF_CNTL;
	intf_cntl.f.ad_inc_a = 1;
	intf_cntl.f.ad_inc_b = 1;
	intf_cntl.f.rd_data_rdy_a = 0;
	intf_cntl.f.rd_data_rdy_b = 0;
	writeb((u8) (intf_cntl.val), remapped_base + cfgINTF_CNTL);

	cpu_default.val = defCPU_DEFAULTS;
	cpu_default.f.access_ind_addr_a = 1;
	cpu_default.f.access_ind_addr_b = 1;
	cpu_default.f.access_scratch_reg = 1;
	cpu_default.f.transition_size = 0;
	writeb((u8) (cpu_default.val), remapped_base + cfgCPU_DEFAULTS);

	/* set up the apertures */
	writeb((u8) (W100_REG_BASE >> 16), remapped_base + cfgREG_BASE);

	cfgreg_base.val = defCFGREG_BASE;
	cfgreg_base.f.cfgreg_base = W100_CFG_BASE;
	writel((u32) (cfgreg_base.val), remapped_regs + mmCFGREG_BASE);

	wrap_start_dir.val = defWRAP_START_DIR;
	wrap_start_dir.f.start_addr = WRAP_BUF_BASE_VALUE >> 1;
	writel((u32) (wrap_start_dir.val), remapped_regs + mmWRAP_START_DIR);

	wrap_top_dir.val = defWRAP_TOP_DIR;
	wrap_top_dir.f.top_addr = WRAP_BUF_TOP_VALUE >> 1;
	writel((u32) (wrap_top_dir.val), remapped_regs + mmWRAP_TOP_DIR);

	writel((u32) 0x2440, remapped_regs + mmRBBM_CNTL);

	/* Set the hardware to 565 colour */
	temp32 = readl(remapped_regs + mmDISP_DEBUG2);
	temp32 &= 0xff7fffff;
	temp32 |= 0x00800000;
	writel(temp32, remapped_regs + mmDISP_DEBUG2);

	/* Initialise the GPIO lines */
	if (gpio) {
		writel(gpio->init_data1, remapped_regs + mmGPIO_DATA);
		writel(gpio->init_data2, remapped_regs + mmGPIO_DATA2);
		writel(gpio->gpio_dir1, remapped_regs + mmGPIO_CNTL1);
		writel(gpio->gpio_oe1, remapped_regs + mmGPIO_CNTL2);
		writel(gpio->gpio_dir2, remapped_regs + mmGPIO_CNTL3);
		writel(gpio->gpio_oe2, remapped_regs + mmGPIO_CNTL4);
	}
}

/* Software mirror of the clock/power registers; kept here so fields can
 * be tweaked incrementally between register writes. */
struct power_state {
	union clk_pin_cntl_u clk_pin_cntl;
	union pll_ref_fb_div_u pll_ref_fb_div;
	union pll_cntl_u pll_cntl;
	union sclk_cntl_u sclk_cntl;
	union pclk_cntl_u pclk_cntl;
	union pwrmgt_cntl_u pwrmgt_cntl;
	int auto_mode;  /* system clock auto changing? */
};

static struct power_state w100_pwr_state;

/* The PLL Fout is determined by (XtalFreq/(M+1)) * ((N_int+1) + (N_fac/8)) */

/* 12.5MHz Crystal PLL Table */
static struct w100_pll_info xtal_12500000[] = {
	/*freq  M  N_int  N_fac  tfgoal  lock_time */
	{ 50,   0,  1,     0,    0xe0,   56},  /*  50.00 MHz */
	{ 75,   0,  5,     0,    0xde,   37},  /*  75.00 MHz */
	{100,   0,  7,     0,    0xe0,   28},  /* 100.00 MHz */
	{125,   0,  9,     0,    0xe0,   22},  /* 125.00 MHz */
	{150,   0,  11,    0,    0xe0,   17},  /* 150.00 MHz */
	{  0,   0,  0,     0,       0,    0},  /* Terminator */
};

/* 14.318MHz Crystal PLL Table */
static struct w100_pll_info xtal_14318000[] = {
	/*freq  M  N_int  N_fac  tfgoal  lock_time */
	{ 40,   4,  13,    0,    0xe0,   80}, /* tfgoal guessed */
	{ 50,   1,  6,     0,    0xe0,   64}, /*  50.05 MHz */
	{ 57,   2,  11,    0,    0xe0,   53}, /* tfgoal guessed */
	{ 75,   0,  4,     3,    0xe0,   43}, /*  75.08 MHz */
	{100,   0,  6,     0,    0xe0,   32}, /* 100.10 MHz */
	{  0,   0,  0,     0,       0,    0},
};

/* 16MHz Crystal PLL Table */
static struct w100_pll_info xtal_16000000[] = {
	/*freq  M  N_int  N_fac  tfgoal  lock_time */
	{ 72,   1,  8,     0,    0xe0,   48}, /* tfgoal guessed */
	{ 80,   1,  9,     0,    0xe0,   13}, /* tfgoal guessed */
	{ 95,   1,  10,    7,    0xe0,   38}, /* tfgoal guessed */
	{ 96,   1,  11,    0,    0xe0,   36}, /* tfgoal guessed */
	{  0,   0,  0,     0,       0,    0},
};

static struct pll_entries {
	int xtal_freq;
	struct w100_pll_info *pll_table;
} w100_pll_tables[] = {
	{ 12500000, &xtal_12500000[0] },
	{ 14318000, &xtal_14318000[0] },
	{ 16000000, &xtal_16000000[0] },
	{ 0 },
};

/* Look up the PLL table for a crystal frequency; returns 0 (NULL) when
 * no table matches. */
struct w100_pll_info __devinit *w100_get_xtal_table(unsigned int freq)
{
	struct pll_entries *pll_entry = w100_pll_tables;

	do {
		if (freq == pll_entry->xtal_freq)
			return pll_entry->pll_table;
		pll_entry++;
	} while (pll_entry->xtal_freq);
	return 0;
}

/* Run the chip's clock-frequency self-test on the selected test clock
 * and return the resulting count field. */
static unsigned int w100_get_testcount(unsigned int testclk_sel)
{
	union clk_test_cntl_u clk_test_cntl;

	udelay(5);

	/* Select the test clock source and reset */
	clk_test_cntl.f.start_check_freq = 0x0;
	clk_test_cntl.f.testclk_sel = testclk_sel;
	clk_test_cntl.f.tstcount_rst = 0x1; /* set reset */
	writel((u32) (clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL);

	clk_test_cntl.f.tstcount_rst = 0x0; /* clear reset */
	writel((u32) (clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL);

	/* Run clock test */
	clk_test_cntl.f.start_check_freq = 0x1;
	writel((u32) (clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL);

	/* Give the test time to complete */
	udelay(20);

	/* Return the result */
	clk_test_cntl.val = readl(remapped_regs + mmCLK_TEST_CNTL);
	clk_test_cntl.f.start_check_freq = 0x0;
	writel((u32) (clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL);

	return clk_test_cntl.f.test_count;
}

/* Iteratively tune the VCO configuration until the measured test counts
 * bracket the table's tfgoal.  Returns 1 on success, 0 when the search
 * space (ioffset/pvg) is exhausted. */
static int w100_pll_adjust(struct w100_pll_info *pll)
{
	unsigned int tf80;
	unsigned int tf20;

	/* Initial Settings */
	w100_pwr_state.pll_cntl.f.pll_pwdn = 0x0;     /* power down */
	w100_pwr_state.pll_cntl.f.pll_reset = 0x0;    /* not reset */
	w100_pwr_state.pll_cntl.f.pll_tcpoff = 0x1;   /* Hi-Z */
	w100_pwr_state.pll_cntl.f.pll_pvg = 0x0;      /* VCO gain = 0 */
	w100_pwr_state.pll_cntl.f.pll_vcofr = 0x0;    /* VCO frequency range control = off */
	w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0;  /* current offset inside VCO = 0 */
	w100_pwr_state.pll_cntl.f.pll_ring_off = 0x0;

	/* Wai Ming 80 percent of VDD 1.3V gives 1.04V, minimum operating voltage is 1.08V
	 * therefore, commented out the following lines
	 * tf80 meant tf100
	 */
	do {
		/* set VCO input = 0.8 * VDD */
		w100_pwr_state.pll_cntl.f.pll_dactal = 0xd;
		writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);

		tf80 = w100_get_testcount(TESTCLK_SRC_PLL);
		if (tf80 >= (pll->tfgoal)) {
			/* set VCO input = 0.2 * VDD */
			w100_pwr_state.pll_cntl.f.pll_dactal = 0x7;
			writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);

			tf20 = w100_get_testcount(TESTCLK_SRC_PLL);
			if (tf20 <= (pll->tfgoal))
				return 1;  /* Success */

			if ((w100_pwr_state.pll_cntl.f.pll_vcofr == 0x0) &&
				((w100_pwr_state.pll_cntl.f.pll_pvg == 0x7) ||
				(w100_pwr_state.pll_cntl.f.pll_ioffset == 0x0))) {
				/* slow VCO config */
				w100_pwr_state.pll_cntl.f.pll_vcofr = 0x1;
				w100_pwr_state.pll_cntl.f.pll_pvg = 0x0;
				w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0;
				continue;
			}
		}
		if ((w100_pwr_state.pll_cntl.f.pll_ioffset) < 0x3) {
			w100_pwr_state.pll_cntl.f.pll_ioffset += 0x1;
		} else if ((w100_pwr_state.pll_cntl.f.pll_pvg) < 0x7) {
			w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0;
			w100_pwr_state.pll_cntl.f.pll_pvg += 0x1;
		} else {
			return 0;  /* Error */
		}
	} while(1);
}

/*
 * w100_pll_calibration
 */
static int w100_pll_calibration(struct w100_pll_info *pll)
{
	int status;

	status = w100_pll_adjust(pll);

	/* PLL Reset And Lock */
	/* set VCO input = 0.5 * VDD */
	w100_pwr_state.pll_cntl.f.pll_dactal = 0xa;
	writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);

	udelay(1);  /* reset time */

	/* enable charge pump */
	w100_pwr_state.pll_cntl.f.pll_tcpoff = 0x0;  /* normal */
	writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);

	/* set VCO input = Hi-Z, disable DAC */
	w100_pwr_state.pll_cntl.f.pll_dactal = 0x0;
	writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);

	udelay(400);  /* lock time */

	/* PLL locked */

	return status;
}

/* Program the divider table entry into the PLL and calibrate, running
 * off the crystal while the PLL is being adjusted. */
static int w100_pll_set_clk(struct w100_pll_info *pll)
{
	int status;

	if (w100_pwr_state.auto_mode == 1)  /* auto mode */
	{
		w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_hw_en = 0x0;  /* disable fast to normal */
		w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_hw_en = 0x0;  /* disable normal to fast */
		writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL);
	}

	/* Set system clock source to XTAL whilst adjusting the PLL! */
	w100_pwr_state.sclk_cntl.f.sclk_src_sel = CLK_SRC_XTAL;
	writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL);

	w100_pwr_state.pll_ref_fb_div.f.pll_ref_div = pll->M;
	w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_int = pll->N_int;
	w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_frac = pll->N_fac;
	w100_pwr_state.pll_ref_fb_div.f.pll_lock_time = pll->lock_time;
	writel((u32) (w100_pwr_state.pll_ref_fb_div.val), remapped_regs + mmPLL_REF_FB_DIV);

	w100_pwr_state.pwrmgt_cntl.f.pwm_mode_req = 0;
	writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL);

	status = w100_pll_calibration(pll);

	if (w100_pwr_state.auto_mode == 1)  /* auto mode */
	{
		w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_hw_en = 0x1;  /* reenable fast to normal */
		w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_hw_en = 0x1;  /* reenable normal to fast */
		writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL);
	}
	return status;
}

/* freq = target frequency of the PLL */
/* Returns the w100_pll_set_clk() status, or 0 when the frequency is not
 * present in the PLL table. */
static int w100_set_pll_freq(struct w100fb_par *par, unsigned int freq)
{
	struct w100_pll_info *pll = par->pll_table;
	do {
		if (freq == pll->freq) {
			return w100_pll_set_clk(pll);
		}
		pll++;
	} while(pll->freq);
	return 0;
}

/* Set up an initial state. Some values/fields set here will be overwritten.
*/ static void w100_pwm_setup(struct w100fb_par *par) { w100_pwr_state.clk_pin_cntl.f.osc_en = 0x1; w100_pwr_state.clk_pin_cntl.f.osc_gain = 0x1f; w100_pwr_state.clk_pin_cntl.f.dont_use_xtalin = 0x0; w100_pwr_state.clk_pin_cntl.f.xtalin_pm_en = 0x0; w100_pwr_state.clk_pin_cntl.f.xtalin_dbl_en = par->mach->xtal_dbl ? 1 : 0; w100_pwr_state.clk_pin_cntl.f.cg_debug = 0x0; writel((u32) (w100_pwr_state.clk_pin_cntl.val), remapped_regs + mmCLK_PIN_CNTL); w100_pwr_state.sclk_cntl.f.sclk_src_sel = CLK_SRC_XTAL; w100_pwr_state.sclk_cntl.f.sclk_post_div_fast = 0x0; /* Pfast = 1 */ w100_pwr_state.sclk_cntl.f.sclk_clkon_hys = 0x3; w100_pwr_state.sclk_cntl.f.sclk_post_div_slow = 0x0; /* Pslow = 1 */ w100_pwr_state.sclk_cntl.f.disp_cg_ok2switch_en = 0x0; w100_pwr_state.sclk_cntl.f.sclk_force_reg = 0x0; /* Dynamic */ w100_pwr_state.sclk_cntl.f.sclk_force_disp = 0x0; /* Dynamic */ w100_pwr_state.sclk_cntl.f.sclk_force_mc = 0x0; /* Dynamic */ w100_pwr_state.sclk_cntl.f.sclk_force_extmc = 0x0; /* Dynamic */ w100_pwr_state.sclk_cntl.f.sclk_force_cp = 0x0; /* Dynamic */ w100_pwr_state.sclk_cntl.f.sclk_force_e2 = 0x0; /* Dynamic */ w100_pwr_state.sclk_cntl.f.sclk_force_e3 = 0x0; /* Dynamic */ w100_pwr_state.sclk_cntl.f.sclk_force_idct = 0x0; /* Dynamic */ w100_pwr_state.sclk_cntl.f.sclk_force_bist = 0x0; /* Dynamic */ w100_pwr_state.sclk_cntl.f.busy_extend_cp = 0x0; w100_pwr_state.sclk_cntl.f.busy_extend_e2 = 0x0; w100_pwr_state.sclk_cntl.f.busy_extend_e3 = 0x0; w100_pwr_state.sclk_cntl.f.busy_extend_idct = 0x0; writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL); w100_pwr_state.pclk_cntl.f.pclk_src_sel = CLK_SRC_XTAL; w100_pwr_state.pclk_cntl.f.pclk_post_div = 0x1; /* P = 2 */ w100_pwr_state.pclk_cntl.f.pclk_force_disp = 0x0; /* Dynamic */ writel((u32) (w100_pwr_state.pclk_cntl.val), remapped_regs + mmPCLK_CNTL); w100_pwr_state.pll_ref_fb_div.f.pll_ref_div = 0x0; /* M = 1 */ w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_int = 0x0; /* N = 1.0 */ 
w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_frac = 0x0; w100_pwr_state.pll_ref_fb_div.f.pll_reset_time = 0x5; w100_pwr_state.pll_ref_fb_div.f.pll_lock_time = 0xff; writel((u32) (w100_pwr_state.pll_ref_fb_div.val), remapped_regs + mmPLL_REF_FB_DIV); w100_pwr_state.pll_cntl.f.pll_pwdn = 0x1; w100_pwr_state.pll_cntl.f.pll_reset = 0x1; w100_pwr_state.pll_cntl.f.pll_pm_en = 0x0; w100_pwr_state.pll_cntl.f.pll_mode = 0x0; /* uses VCO clock */ w100_pwr_state.pll_cntl.f.pll_refclk_sel = 0x0; w100_pwr_state.pll_cntl.f.pll_fbclk_sel = 0x0; w100_pwr_state.pll_cntl.f.pll_tcpoff = 0x0; w100_pwr_state.pll_cntl.f.pll_pcp = 0x4; w100_pwr_state.pll_cntl.f.pll_pvg = 0x0; w100_pwr_state.pll_cntl.f.pll_vcofr = 0x0; w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0; w100_pwr_state.pll_cntl.f.pll_pecc_mode = 0x0; w100_pwr_state.pll_cntl.f.pll_pecc_scon = 0x0; w100_pwr_state.pll_cntl.f.pll_dactal = 0x0; /* Hi-Z */ w100_pwr_state.pll_cntl.f.pll_cp_clip = 0x3; w100_pwr_state.pll_cntl.f.pll_conf = 0x2; w100_pwr_state.pll_cntl.f.pll_mbctrl = 0x2; w100_pwr_state.pll_cntl.f.pll_ring_off = 0x0; writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL); w100_pwr_state.pwrmgt_cntl.f.pwm_enable = 0x0; w100_pwr_state.pwrmgt_cntl.f.pwm_mode_req = 0x1; /* normal mode (0, 1, 3) */ w100_pwr_state.pwrmgt_cntl.f.pwm_wakeup_cond = 0x0; w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_hw_en = 0x0; w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_hw_en = 0x0; w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_cond = 0x1; /* PM4,ENG */ w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_cond = 0x1; /* PM4,ENG */ w100_pwr_state.pwrmgt_cntl.f.pwm_idle_timer = 0xFF; w100_pwr_state.pwrmgt_cntl.f.pwm_busy_timer = 0xFF; writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL); w100_pwr_state.auto_mode = 0; /* manual mode */ } /* * Setup the w100 clocks for the specified mode */ static void w100_init_clocks(struct w100fb_par *par) { struct w100_mode *mode = par->mode; if (mode->pixclk_src == CLK_SRC_PLL || 
mode->sysclk_src == CLK_SRC_PLL) w100_set_pll_freq(par, (par->fastpll_mode && mode->fast_pll_freq) ? mode->fast_pll_freq : mode->pll_freq); w100_pwr_state.sclk_cntl.f.sclk_src_sel = mode->sysclk_src; w100_pwr_state.sclk_cntl.f.sclk_post_div_fast = mode->sysclk_divider; w100_pwr_state.sclk_cntl.f.sclk_post_div_slow = mode->sysclk_divider; writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL); } static void w100_init_lcd(struct w100fb_par *par) { u32 temp32; struct w100_mode *mode = par->mode; struct w100_gen_regs *regs = par->mach->regs; union active_h_disp_u active_h_disp; union active_v_disp_u active_v_disp; union graphic_h_disp_u graphic_h_disp; union graphic_v_disp_u graphic_v_disp; union crtc_total_u crtc_total; /* w3200 doesn't like undefined bits being set so zero register values first */ active_h_disp.val = 0; active_h_disp.f.active_h_start=mode->left_margin; active_h_disp.f.active_h_end=mode->left_margin + mode->xres; writel(active_h_disp.val, remapped_regs + mmACTIVE_H_DISP); active_v_disp.val = 0; active_v_disp.f.active_v_start=mode->upper_margin; active_v_disp.f.active_v_end=mode->upper_margin + mode->yres; writel(active_v_disp.val, remapped_regs + mmACTIVE_V_DISP); graphic_h_disp.val = 0; graphic_h_disp.f.graphic_h_start=mode->left_margin; graphic_h_disp.f.graphic_h_end=mode->left_margin + mode->xres; writel(graphic_h_disp.val, remapped_regs + mmGRAPHIC_H_DISP); graphic_v_disp.val = 0; graphic_v_disp.f.graphic_v_start=mode->upper_margin; graphic_v_disp.f.graphic_v_end=mode->upper_margin + mode->yres; writel(graphic_v_disp.val, remapped_regs + mmGRAPHIC_V_DISP); crtc_total.val = 0; crtc_total.f.crtc_h_total=mode->left_margin + mode->xres + mode->right_margin; crtc_total.f.crtc_v_total=mode->upper_margin + mode->yres + mode->lower_margin; writel(crtc_total.val, remapped_regs + mmCRTC_TOTAL); writel(mode->crtc_ss, remapped_regs + mmCRTC_SS); writel(mode->crtc_ls, remapped_regs + mmCRTC_LS); writel(mode->crtc_gs, remapped_regs + 
mmCRTC_GS); writel(mode->crtc_vpos_gs, remapped_regs + mmCRTC_VPOS_GS); writel(mode->crtc_rev, remapped_regs + mmCRTC_REV); writel(mode->crtc_dclk, remapped_regs + mmCRTC_DCLK); writel(mode->crtc_gclk, remapped_regs + mmCRTC_GCLK); writel(mode->crtc_goe, remapped_regs + mmCRTC_GOE); writel(mode->crtc_ps1_active, remapped_regs + mmCRTC_PS1_ACTIVE); writel(regs->lcd_format, remapped_regs + mmLCD_FORMAT); writel(regs->lcdd_cntl1, remapped_regs + mmLCDD_CNTL1); writel(regs->lcdd_cntl2, remapped_regs + mmLCDD_CNTL2); writel(regs->genlcd_cntl1, remapped_regs + mmGENLCD_CNTL1); writel(regs->genlcd_cntl2, remapped_regs + mmGENLCD_CNTL2); writel(regs->genlcd_cntl3, remapped_regs + mmGENLCD_CNTL3); writel(0x00000000, remapped_regs + mmCRTC_FRAME); writel(0x00000000, remapped_regs + mmCRTC_FRAME_VPOS); writel(0x00000000, remapped_regs + mmCRTC_DEFAULT_COUNT); writel(0x0000FF00, remapped_regs + mmLCD_BACKGROUND_COLOR); /* Hack for overlay in ext memory */ temp32 = readl(remapped_regs + mmDISP_DEBUG2); temp32 |= 0xc0000000; writel(temp32, remapped_regs + mmDISP_DEBUG2); } static void w100_setup_memory(struct w100fb_par *par) { union mc_ext_mem_location_u extmem_location; union mc_fb_location_u intmem_location; struct w100_mem_info *mem = par->mach->mem; struct w100_bm_mem_info *bm_mem = par->mach->bm_mem; if (!par->extmem_active) { w100_suspend(W100_SUSPEND_EXTMEM); /* Map Internal Memory at FB Base */ intmem_location.f.mc_fb_start = W100_FB_BASE >> 8; intmem_location.f.mc_fb_top = (W100_FB_BASE+MEM_INT_SIZE) >> 8; writel((u32) (intmem_location.val), remapped_regs + mmMC_FB_LOCATION); /* Unmap External Memory - value is *probably* irrelevant but may have meaning to acceleration libraries */ extmem_location.f.mc_ext_mem_start = MEM_EXT_BASE_VALUE >> 8; extmem_location.f.mc_ext_mem_top = (MEM_EXT_BASE_VALUE-1) >> 8; writel((u32) (extmem_location.val), remapped_regs + mmMC_EXT_MEM_LOCATION); } else { /* Map Internal Memory to its default location */ intmem_location.f.mc_fb_start = 
MEM_INT_BASE_VALUE >> 8; intmem_location.f.mc_fb_top = (MEM_INT_BASE_VALUE+MEM_INT_SIZE) >> 8; writel((u32) (intmem_location.val), remapped_regs + mmMC_FB_LOCATION); /* Map External Memory at FB Base */ extmem_location.f.mc_ext_mem_start = W100_FB_BASE >> 8; extmem_location.f.mc_ext_mem_top = (W100_FB_BASE+par->mach->mem->size) >> 8; writel((u32) (extmem_location.val), remapped_regs + mmMC_EXT_MEM_LOCATION); writel(0x00007800, remapped_regs + mmMC_BIST_CTRL); writel(mem->ext_cntl, remapped_regs + mmMEM_EXT_CNTL); writel(0x00200021, remapped_regs + mmMEM_SDRAM_MODE_REG); udelay(100); writel(0x80200021, remapped_regs + mmMEM_SDRAM_MODE_REG); udelay(100); writel(mem->sdram_mode_reg, remapped_regs + mmMEM_SDRAM_MODE_REG); udelay(100); writel(mem->ext_timing_cntl, remapped_regs + mmMEM_EXT_TIMING_CNTL); writel(mem->io_cntl, remapped_regs + mmMEM_IO_CNTL); if (bm_mem) { writel(bm_mem->ext_mem_bw, remapped_regs + mmBM_EXT_MEM_BANDWIDTH); writel(bm_mem->offset, remapped_regs + mmBM_OFFSET); writel(bm_mem->ext_timing_ctl, remapped_regs + mmBM_MEM_EXT_TIMING_CNTL); writel(bm_mem->ext_cntl, remapped_regs + mmBM_MEM_EXT_CNTL); writel(bm_mem->mode_reg, remapped_regs + mmBM_MEM_MODE_REG); writel(bm_mem->io_cntl, remapped_regs + mmBM_MEM_IO_CNTL); writel(bm_mem->config, remapped_regs + mmBM_CONFIG); } } } static void w100_set_dispregs(struct w100fb_par *par) { unsigned long rot=0, divider, offset=0; union graphic_ctrl_u graphic_ctrl; /* See if the mode has been rotated */ if (par->xres == par->mode->xres) { if (par->flip) { rot=3; /* 180 degree */ offset=(par->xres * par->yres) - 1; } /* else 0 degree */ divider = par->mode->pixclk_divider; } else { if (par->flip) { rot=2; /* 270 degree */ offset=par->xres - 1; } else { rot=1; /* 90 degree */ offset=par->xres * (par->yres - 1); } divider = par->mode->pixclk_divider_rotated; } graphic_ctrl.val = 0; /* w32xx doesn't like undefined bits */ switch (par->chip_id) { case CHIP_ID_W100: graphic_ctrl.f_w100.color_depth=6; 
graphic_ctrl.f_w100.en_crtc=1; graphic_ctrl.f_w100.en_graphic_req=1; graphic_ctrl.f_w100.en_graphic_crtc=1; graphic_ctrl.f_w100.lcd_pclk_on=1; graphic_ctrl.f_w100.lcd_sclk_on=1; graphic_ctrl.f_w100.low_power_on=0; graphic_ctrl.f_w100.req_freq=0; graphic_ctrl.f_w100.portrait_mode=rot; /* Zaurus needs this */ switch(par->xres) { case 240: case 320: default: graphic_ctrl.f_w100.total_req_graphic=0xa0; break; case 480: case 640: switch(rot) { case 0: /* 0 */ case 3: /* 180 */ graphic_ctrl.f_w100.low_power_on=1; graphic_ctrl.f_w100.req_freq=5; break; case 1: /* 90 */ case 2: /* 270 */ graphic_ctrl.f_w100.req_freq=4; break; default: break; } graphic_ctrl.f_w100.total_req_graphic=0xf0; break; } break; case CHIP_ID_W3200: case CHIP_ID_W3220: graphic_ctrl.f_w32xx.color_depth=6; graphic_ctrl.f_w32xx.en_crtc=1; graphic_ctrl.f_w32xx.en_graphic_req=1; graphic_ctrl.f_w32xx.en_graphic_crtc=1; graphic_ctrl.f_w32xx.lcd_pclk_on=1; graphic_ctrl.f_w32xx.lcd_sclk_on=1; graphic_ctrl.f_w32xx.low_power_on=0; graphic_ctrl.f_w32xx.req_freq=0; graphic_ctrl.f_w32xx.total_req_graphic=par->mode->xres >> 1; /* panel xres, not mode */ graphic_ctrl.f_w32xx.portrait_mode=rot; break; } /* Set the pixel clock source and divider */ w100_pwr_state.pclk_cntl.f.pclk_src_sel = par->mode->pixclk_src; w100_pwr_state.pclk_cntl.f.pclk_post_div = divider; writel((u32) (w100_pwr_state.pclk_cntl.val), remapped_regs + mmPCLK_CNTL); writel(graphic_ctrl.val, remapped_regs + mmGRAPHIC_CTRL); writel(W100_FB_BASE + ((offset * BITS_PER_PIXEL/8)&~0x03UL), remapped_regs + mmGRAPHIC_OFFSET); writel((par->xres*BITS_PER_PIXEL/8), remapped_regs + mmGRAPHIC_PITCH); } /* * Work out how long the sync pulse lasts * Value is 1/(time in seconds) */ static void calc_hsync(struct w100fb_par *par) { unsigned long hsync; struct w100_mode *mode = par->mode; union crtc_ss_u crtc_ss; if (mode->pixclk_src == CLK_SRC_XTAL) hsync=par->mach->xtal_freq; else hsync=((par->fastpll_mode && mode->fast_pll_freq) ? 
mode->fast_pll_freq : mode->pll_freq)*100000; hsync /= (w100_pwr_state.pclk_cntl.f.pclk_post_div + 1); crtc_ss.val = readl(remapped_regs + mmCRTC_SS); if (crtc_ss.val) par->hsync_len = hsync / (crtc_ss.f.ss_end-crtc_ss.f.ss_start); else par->hsync_len = 0; } static void w100_suspend(u32 mode) { u32 val; writel(0x7FFF8000, remapped_regs + mmMC_EXT_MEM_LOCATION); writel(0x00FF0000, remapped_regs + mmMC_PERF_MON_CNTL); val = readl(remapped_regs + mmMEM_EXT_TIMING_CNTL); val &= ~(0x00100000); /* bit20=0 */ val |= 0xFF000000; /* bit31:24=0xff */ writel(val, remapped_regs + mmMEM_EXT_TIMING_CNTL); val = readl(remapped_regs + mmMEM_EXT_CNTL); val &= ~(0x00040000); /* bit18=0 */ val |= 0x00080000; /* bit19=1 */ writel(val, remapped_regs + mmMEM_EXT_CNTL); udelay(1); /* wait 1us */ if (mode == W100_SUSPEND_EXTMEM) { /* CKE: Tri-State */ val = readl(remapped_regs + mmMEM_EXT_CNTL); val |= 0x40000000; /* bit30=1 */ writel(val, remapped_regs + mmMEM_EXT_CNTL); /* CLK: Stop */ val = readl(remapped_regs + mmMEM_EXT_CNTL); val &= ~(0x00000001); /* bit0=0 */ writel(val, remapped_regs + mmMEM_EXT_CNTL); } else { writel(0x00000000, remapped_regs + mmSCLK_CNTL); writel(0x000000BF, remapped_regs + mmCLK_PIN_CNTL); writel(0x00000015, remapped_regs + mmPWRMGT_CNTL); udelay(5); val = readl(remapped_regs + mmPLL_CNTL); val |= 0x00000004; /* bit2=1 */ writel(val, remapped_regs + mmPLL_CNTL); writel(0x0000001d, remapped_regs + mmPWRMGT_CNTL); } } static void w100_vsync(void) { u32 tmp; int timeout = 30000; /* VSync timeout = 30[ms] > 16.8[ms] */ tmp = readl(remapped_regs + mmACTIVE_V_DISP); /* set vline pos */ writel((tmp >> 16) & 0x3ff, remapped_regs + mmDISP_INT_CNTL); /* disable vline irq */ tmp = readl(remapped_regs + mmGEN_INT_CNTL); tmp &= ~0x00000002; writel(tmp, remapped_regs + mmGEN_INT_CNTL); /* clear vline irq status */ writel(0x00000002, remapped_regs + mmGEN_INT_STATUS); /* enable vline irq */ writel((tmp | 0x00000002), remapped_regs + mmGEN_INT_CNTL); /* clear vline irq status 
*/ writel(0x00000002, remapped_regs + mmGEN_INT_STATUS); while(timeout > 0) { if (readl(remapped_regs + mmGEN_INT_STATUS) & 0x00000002) break; udelay(1); timeout--; } /* disable vline irq */ writel(tmp, remapped_regs + mmGEN_INT_CNTL); /* clear vline irq status */ writel(0x00000002, remapped_regs + mmGEN_INT_STATUS); } static struct platform_driver w100fb_driver = { .probe = w100fb_probe, .remove = __devexit_p(w100fb_remove), .suspend = w100fb_suspend, .resume = w100fb_resume, .driver = { .name = "w100fb", }, }; module_platform_driver(w100fb_driver); MODULE_DESCRIPTION("ATI Imageon w100 framebuffer driver"); MODULE_LICENSE("GPL");
gpl-2.0
computersforpeace/UBIFS-backports
drivers/hwmon/adt7462.c
4940
60232
/* * A hwmon driver for the Analog Devices ADT7462 * Copyright (C) 2008 IBM * * Author: Darrick J. Wong <djwong@us.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/log2.h> #include <linux/slab.h> /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END }; /* ADT7462 registers */ #define ADT7462_REG_DEVICE 0x3D #define ADT7462_REG_VENDOR 0x3E #define ADT7462_REG_REVISION 0x3F #define ADT7462_REG_MIN_TEMP_BASE_ADDR 0x44 #define ADT7462_REG_MIN_TEMP_MAX_ADDR 0x47 #define ADT7462_REG_MAX_TEMP_BASE_ADDR 0x48 #define ADT7462_REG_MAX_TEMP_MAX_ADDR 0x4B #define ADT7462_REG_TEMP_BASE_ADDR 0x88 #define ADT7462_REG_TEMP_MAX_ADDR 0x8F #define ADT7462_REG_FAN_BASE_ADDR 0x98 #define ADT7462_REG_FAN_MAX_ADDR 0x9F #define ADT7462_REG_FAN2_BASE_ADDR 0xA2 #define ADT7462_REG_FAN2_MAX_ADDR 0xA9 #define ADT7462_REG_FAN_ENABLE 0x07 #define ADT7462_REG_FAN_MIN_BASE_ADDR 0x78 #define ADT7462_REG_FAN_MIN_MAX_ADDR 0x7F #define ADT7462_REG_CFG2 0x02 #define ADT7462_FSPD_MASK 0x20 #define ADT7462_REG_PWM_BASE_ADDR 0xAA #define ADT7462_REG_PWM_MAX_ADDR 0xAD #define 
ADT7462_REG_PWM_MIN_BASE_ADDR 0x28 #define ADT7462_REG_PWM_MIN_MAX_ADDR 0x2B #define ADT7462_REG_PWM_MAX 0x2C #define ADT7462_REG_PWM_TEMP_MIN_BASE_ADDR 0x5C #define ADT7462_REG_PWM_TEMP_MIN_MAX_ADDR 0x5F #define ADT7462_REG_PWM_TEMP_RANGE_BASE_ADDR 0x60 #define ADT7462_REG_PWM_TEMP_RANGE_MAX_ADDR 0x63 #define ADT7462_PWM_HYST_MASK 0x0F #define ADT7462_PWM_RANGE_MASK 0xF0 #define ADT7462_PWM_RANGE_SHIFT 4 #define ADT7462_REG_PWM_CFG_BASE_ADDR 0x21 #define ADT7462_REG_PWM_CFG_MAX_ADDR 0x24 #define ADT7462_PWM_CHANNEL_MASK 0xE0 #define ADT7462_PWM_CHANNEL_SHIFT 5 #define ADT7462_REG_PIN_CFG_BASE_ADDR 0x10 #define ADT7462_REG_PIN_CFG_MAX_ADDR 0x13 #define ADT7462_PIN7_INPUT 0x01 /* cfg0 */ #define ADT7462_DIODE3_INPUT 0x20 #define ADT7462_DIODE1_INPUT 0x40 #define ADT7462_VID_INPUT 0x80 #define ADT7462_PIN22_INPUT 0x04 /* cfg1 */ #define ADT7462_PIN21_INPUT 0x08 #define ADT7462_PIN19_INPUT 0x10 #define ADT7462_PIN15_INPUT 0x20 #define ADT7462_PIN13_INPUT 0x40 #define ADT7462_PIN8_INPUT 0x80 #define ADT7462_PIN23_MASK 0x03 #define ADT7462_PIN23_SHIFT 0 #define ADT7462_PIN26_MASK 0x0C /* cfg2 */ #define ADT7462_PIN26_SHIFT 2 #define ADT7462_PIN25_MASK 0x30 #define ADT7462_PIN25_SHIFT 4 #define ADT7462_PIN24_MASK 0xC0 #define ADT7462_PIN24_SHIFT 6 #define ADT7462_PIN26_VOLT_INPUT 0x08 #define ADT7462_PIN25_VOLT_INPUT 0x20 #define ADT7462_PIN28_SHIFT 4 /* cfg3 */ #define ADT7462_PIN28_VOLT 0x5 #define ADT7462_REG_ALARM1 0xB8 #define ADT7462_LT_ALARM 0x02 #define ADT7462_R1T_ALARM 0x04 #define ADT7462_R2T_ALARM 0x08 #define ADT7462_R3T_ALARM 0x10 #define ADT7462_REG_ALARM2 0xBB #define ADT7462_V0_ALARM 0x01 #define ADT7462_V1_ALARM 0x02 #define ADT7462_V2_ALARM 0x04 #define ADT7462_V3_ALARM 0x08 #define ADT7462_V4_ALARM 0x10 #define ADT7462_V5_ALARM 0x20 #define ADT7462_V6_ALARM 0x40 #define ADT7462_V7_ALARM 0x80 #define ADT7462_REG_ALARM3 0xBC #define ADT7462_V8_ALARM 0x08 #define ADT7462_V9_ALARM 0x10 #define ADT7462_V10_ALARM 0x20 #define ADT7462_V11_ALARM 0x40 #define 
ADT7462_V12_ALARM 0x80 #define ADT7462_REG_ALARM4 0xBD #define ADT7462_F0_ALARM 0x01 #define ADT7462_F1_ALARM 0x02 #define ADT7462_F2_ALARM 0x04 #define ADT7462_F3_ALARM 0x08 #define ADT7462_F4_ALARM 0x10 #define ADT7462_F5_ALARM 0x20 #define ADT7462_F6_ALARM 0x40 #define ADT7462_F7_ALARM 0x80 #define ADT7462_ALARM1 0x0000 #define ADT7462_ALARM2 0x0100 #define ADT7462_ALARM3 0x0200 #define ADT7462_ALARM4 0x0300 #define ADT7462_ALARM_REG_SHIFT 8 #define ADT7462_ALARM_FLAG_MASK 0x0F #define ADT7462_TEMP_COUNT 4 #define ADT7462_TEMP_REG(x) (ADT7462_REG_TEMP_BASE_ADDR + ((x) * 2)) #define ADT7462_TEMP_MIN_REG(x) (ADT7462_REG_MIN_TEMP_BASE_ADDR + (x)) #define ADT7462_TEMP_MAX_REG(x) (ADT7462_REG_MAX_TEMP_BASE_ADDR + (x)) #define TEMP_FRAC_OFFSET 6 #define ADT7462_FAN_COUNT 8 #define ADT7462_REG_FAN_MIN(x) (ADT7462_REG_FAN_MIN_BASE_ADDR + (x)) #define ADT7462_PWM_COUNT 4 #define ADT7462_REG_PWM(x) (ADT7462_REG_PWM_BASE_ADDR + (x)) #define ADT7462_REG_PWM_MIN(x) (ADT7462_REG_PWM_MIN_BASE_ADDR + (x)) #define ADT7462_REG_PWM_TMIN(x) \ (ADT7462_REG_PWM_TEMP_MIN_BASE_ADDR + (x)) #define ADT7462_REG_PWM_TRANGE(x) \ (ADT7462_REG_PWM_TEMP_RANGE_BASE_ADDR + (x)) #define ADT7462_PIN_CFG_REG_COUNT 4 #define ADT7462_REG_PIN_CFG(x) (ADT7462_REG_PIN_CFG_BASE_ADDR + (x)) #define ADT7462_REG_PWM_CFG(x) (ADT7462_REG_PWM_CFG_BASE_ADDR + (x)) #define ADT7462_ALARM_REG_COUNT 4 /* * The chip can measure 13 different voltage sources: * * 1. +12V1 (pin 7) * 2. Vccp1/+2.5V/+1.8V/+1.5V (pin 23) * 3. +12V3 (pin 22) * 4. +5V (pin 21) * 5. +1.25V/+0.9V (pin 19) * 6. +2.5V/+1.8V (pin 15) * 7. +3.3v (pin 13) * 8. +12V2 (pin 8) * 9. Vbatt/FSB_Vtt (pin 26) * A. +3.3V/+1.2V1 (pin 25) * B. Vccp2/+2.5V/+1.8V/+1.5V (pin 24) * C. +1.5V ICH (only if BOTH pin 28/29 are set to +1.5V) * D. +1.5V 3GPIO (only if BOTH pin 28/29 are set to +1.5V) * * Each of these 13 has a factor to convert raw to voltage. 
Even better, * the pins can be connected to other sensors (tach/gpio/hot/etc), which * makes the bookkeeping tricky. * * Some, but not all, of these voltages have low/high limits. */ #define ADT7462_VOLT_COUNT 13 #define ADT7462_VENDOR 0x41 #define ADT7462_DEVICE 0x62 /* datasheet only mentions a revision 4 */ #define ADT7462_REVISION 0x04 /* How often do we reread sensors values? (In jiffies) */ #define SENSOR_REFRESH_INTERVAL (2 * HZ) /* How often do we reread sensor limit values? (In jiffies) */ #define LIMIT_REFRESH_INTERVAL (60 * HZ) /* datasheet says to divide this number by the fan reading to get fan rpm */ #define FAN_PERIOD_TO_RPM(x) ((90000 * 60) / (x)) #define FAN_RPM_TO_PERIOD FAN_PERIOD_TO_RPM #define FAN_PERIOD_INVALID 65535 #define FAN_DATA_VALID(x) ((x) && (x) != FAN_PERIOD_INVALID) #define MASK_AND_SHIFT(value, prefix) \ (((value) & prefix##_MASK) >> prefix##_SHIFT) struct adt7462_data { struct device *hwmon_dev; struct attribute_group attrs; struct mutex lock; char sensors_valid; char limits_valid; unsigned long sensors_last_updated; /* In jiffies */ unsigned long limits_last_updated; /* In jiffies */ u8 temp[ADT7462_TEMP_COUNT]; /* bits 6-7 are quarter pieces of temp */ u8 temp_frac[ADT7462_TEMP_COUNT]; u8 temp_min[ADT7462_TEMP_COUNT]; u8 temp_max[ADT7462_TEMP_COUNT]; u16 fan[ADT7462_FAN_COUNT]; u8 fan_enabled; u8 fan_min[ADT7462_FAN_COUNT]; u8 cfg2; u8 pwm[ADT7462_PWM_COUNT]; u8 pin_cfg[ADT7462_PIN_CFG_REG_COUNT]; u8 voltages[ADT7462_VOLT_COUNT]; u8 volt_max[ADT7462_VOLT_COUNT]; u8 volt_min[ADT7462_VOLT_COUNT]; u8 pwm_min[ADT7462_PWM_COUNT]; u8 pwm_tmin[ADT7462_PWM_COUNT]; u8 pwm_trange[ADT7462_PWM_COUNT]; u8 pwm_max; /* only one per chip */ u8 pwm_cfg[ADT7462_PWM_COUNT]; u8 alarms[ADT7462_ALARM_REG_COUNT]; }; static int adt7462_probe(struct i2c_client *client, const struct i2c_device_id *id); static int adt7462_detect(struct i2c_client *client, struct i2c_board_info *info); static int adt7462_remove(struct i2c_client *client); static const 
struct i2c_device_id adt7462_id[] = { { "adt7462", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, adt7462_id); static struct i2c_driver adt7462_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "adt7462", }, .probe = adt7462_probe, .remove = adt7462_remove, .id_table = adt7462_id, .detect = adt7462_detect, .address_list = normal_i2c, }; /* * 16-bit registers on the ADT7462 are low-byte first. The data sheet says * that the low byte must be read before the high byte. */ static inline int adt7462_read_word_data(struct i2c_client *client, u8 reg) { u16 foo; foo = i2c_smbus_read_byte_data(client, reg); foo |= ((u16)i2c_smbus_read_byte_data(client, reg + 1) << 8); return foo; } /* For some reason these registers are not contiguous. */ static int ADT7462_REG_FAN(int fan) { if (fan < 4) return ADT7462_REG_FAN_BASE_ADDR + (2 * fan); return ADT7462_REG_FAN2_BASE_ADDR + (2 * (fan - 4)); } /* Voltage registers are scattered everywhere */ static int ADT7462_REG_VOLT_MAX(struct adt7462_data *data, int which) { switch (which) { case 0: if (!(data->pin_cfg[0] & ADT7462_PIN7_INPUT)) return 0x7C; break; case 1: return 0x69; case 2: if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT)) return 0x7F; break; case 3: if (!(data->pin_cfg[1] & ADT7462_PIN21_INPUT)) return 0x7E; break; case 4: if (!(data->pin_cfg[0] & ADT7462_DIODE3_INPUT)) return 0x4B; break; case 5: if (!(data->pin_cfg[0] & ADT7462_DIODE1_INPUT)) return 0x49; break; case 6: if (!(data->pin_cfg[1] & ADT7462_PIN13_INPUT)) return 0x68; break; case 7: if (!(data->pin_cfg[1] & ADT7462_PIN8_INPUT)) return 0x7D; break; case 8: if (!(data->pin_cfg[2] & ADT7462_PIN26_VOLT_INPUT)) return 0x6C; break; case 9: if (!(data->pin_cfg[2] & ADT7462_PIN25_VOLT_INPUT)) return 0x6B; break; case 10: return 0x6A; case 11: if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT == ADT7462_PIN28_VOLT && !(data->pin_cfg[0] & ADT7462_VID_INPUT)) return 0x50; break; case 12: if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT == ADT7462_PIN28_VOLT && !(data->pin_cfg[0] & 
ADT7462_VID_INPUT)) return 0x4C; break; } return -ENODEV; } static int ADT7462_REG_VOLT_MIN(struct adt7462_data *data, int which) { switch (which) { case 0: if (!(data->pin_cfg[0] & ADT7462_PIN7_INPUT)) return 0x6D; break; case 1: return 0x72; case 2: if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT)) return 0x6F; break; case 3: if (!(data->pin_cfg[1] & ADT7462_PIN21_INPUT)) return 0x71; break; case 4: if (!(data->pin_cfg[0] & ADT7462_DIODE3_INPUT)) return 0x47; break; case 5: if (!(data->pin_cfg[0] & ADT7462_DIODE1_INPUT)) return 0x45; break; case 6: if (!(data->pin_cfg[1] & ADT7462_PIN13_INPUT)) return 0x70; break; case 7: if (!(data->pin_cfg[1] & ADT7462_PIN8_INPUT)) return 0x6E; break; case 8: if (!(data->pin_cfg[2] & ADT7462_PIN26_VOLT_INPUT)) return 0x75; break; case 9: if (!(data->pin_cfg[2] & ADT7462_PIN25_VOLT_INPUT)) return 0x74; break; case 10: return 0x73; case 11: if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT == ADT7462_PIN28_VOLT && !(data->pin_cfg[0] & ADT7462_VID_INPUT)) return 0x76; break; case 12: if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT == ADT7462_PIN28_VOLT && !(data->pin_cfg[0] & ADT7462_VID_INPUT)) return 0x77; break; } return -ENODEV; } static int ADT7462_REG_VOLT(struct adt7462_data *data, int which) { switch (which) { case 0: if (!(data->pin_cfg[0] & ADT7462_PIN7_INPUT)) return 0xA3; break; case 1: return 0x90; case 2: if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT)) return 0xA9; break; case 3: if (!(data->pin_cfg[1] & ADT7462_PIN21_INPUT)) return 0xA7; break; case 4: if (!(data->pin_cfg[0] & ADT7462_DIODE3_INPUT)) return 0x8F; break; case 5: if (!(data->pin_cfg[0] & ADT7462_DIODE1_INPUT)) return 0x8B; break; case 6: if (!(data->pin_cfg[1] & ADT7462_PIN13_INPUT)) return 0x96; break; case 7: if (!(data->pin_cfg[1] & ADT7462_PIN8_INPUT)) return 0xA5; break; case 8: if (!(data->pin_cfg[2] & ADT7462_PIN26_VOLT_INPUT)) return 0x93; break; case 9: if (!(data->pin_cfg[2] & ADT7462_PIN25_VOLT_INPUT)) return 0x92; break; case 10: return 0x91; case 11: if 
(data->pin_cfg[3] >> ADT7462_PIN28_SHIFT == ADT7462_PIN28_VOLT && !(data->pin_cfg[0] & ADT7462_VID_INPUT)) return 0x94; break; case 12: if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT == ADT7462_PIN28_VOLT && !(data->pin_cfg[0] & ADT7462_VID_INPUT)) return 0x95; break; } return -ENODEV; } /* Provide labels for sysfs */ static const char *voltage_label(struct adt7462_data *data, int which) { switch (which) { case 0: if (!(data->pin_cfg[0] & ADT7462_PIN7_INPUT)) return "+12V1"; break; case 1: switch (MASK_AND_SHIFT(data->pin_cfg[1], ADT7462_PIN23)) { case 0: return "Vccp1"; case 1: return "+2.5V"; case 2: return "+1.8V"; case 3: return "+1.5V"; } case 2: if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT)) return "+12V3"; break; case 3: if (!(data->pin_cfg[1] & ADT7462_PIN21_INPUT)) return "+5V"; break; case 4: if (!(data->pin_cfg[0] & ADT7462_DIODE3_INPUT)) { if (data->pin_cfg[1] & ADT7462_PIN19_INPUT) return "+0.9V"; return "+1.25V"; } break; case 5: if (!(data->pin_cfg[0] & ADT7462_DIODE1_INPUT)) { if (data->pin_cfg[1] & ADT7462_PIN19_INPUT) return "+1.8V"; return "+2.5V"; } break; case 6: if (!(data->pin_cfg[1] & ADT7462_PIN13_INPUT)) return "+3.3V"; break; case 7: if (!(data->pin_cfg[1] & ADT7462_PIN8_INPUT)) return "+12V2"; break; case 8: switch (MASK_AND_SHIFT(data->pin_cfg[2], ADT7462_PIN26)) { case 0: return "Vbatt"; case 1: return "FSB_Vtt"; } break; case 9: switch (MASK_AND_SHIFT(data->pin_cfg[2], ADT7462_PIN25)) { case 0: return "+3.3V"; case 1: return "+1.2V1"; } break; case 10: switch (MASK_AND_SHIFT(data->pin_cfg[2], ADT7462_PIN24)) { case 0: return "Vccp2"; case 1: return "+2.5V"; case 2: return "+1.8V"; case 3: return "+1.5"; } case 11: if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT == ADT7462_PIN28_VOLT && !(data->pin_cfg[0] & ADT7462_VID_INPUT)) return "+1.5V ICH"; break; case 12: if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT == ADT7462_PIN28_VOLT && !(data->pin_cfg[0] & ADT7462_VID_INPUT)) return "+1.5V 3GPIO"; break; } return "N/A"; } /* Multipliers are 
actually in uV, not mV. */ static int voltage_multiplier(struct adt7462_data *data, int which) { switch (which) { case 0: if (!(data->pin_cfg[0] & ADT7462_PIN7_INPUT)) return 62500; break; case 1: switch (MASK_AND_SHIFT(data->pin_cfg[1], ADT7462_PIN23)) { case 0: if (data->pin_cfg[0] & ADT7462_VID_INPUT) return 12500; return 6250; case 1: return 13000; case 2: return 9400; case 3: return 7800; } case 2: if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT)) return 62500; break; case 3: if (!(data->pin_cfg[1] & ADT7462_PIN21_INPUT)) return 26000; break; case 4: if (!(data->pin_cfg[0] & ADT7462_DIODE3_INPUT)) { if (data->pin_cfg[1] & ADT7462_PIN19_INPUT) return 4690; return 6500; } break; case 5: if (!(data->pin_cfg[0] & ADT7462_DIODE1_INPUT)) { if (data->pin_cfg[1] & ADT7462_PIN15_INPUT) return 9400; return 13000; } break; case 6: if (!(data->pin_cfg[1] & ADT7462_PIN13_INPUT)) return 17200; break; case 7: if (!(data->pin_cfg[1] & ADT7462_PIN8_INPUT)) return 62500; break; case 8: switch (MASK_AND_SHIFT(data->pin_cfg[2], ADT7462_PIN26)) { case 0: return 15600; case 1: return 6250; } break; case 9: switch (MASK_AND_SHIFT(data->pin_cfg[2], ADT7462_PIN25)) { case 0: return 17200; case 1: return 6250; } break; case 10: switch (MASK_AND_SHIFT(data->pin_cfg[2], ADT7462_PIN24)) { case 0: return 6250; case 1: return 13000; case 2: return 9400; case 3: return 7800; } case 11: case 12: if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT == ADT7462_PIN28_VOLT && !(data->pin_cfg[0] & ADT7462_VID_INPUT)) return 7800; } return 0; } static int temp_enabled(struct adt7462_data *data, int which) { switch (which) { case 0: case 2: return 1; case 1: if (data->pin_cfg[0] & ADT7462_DIODE1_INPUT) return 1; break; case 3: if (data->pin_cfg[0] & ADT7462_DIODE3_INPUT) return 1; break; } return 0; } static const char *temp_label(struct adt7462_data *data, int which) { switch (which) { case 0: return "local"; case 1: if (data->pin_cfg[0] & ADT7462_DIODE1_INPUT) return "remote1"; break; case 2: return 
"remote2"; case 3: if (data->pin_cfg[0] & ADT7462_DIODE3_INPUT) return "remote3"; break; } return "N/A"; } /* Map Trange register values to mC */ #define NUM_TRANGE_VALUES 16 static const int trange_values[NUM_TRANGE_VALUES] = { 2000, 2500, 3300, 4000, 5000, 6700, 8000, 10000, 13300, 16000, 20000, 26700, 32000, 40000, 53300, 80000 }; static int find_trange_value(int trange) { int i; for (i = 0; i < NUM_TRANGE_VALUES; i++) if (trange_values[i] == trange) return i; return -ENODEV; } static struct adt7462_data *adt7462_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct adt7462_data *data = i2c_get_clientdata(client); unsigned long local_jiffies = jiffies; int i; mutex_lock(&data->lock); if (time_before(local_jiffies, data->sensors_last_updated + SENSOR_REFRESH_INTERVAL) && data->sensors_valid) goto no_sensor_update; for (i = 0; i < ADT7462_TEMP_COUNT; i++) { /* * Reading the fractional register locks the integral * register until both have been read. 
*/ data->temp_frac[i] = i2c_smbus_read_byte_data(client, ADT7462_TEMP_REG(i)); data->temp[i] = i2c_smbus_read_byte_data(client, ADT7462_TEMP_REG(i) + 1); } for (i = 0; i < ADT7462_FAN_COUNT; i++) data->fan[i] = adt7462_read_word_data(client, ADT7462_REG_FAN(i)); data->fan_enabled = i2c_smbus_read_byte_data(client, ADT7462_REG_FAN_ENABLE); for (i = 0; i < ADT7462_PWM_COUNT; i++) data->pwm[i] = i2c_smbus_read_byte_data(client, ADT7462_REG_PWM(i)); for (i = 0; i < ADT7462_PIN_CFG_REG_COUNT; i++) data->pin_cfg[i] = i2c_smbus_read_byte_data(client, ADT7462_REG_PIN_CFG(i)); for (i = 0; i < ADT7462_VOLT_COUNT; i++) { int reg = ADT7462_REG_VOLT(data, i); if (!reg) data->voltages[i] = 0; else data->voltages[i] = i2c_smbus_read_byte_data(client, reg); } data->alarms[0] = i2c_smbus_read_byte_data(client, ADT7462_REG_ALARM1); data->alarms[1] = i2c_smbus_read_byte_data(client, ADT7462_REG_ALARM2); data->alarms[2] = i2c_smbus_read_byte_data(client, ADT7462_REG_ALARM3); data->alarms[3] = i2c_smbus_read_byte_data(client, ADT7462_REG_ALARM4); data->sensors_last_updated = local_jiffies; data->sensors_valid = 1; no_sensor_update: if (time_before(local_jiffies, data->limits_last_updated + LIMIT_REFRESH_INTERVAL) && data->limits_valid) goto out; for (i = 0; i < ADT7462_TEMP_COUNT; i++) { data->temp_min[i] = i2c_smbus_read_byte_data(client, ADT7462_TEMP_MIN_REG(i)); data->temp_max[i] = i2c_smbus_read_byte_data(client, ADT7462_TEMP_MAX_REG(i)); } for (i = 0; i < ADT7462_FAN_COUNT; i++) data->fan_min[i] = i2c_smbus_read_byte_data(client, ADT7462_REG_FAN_MIN(i)); for (i = 0; i < ADT7462_VOLT_COUNT; i++) { int reg = ADT7462_REG_VOLT_MAX(data, i); data->volt_max[i] = (reg ? i2c_smbus_read_byte_data(client, reg) : 0); reg = ADT7462_REG_VOLT_MIN(data, i); data->volt_min[i] = (reg ? 
i2c_smbus_read_byte_data(client, reg) : 0); } for (i = 0; i < ADT7462_PWM_COUNT; i++) { data->pwm_min[i] = i2c_smbus_read_byte_data(client, ADT7462_REG_PWM_MIN(i)); data->pwm_tmin[i] = i2c_smbus_read_byte_data(client, ADT7462_REG_PWM_TMIN(i)); data->pwm_trange[i] = i2c_smbus_read_byte_data(client, ADT7462_REG_PWM_TRANGE(i)); data->pwm_cfg[i] = i2c_smbus_read_byte_data(client, ADT7462_REG_PWM_CFG(i)); } data->pwm_max = i2c_smbus_read_byte_data(client, ADT7462_REG_PWM_MAX); data->cfg2 = i2c_smbus_read_byte_data(client, ADT7462_REG_CFG2); data->limits_last_updated = local_jiffies; data->limits_valid = 1; out: mutex_unlock(&data->lock); return data; } static ssize_t show_temp_min(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct adt7462_data *data = adt7462_update_device(dev); if (!temp_enabled(data, attr->index)) return sprintf(buf, "0\n"); return sprintf(buf, "%d\n", 1000 * (data->temp_min[attr->index] - 64)); } static ssize_t set_temp_min(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct i2c_client *client = to_i2c_client(dev); struct adt7462_data *data = i2c_get_clientdata(client); long temp; if (kstrtol(buf, 10, &temp) || !temp_enabled(data, attr->index)) return -EINVAL; temp = DIV_ROUND_CLOSEST(temp, 1000) + 64; temp = SENSORS_LIMIT(temp, 0, 255); mutex_lock(&data->lock); data->temp_min[attr->index] = temp; i2c_smbus_write_byte_data(client, ADT7462_TEMP_MIN_REG(attr->index), temp); mutex_unlock(&data->lock); return count; } static ssize_t show_temp_max(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct adt7462_data *data = adt7462_update_device(dev); if (!temp_enabled(data, attr->index)) return sprintf(buf, "0\n"); return sprintf(buf, "%d\n", 1000 * 
		(data->temp_max[attr->index] - 64));
}

/*
 * sysfs store: temperature high limit. Millidegrees Celsius in,
 * 8-bit offset-by-64 register value out, written under the lock.
 */
static ssize_t set_temp_max(struct device *dev,
			    struct device_attribute *devattr,
			    const char *buf, size_t count)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct i2c_client *client = to_i2c_client(dev);
	struct adt7462_data *data = i2c_get_clientdata(client);
	long temp;

	if (kstrtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
		return -EINVAL;

	temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
	temp = SENSORS_LIMIT(temp, 0, 255);

	mutex_lock(&data->lock);
	data->temp_max[attr->index] = temp;
	i2c_smbus_write_byte_data(client, ADT7462_TEMP_MAX_REG(attr->index),
				  temp);
	mutex_unlock(&data->lock);

	return count;
}

/*
 * sysfs show: measured temperature in millidegrees Celsius.
 * The fractional register contributes 250 mdegC per step after the
 * TEMP_FRAC_OFFSET shift, i.e. quarter-degree resolution.
 */
static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
			 char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct adt7462_data *data = adt7462_update_device(dev);
	u8 frac = data->temp_frac[attr->index] >> TEMP_FRAC_OFFSET;

	if (!temp_enabled(data, attr->index))
		return sprintf(buf, "0\n");

	return sprintf(buf, "%d\n", 1000 * (data->temp[attr->index] - 64) +
				    250 * frac);
}

/* sysfs show: human-readable name for this temperature channel. */
static ssize_t show_temp_label(struct device *dev,
			       struct device_attribute *devattr,
			       char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct adt7462_data *data = adt7462_update_device(dev);

	return sprintf(buf, "%s\n", temp_label(data, attr->index));
}

/*
 * sysfs show: voltage high limit in mV. The raw register count is
 * scaled by the per-channel multiplier (uV per count, per the
 * conversion comment below).
 */
static ssize_t show_volt_max(struct device *dev,
			     struct device_attribute *devattr,
			     char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct adt7462_data *data = adt7462_update_device(dev);
	int x = voltage_multiplier(data, attr->index);

	x *= data->volt_max[attr->index];
	x /= 1000; /* convert from uV to mV */

	return sprintf(buf, "%d\n", x);
}

/* sysfs store: voltage high limit (mV); continues on the next chunk. */
static ssize_t set_volt_max(struct device *dev,
			    struct device_attribute *devattr,
			    const char *buf, size_t count)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct i2c_client *client = to_i2c_client(dev);
	struct
	adt7462_data *data = i2c_get_clientdata(client);
	int x = voltage_multiplier(data, attr->index);
	long temp;

	/*
	 * NOTE(review): !x presumably rejects channels with no voltage
	 * input configured — verify against voltage_multiplier().
	 */
	if (kstrtol(buf, 10, &temp) || !x)
		return -EINVAL;

	temp *= 1000; /* convert mV to uV */
	temp = DIV_ROUND_CLOSEST(temp, x);
	temp = SENSORS_LIMIT(temp, 0, 255);

	mutex_lock(&data->lock);
	data->volt_max[attr->index] = temp;
	i2c_smbus_write_byte_data(client,
				  ADT7462_REG_VOLT_MAX(data, attr->index),
				  temp);
	mutex_unlock(&data->lock);

	return count;
}

/* sysfs show: voltage low limit in mV (scaled like show_volt_max). */
static ssize_t show_volt_min(struct device *dev,
			     struct device_attribute *devattr,
			     char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct adt7462_data *data = adt7462_update_device(dev);
	int x = voltage_multiplier(data, attr->index);

	x *= data->volt_min[attr->index];
	x /= 1000; /* convert from uV to mV */

	return sprintf(buf, "%d\n", x);
}

/* sysfs store: voltage low limit (mV), converted to a raw 8-bit count. */
static ssize_t set_volt_min(struct device *dev,
			    struct device_attribute *devattr,
			    const char *buf, size_t count)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct i2c_client *client = to_i2c_client(dev);
	struct adt7462_data *data = i2c_get_clientdata(client);
	int x = voltage_multiplier(data, attr->index);
	long temp;

	if (kstrtol(buf, 10, &temp) || !x)
		return -EINVAL;

	temp *= 1000; /* convert mV to uV */
	temp = DIV_ROUND_CLOSEST(temp, x);
	temp = SENSORS_LIMIT(temp, 0, 255);

	mutex_lock(&data->lock);
	data->volt_min[attr->index] = temp;
	i2c_smbus_write_byte_data(client,
				  ADT7462_REG_VOLT_MIN(data, attr->index),
				  temp);
	mutex_unlock(&data->lock);

	return count;
}

/* sysfs show: measured voltage in mV. */
static ssize_t show_voltage(struct device *dev,
			    struct device_attribute *devattr,
			    char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct adt7462_data *data = adt7462_update_device(dev);
	int x = voltage_multiplier(data, attr->index);

	x *= data->voltages[attr->index];
	x /= 1000; /* convert from uV to mV */

	return sprintf(buf, "%d\n", x);
}

/* sysfs show: human-readable name for this voltage channel. */
static ssize_t show_voltage_label(struct device *dev,
				  struct device_attribute *devattr,
				  char *buf)
{
	struct
	sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct adt7462_data *data = adt7462_update_device(dev);

	return sprintf(buf, "%s\n", voltage_label(data, attr->index));
}

/*
 * sysfs show: one alarm bit. attr->index packs the alarm register
 * number in the upper bits (ADT7462_ALARM_REG_SHIFT) and the flag
 * mask in the lower bits.
 */
static ssize_t show_alarm(struct device *dev,
			  struct device_attribute *devattr,
			  char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct adt7462_data *data = adt7462_update_device(dev);
	int reg = attr->index >> ADT7462_ALARM_REG_SHIFT;
	int mask = attr->index & ADT7462_ALARM_FLAG_MASK;

	if (data->alarms[reg] & mask)
		return sprintf(buf, "1\n");
	else
		return sprintf(buf, "0\n");
}

/* Bit N of the cached fan-enable register gates fan N. */
static int fan_enabled(struct adt7462_data *data, int fan)
{
	return data->fan_enabled & (1 << fan);
}

/*
 * sysfs show: minimum fan speed in RPM. The chip keeps only the MSB
 * of the 16-bit period, so shift it back up before converting.
 */
static ssize_t show_fan_min(struct device *dev,
			    struct device_attribute *devattr,
			    char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct adt7462_data *data = adt7462_update_device(dev);
	u16 temp;

	/* Only the MSB of the min fan period is stored... */
	temp = data->fan_min[attr->index];
	temp <<= 8;

	if (!fan_enabled(data, attr->index) || !FAN_DATA_VALID(temp))
		return sprintf(buf, "0\n");

	return sprintf(buf, "%d\n", FAN_PERIOD_TO_RPM(temp));
}

/*
 * sysfs store: minimum fan speed in RPM. Zero RPM is rejected up
 * front; the RPM is converted to a period and only its MSB is kept.
 */
static ssize_t set_fan_min(struct device *dev,
			   struct device_attribute *devattr,
			   const char *buf, size_t count)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct i2c_client *client = to_i2c_client(dev);
	struct adt7462_data *data = i2c_get_clientdata(client);
	long temp;

	if (kstrtol(buf, 10, &temp) || !temp ||
	    !fan_enabled(data, attr->index))
		return -EINVAL;

	temp = FAN_RPM_TO_PERIOD(temp);
	temp >>= 8;
	temp = SENSORS_LIMIT(temp, 1, 255);

	mutex_lock(&data->lock);
	data->fan_min[attr->index] = temp;
	i2c_smbus_write_byte_data(client, ADT7462_REG_FAN_MIN(attr->index),
				  temp);
	mutex_unlock(&data->lock);

	return count;
}

/* sysfs show: measured fan speed in RPM (continues in next chunk). */
static ssize_t show_fan(struct device *dev, struct device_attribute *devattr,
			char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct adt7462_data *data
		= adt7462_update_device(dev);

	if (!fan_enabled(data, attr->index) ||
	    !FAN_DATA_VALID(data->fan[attr->index]))
		return sprintf(buf, "0\n");

	return sprintf(buf, "%d\n",
		       FAN_PERIOD_TO_RPM(data->fan[attr->index]));
}

/* sysfs show: 1 if the "force PWM to max" bit is set in CFG2, else 0. */
static ssize_t show_force_pwm_max(struct device *dev,
				  struct device_attribute *devattr,
				  char *buf)
{
	struct adt7462_data *data = adt7462_update_device(dev);

	return sprintf(buf, "%d\n",
		       (data->cfg2 & ADT7462_FSPD_MASK ? 1 : 0));
}

/*
 * sysfs store: set/clear the FSPD bit. Read-modify-write of CFG2 is
 * done under the lock so the cached copy stays consistent.
 */
static ssize_t set_force_pwm_max(struct device *dev,
				 struct device_attribute *devattr,
				 const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct adt7462_data *data = i2c_get_clientdata(client);
	long temp;
	u8 reg;

	if (kstrtol(buf, 10, &temp))
		return -EINVAL;

	mutex_lock(&data->lock);
	reg = i2c_smbus_read_byte_data(client, ADT7462_REG_CFG2);
	if (temp)
		reg |= ADT7462_FSPD_MASK;
	else
		reg &= ~ADT7462_FSPD_MASK;
	data->cfg2 = reg;
	i2c_smbus_write_byte_data(client, ADT7462_REG_CFG2, reg);
	mutex_unlock(&data->lock);

	return count;
}

/* sysfs show: current PWM duty-cycle register (raw 0-255). */
static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr,
			char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct adt7462_data *data = adt7462_update_device(dev);

	return sprintf(buf, "%d\n", data->pwm[attr->index]);
}

/* sysfs store: PWM duty-cycle, clamped to 0-255. */
static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
		       const char *buf, size_t count)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct i2c_client *client = to_i2c_client(dev);
	struct adt7462_data *data = i2c_get_clientdata(client);
	long temp;

	if (kstrtol(buf, 10, &temp))
		return -EINVAL;

	temp = SENSORS_LIMIT(temp, 0, 255);

	mutex_lock(&data->lock);
	data->pwm[attr->index] = temp;
	i2c_smbus_write_byte_data(client, ADT7462_REG_PWM(attr->index), temp);
	mutex_unlock(&data->lock);

	return count;
}

/* sysfs show: shared PWM maximum (one register for all channels). */
static ssize_t show_pwm_max(struct device *dev,
			    struct device_attribute *devattr,
			    char *buf)
{
	struct adt7462_data *data = adt7462_update_device(dev);

	return sprintf(buf, "%d\n",
		       data->pwm_max);
}

/* sysfs store: shared PWM maximum, clamped to 0-255. */
static ssize_t set_pwm_max(struct device *dev,
			   struct device_attribute *devattr,
			   const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct adt7462_data *data = i2c_get_clientdata(client);
	long temp;

	if (kstrtol(buf, 10, &temp))
		return -EINVAL;

	temp = SENSORS_LIMIT(temp, 0, 255);

	mutex_lock(&data->lock);
	data->pwm_max = temp;
	i2c_smbus_write_byte_data(client, ADT7462_REG_PWM_MAX, temp);
	mutex_unlock(&data->lock);

	return count;
}

/* sysfs show: per-channel PWM minimum (auto-mode starting duty). */
static ssize_t show_pwm_min(struct device *dev,
			    struct device_attribute *devattr,
			    char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct adt7462_data *data = adt7462_update_device(dev);

	return sprintf(buf, "%d\n", data->pwm_min[attr->index]);
}

/* sysfs store: per-channel PWM minimum, clamped to 0-255. */
static ssize_t set_pwm_min(struct device *dev,
			   struct device_attribute *devattr,
			   const char *buf, size_t count)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct i2c_client *client = to_i2c_client(dev);
	struct adt7462_data *data = i2c_get_clientdata(client);
	long temp;

	if (kstrtol(buf, 10, &temp))
		return -EINVAL;

	temp = SENSORS_LIMIT(temp, 0, 255);

	mutex_lock(&data->lock);
	data->pwm_min[attr->index] = temp;
	i2c_smbus_write_byte_data(client, ADT7462_REG_PWM_MIN(attr->index),
				  temp);
	mutex_unlock(&data->lock);

	return count;
}

/*
 * sysfs show: PWM hysteresis in millidegrees. The hysteresis lives in
 * the low nibble of the trange register (ADT7462_PWM_HYST_MASK),
 * one degC per count.
 */
static ssize_t show_pwm_hyst(struct device *dev,
			     struct device_attribute *devattr,
			     char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct adt7462_data *data = adt7462_update_device(dev);

	return sprintf(buf, "%d\n", 1000 *
		       (data->pwm_trange[attr->index] &
			ADT7462_PWM_HYST_MASK));
}

/* sysfs store: PWM hysteresis (millidegrees); continues in next chunk. */
static ssize_t set_pwm_hyst(struct device *dev,
			    struct device_attribute *devattr,
			    const char *buf, size_t count)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct i2c_client *client = to_i2c_client(dev);
	struct adt7462_data *data = i2c_get_clientdata(client);
	long temp;

	if (kstrtol(buf, 10, &temp))
		return -EINVAL;

	temp =
	    DIV_ROUND_CLOSEST(temp, 1000);
	temp = SENSORS_LIMIT(temp, 0, 15);

	/* package things up */
	temp &= ADT7462_PWM_HYST_MASK;
	temp |= data->pwm_trange[attr->index] & ADT7462_PWM_RANGE_MASK;

	mutex_lock(&data->lock);
	data->pwm_trange[attr->index] = temp;
	i2c_smbus_write_byte_data(client, ADT7462_REG_PWM_TRANGE(attr->index),
				  temp);
	mutex_unlock(&data->lock);

	return count;
}

/*
 * sysfs show: upper auto-point temperature in millidegrees. The chip
 * stores tmin plus a coded range; the range code indexes the
 * trange_values[] lookup table.
 */
static ssize_t show_pwm_tmax(struct device *dev,
			     struct device_attribute *devattr,
			     char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct adt7462_data *data = adt7462_update_device(dev);

	/* tmax = tmin + trange */
	int trange = trange_values[data->pwm_trange[attr->index] >>
				   ADT7462_PWM_RANGE_SHIFT];
	int tmin = (data->pwm_tmin[attr->index] - 64) * 1000;

	return sprintf(buf, "%d\n", tmin + trange);
}

/*
 * sysfs store: upper auto-point temperature. Converts the requested
 * tmax back into a coded range relative to the current tmin; rejects
 * values with no matching range code.
 */
static ssize_t set_pwm_tmax(struct device *dev,
			    struct device_attribute *devattr,
			    const char *buf, size_t count)
{
	int temp;
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct i2c_client *client = to_i2c_client(dev);
	struct adt7462_data *data = i2c_get_clientdata(client);
	int tmin, trange_value;
	long trange;

	if (kstrtol(buf, 10, &trange))
		return -EINVAL;

	/* trange = tmax - tmin */
	tmin = (data->pwm_tmin[attr->index] - 64) * 1000;
	trange_value = find_trange_value(trange - tmin);
	if (trange_value < 0)
		return -EINVAL;

	/* Merge the new range code with the existing hysteresis nibble. */
	temp = trange_value << ADT7462_PWM_RANGE_SHIFT;
	temp |= data->pwm_trange[attr->index] & ADT7462_PWM_HYST_MASK;

	mutex_lock(&data->lock);
	data->pwm_trange[attr->index] = temp;
	i2c_smbus_write_byte_data(client, ADT7462_REG_PWM_TRANGE(attr->index),
				  temp);
	mutex_unlock(&data->lock);

	return count;
}

/*
 * sysfs show: lower auto-point temperature in millidegrees
 * (+64 degC register offset, as with the temp limits).
 */
static ssize_t show_pwm_tmin(struct device *dev,
			     struct device_attribute *devattr,
			     char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct adt7462_data *data = adt7462_update_device(dev);

	return sprintf(buf, "%d\n", 1000 * (data->pwm_tmin[attr->index] - 64));
}

/* sysfs store: lower auto-point temperature; continues in next chunk. */
static ssize_t set_pwm_tmin(struct device *dev, struct
	device_attribute *devattr, const char *buf, size_t count)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct i2c_client *client = to_i2c_client(dev);
	struct adt7462_data *data = i2c_get_clientdata(client);
	long temp;

	if (kstrtol(buf, 10, &temp))
		return -EINVAL;

	temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
	temp = SENSORS_LIMIT(temp, 0, 255);

	mutex_lock(&data->lock);
	data->pwm_tmin[attr->index] = temp;
	i2c_smbus_write_byte_data(client, ADT7462_REG_PWM_TMIN(attr->index),
				  temp);
	mutex_unlock(&data->lock);

	return count;
}

/*
 * sysfs show: pwmN_enable. Channel-select code 4 = off, 7 = manual,
 * anything else is reported as automatic (2).
 */
static ssize_t show_pwm_auto(struct device *dev,
			     struct device_attribute *devattr,
			     char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct adt7462_data *data = adt7462_update_device(dev);
	int cfg = data->pwm_cfg[attr->index] >> ADT7462_PWM_CHANNEL_SHIFT;

	switch (cfg) {
	case 4: /* off */
		return sprintf(buf, "0\n");
	case 7: /* manual */
		return sprintf(buf, "1\n");
	default: /* automatic */
		return sprintf(buf, "2\n");
	}
}

/*
 * Helper: rewrite the channel-select field of one PWM config register
 * (cache + hardware) under the lock.
 */
static void set_pwm_channel(struct i2c_client *client,
			    struct adt7462_data *data,
			    int which, int value)
{
	int temp = data->pwm_cfg[which] & ~ADT7462_PWM_CHANNEL_MASK;

	temp |= value << ADT7462_PWM_CHANNEL_SHIFT;

	mutex_lock(&data->lock);
	data->pwm_cfg[which] = temp;
	i2c_smbus_write_byte_data(client, ADT7462_REG_PWM_CFG(which), temp);
	mutex_unlock(&data->lock);
}

/*
 * sysfs store: pwmN_enable. Only 0 (off -> code 4) and 1 (manual ->
 * code 7) can be set here; automatic mode is selected through
 * pwmN_auto_channels_temp instead.
 */
static ssize_t set_pwm_auto(struct device *dev,
			    struct device_attribute *devattr,
			    const char *buf, size_t count)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct i2c_client *client = to_i2c_client(dev);
	struct adt7462_data *data = i2c_get_clientdata(client);
	long temp;

	if (kstrtol(buf, 10, &temp))
		return -EINVAL;

	switch (temp) {
	case 0: /* off */
		set_pwm_channel(client, data, attr->index, 4);
		return count;
	case 1: /* manual */
		set_pwm_channel(client, data, attr->index, 7);
		return count;
	default:
		return -EINVAL;
	}
}

/* sysfs show: auto-mode temp channel bitmap; continues in next chunk. */
static ssize_t show_pwm_auto_temp(struct device *dev,
				  struct device_attribute
				  *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct adt7462_data *data = adt7462_update_device(dev);
	int channel = data->pwm_cfg[attr->index] >> ADT7462_PWM_CHANNEL_SHIFT;

	switch (channel) {
	case 0: /* temp[1234] only */
	case 1:
	case 2:
	case 3:
		return sprintf(buf, "%d\n", (1 << channel));
	case 5: /* temp1 & temp4 */
		return sprintf(buf, "9\n");
	case 6:
		return sprintf(buf, "15\n");
	default:
		return sprintf(buf, "0\n");
	}
}

/*
 * Convert a sysfs temp-channel bitmap to the hardware channel-select
 * code: 0xF -> 6 (all four), 0x9 -> 5 (temp1 & temp4), single-bit
 * values map to their bit index; anything else is rejected.
 */
static int cvt_auto_temp(int input)
{
	if (input == 0xF)
		return 6;
	if (input == 0x9)
		return 5;
	if (input < 1 || !is_power_of_2(input))
		return -EINVAL;
	return ilog2(input);
}

/* sysfs store: select which temp channels drive this PWM in auto mode. */
static ssize_t set_pwm_auto_temp(struct device *dev,
				 struct device_attribute *devattr,
				 const char *buf, size_t count)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct i2c_client *client = to_i2c_client(dev);
	struct adt7462_data *data = i2c_get_clientdata(client);
	long temp;

	if (kstrtol(buf, 10, &temp))
		return -EINVAL;

	temp = cvt_auto_temp(temp);
	if (temp < 0)
		return temp;

	set_pwm_channel(client, data, attr->index, temp);

	return count;
}

/* Temperature limit and input attributes. */
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
		set_temp_max, 0);
static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
		set_temp_max, 1);
static SENSOR_DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_temp_max,
		set_temp_max, 2);
static SENSOR_DEVICE_ATTR(temp4_max, S_IWUSR | S_IRUGO, show_temp_max,
		set_temp_max, 3);
static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp_min,
		set_temp_min, 0);
static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp_min,
		set_temp_min, 1);
static SENSOR_DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_temp_min,
		set_temp_min, 2);
static SENSOR_DEVICE_ATTR(temp4_min, S_IWUSR | S_IRUGO, show_temp_min,
		set_temp_min, 3);
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
static
	SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3);
static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_temp_label, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, show_temp_label, NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, show_temp_label, NULL, 2);
static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO, show_temp_label, NULL, 3);
static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM1 | ADT7462_LT_ALARM);
static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM1 | ADT7462_R1T_ALARM);
static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM1 | ADT7462_R2T_ALARM);
static SENSOR_DEVICE_ATTR(temp4_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM1 | ADT7462_R3T_ALARM);

/* Voltage limit, input and label attributes. */
static SENSOR_DEVICE_ATTR(in1_max, S_IWUSR | S_IRUGO, show_volt_max,
		set_volt_max, 0);
static SENSOR_DEVICE_ATTR(in2_max, S_IWUSR | S_IRUGO, show_volt_max,
		set_volt_max, 1);
static SENSOR_DEVICE_ATTR(in3_max, S_IWUSR | S_IRUGO, show_volt_max,
		set_volt_max, 2);
static SENSOR_DEVICE_ATTR(in4_max, S_IWUSR | S_IRUGO, show_volt_max,
		set_volt_max, 3);
static SENSOR_DEVICE_ATTR(in5_max, S_IWUSR | S_IRUGO, show_volt_max,
		set_volt_max, 4);
static SENSOR_DEVICE_ATTR(in6_max, S_IWUSR | S_IRUGO, show_volt_max,
		set_volt_max, 5);
static SENSOR_DEVICE_ATTR(in7_max, S_IWUSR | S_IRUGO, show_volt_max,
		set_volt_max, 6);
static SENSOR_DEVICE_ATTR(in8_max, S_IWUSR | S_IRUGO, show_volt_max,
		set_volt_max, 7);
static SENSOR_DEVICE_ATTR(in9_max, S_IWUSR | S_IRUGO, show_volt_max,
		set_volt_max, 8);
static SENSOR_DEVICE_ATTR(in10_max, S_IWUSR | S_IRUGO, show_volt_max,
		set_volt_max, 9);
static SENSOR_DEVICE_ATTR(in11_max, S_IWUSR | S_IRUGO, show_volt_max,
		set_volt_max, 10);
static SENSOR_DEVICE_ATTR(in12_max, S_IWUSR | S_IRUGO, show_volt_max,
		set_volt_max, 11);
static SENSOR_DEVICE_ATTR(in13_max, S_IWUSR | S_IRUGO, show_volt_max,
		set_volt_max, 12);
static SENSOR_DEVICE_ATTR(in1_min, S_IWUSR | S_IRUGO, show_volt_min,
		set_volt_min, 0);
static SENSOR_DEVICE_ATTR(in2_min, S_IWUSR | S_IRUGO, show_volt_min,
		set_volt_min, 1);
static SENSOR_DEVICE_ATTR(in3_min, S_IWUSR | S_IRUGO, show_volt_min,
		set_volt_min, 2);
static SENSOR_DEVICE_ATTR(in4_min, S_IWUSR | S_IRUGO, show_volt_min,
		set_volt_min, 3);
static SENSOR_DEVICE_ATTR(in5_min, S_IWUSR | S_IRUGO, show_volt_min,
		set_volt_min, 4);
static SENSOR_DEVICE_ATTR(in6_min, S_IWUSR | S_IRUGO, show_volt_min,
		set_volt_min, 5);
static SENSOR_DEVICE_ATTR(in7_min, S_IWUSR | S_IRUGO, show_volt_min,
		set_volt_min, 6);
static SENSOR_DEVICE_ATTR(in8_min, S_IWUSR | S_IRUGO, show_volt_min,
		set_volt_min, 7);
static SENSOR_DEVICE_ATTR(in9_min, S_IWUSR | S_IRUGO, show_volt_min,
		set_volt_min, 8);
static SENSOR_DEVICE_ATTR(in10_min, S_IWUSR | S_IRUGO, show_volt_min,
		set_volt_min, 9);
static SENSOR_DEVICE_ATTR(in11_min, S_IWUSR | S_IRUGO, show_volt_min,
		set_volt_min, 10);
static SENSOR_DEVICE_ATTR(in12_min, S_IWUSR | S_IRUGO, show_volt_min,
		set_volt_min, 11);
static SENSOR_DEVICE_ATTR(in13_min, S_IWUSR | S_IRUGO, show_volt_min,
		set_volt_min, 12);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_voltage, NULL, 0);
static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_voltage, NULL, 1);
static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_voltage, NULL, 2);
static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_voltage, NULL, 3);
static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_voltage, NULL, 4);
static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_voltage, NULL, 5);
static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_voltage, NULL, 6);
static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, show_voltage, NULL, 7);
static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, show_voltage, NULL, 8);
static SENSOR_DEVICE_ATTR(in10_input, S_IRUGO, show_voltage, NULL, 9);
static SENSOR_DEVICE_ATTR(in11_input, S_IRUGO, show_voltage, NULL, 10);
static SENSOR_DEVICE_ATTR(in12_input, S_IRUGO, show_voltage, NULL, 11);
static SENSOR_DEVICE_ATTR(in13_input, S_IRUGO, show_voltage, NULL, 12);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, show_voltage_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, show_voltage_label, NULL, 1);
static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_voltage_label, NULL, 2);
static SENSOR_DEVICE_ATTR(in4_label, S_IRUGO, show_voltage_label, NULL, 3);
static SENSOR_DEVICE_ATTR(in5_label, S_IRUGO, show_voltage_label, NULL, 4);
static SENSOR_DEVICE_ATTR(in6_label, S_IRUGO, show_voltage_label, NULL, 5);
static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_voltage_label, NULL, 6);
static SENSOR_DEVICE_ATTR(in8_label, S_IRUGO, show_voltage_label, NULL, 7);
static SENSOR_DEVICE_ATTR(in9_label, S_IRUGO, show_voltage_label, NULL, 8);
static SENSOR_DEVICE_ATTR(in10_label, S_IRUGO, show_voltage_label, NULL, 9);
static SENSOR_DEVICE_ATTR(in11_label, S_IRUGO, show_voltage_label, NULL, 10);
static SENSOR_DEVICE_ATTR(in12_label, S_IRUGO, show_voltage_label, NULL, 11);
static SENSOR_DEVICE_ATTR(in13_label, S_IRUGO, show_voltage_label, NULL, 12);

/*
 * Voltage alarm attributes. Note the sysfs inN numbering does not
 * match the VN alarm bit numbering (e.g. in2 -> V7).
 */
static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM2 | ADT7462_V0_ALARM);
static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM2 | ADT7462_V7_ALARM);
static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM2 | ADT7462_V2_ALARM);
static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM2 | ADT7462_V6_ALARM);
static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM2 | ADT7462_V5_ALARM);
static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM2 | ADT7462_V4_ALARM);
static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM2 | ADT7462_V3_ALARM);
static SENSOR_DEVICE_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM2 | ADT7462_V1_ALARM);
static SENSOR_DEVICE_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM3 | ADT7462_V10_ALARM);
static SENSOR_DEVICE_ATTR(in10_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM3 | ADT7462_V9_ALARM);
static SENSOR_DEVICE_ATTR(in11_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM3 | ADT7462_V8_ALARM);
static SENSOR_DEVICE_ATTR(in12_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM3 | ADT7462_V11_ALARM);
static SENSOR_DEVICE_ATTR(in13_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM3 | ADT7462_V12_ALARM);

/* Fan attributes. */
static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min,
		set_fan_min, 0);
static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min,
		set_fan_min, 1);
static SENSOR_DEVICE_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min,
		set_fan_min, 2);
static SENSOR_DEVICE_ATTR(fan4_min, S_IWUSR | S_IRUGO, show_fan_min,
		set_fan_min, 3);
static SENSOR_DEVICE_ATTR(fan5_min, S_IWUSR | S_IRUGO, show_fan_min,
		set_fan_min, 4);
static SENSOR_DEVICE_ATTR(fan6_min, S_IWUSR | S_IRUGO, show_fan_min,
		set_fan_min, 5);
static SENSOR_DEVICE_ATTR(fan7_min, S_IWUSR | S_IRUGO, show_fan_min,
		set_fan_min, 6);
static SENSOR_DEVICE_ATTR(fan8_min, S_IWUSR | S_IRUGO, show_fan_min,
		set_fan_min, 7);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1);
static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2);
static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3);
static SENSOR_DEVICE_ATTR(fan5_input, S_IRUGO, show_fan, NULL, 4);
static SENSOR_DEVICE_ATTR(fan6_input, S_IRUGO, show_fan, NULL, 5);
static SENSOR_DEVICE_ATTR(fan7_input, S_IRUGO, show_fan, NULL, 6);
static SENSOR_DEVICE_ATTR(fan8_input, S_IRUGO, show_fan, NULL, 7);
static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM4 | ADT7462_F0_ALARM);
static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM4 | ADT7462_F1_ALARM);
static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM4 | ADT7462_F2_ALARM);
static SENSOR_DEVICE_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM4 | ADT7462_F3_ALARM);
static SENSOR_DEVICE_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM4 | ADT7462_F4_ALARM);
static SENSOR_DEVICE_ATTR(fan6_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM4 | ADT7462_F5_ALARM);
static SENSOR_DEVICE_ATTR(fan7_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM4 | ADT7462_F6_ALARM);
static SENSOR_DEVICE_ATTR(fan8_alarm, S_IRUGO, show_alarm, NULL,
			  ADT7462_ALARM4 | ADT7462_F7_ALARM);

/* PWM control and auto-point attributes. */
static SENSOR_DEVICE_ATTR(force_pwm_max, S_IWUSR | S_IRUGO,
		show_force_pwm_max, set_force_pwm_max, 0);
static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 0);
static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 1);
static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 2);
static SENSOR_DEVICE_ATTR(pwm4, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 3);
static SENSOR_DEVICE_ATTR(pwm1_auto_point1_pwm, S_IWUSR | S_IRUGO,
		show_pwm_min, set_pwm_min, 0);
static SENSOR_DEVICE_ATTR(pwm2_auto_point1_pwm, S_IWUSR | S_IRUGO,
		show_pwm_min, set_pwm_min, 1);
static SENSOR_DEVICE_ATTR(pwm3_auto_point1_pwm, S_IWUSR | S_IRUGO,
		show_pwm_min, set_pwm_min, 2);
static SENSOR_DEVICE_ATTR(pwm4_auto_point1_pwm, S_IWUSR | S_IRUGO,
		show_pwm_min, set_pwm_min, 3);
static SENSOR_DEVICE_ATTR(pwm1_auto_point2_pwm, S_IWUSR | S_IRUGO,
		show_pwm_max, set_pwm_max, 0);
static SENSOR_DEVICE_ATTR(pwm2_auto_point2_pwm, S_IWUSR | S_IRUGO,
		show_pwm_max, set_pwm_max, 1);
static SENSOR_DEVICE_ATTR(pwm3_auto_point2_pwm, S_IWUSR | S_IRUGO,
		show_pwm_max, set_pwm_max, 2);
static SENSOR_DEVICE_ATTR(pwm4_auto_point2_pwm, S_IWUSR | S_IRUGO,
		show_pwm_max, set_pwm_max, 3);
static SENSOR_DEVICE_ATTR(temp1_auto_point1_hyst, S_IWUSR | S_IRUGO,
		show_pwm_hyst, set_pwm_hyst, 0);
static SENSOR_DEVICE_ATTR(temp2_auto_point1_hyst, S_IWUSR | S_IRUGO,
		show_pwm_hyst, set_pwm_hyst, 1);
static SENSOR_DEVICE_ATTR(temp3_auto_point1_hyst, S_IWUSR | S_IRUGO,
		show_pwm_hyst, set_pwm_hyst, 2);
static SENSOR_DEVICE_ATTR(temp4_auto_point1_hyst, S_IWUSR | S_IRUGO,
		show_pwm_hyst, set_pwm_hyst, 3);
static SENSOR_DEVICE_ATTR(temp1_auto_point2_hyst, S_IWUSR | S_IRUGO,
		show_pwm_hyst, set_pwm_hyst, 0);
static SENSOR_DEVICE_ATTR(temp2_auto_point2_hyst, S_IWUSR | S_IRUGO,
		show_pwm_hyst, set_pwm_hyst, 1);
static SENSOR_DEVICE_ATTR(temp3_auto_point2_hyst, S_IWUSR | S_IRUGO,
		show_pwm_hyst, set_pwm_hyst, 2);
static SENSOR_DEVICE_ATTR(temp4_auto_point2_hyst, S_IWUSR | S_IRUGO,
		show_pwm_hyst, set_pwm_hyst, 3);
static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp, S_IWUSR | S_IRUGO,
		show_pwm_tmin, set_pwm_tmin, 0);
static SENSOR_DEVICE_ATTR(temp2_auto_point1_temp, S_IWUSR | S_IRUGO,
		show_pwm_tmin, set_pwm_tmin, 1);
static SENSOR_DEVICE_ATTR(temp3_auto_point1_temp, S_IWUSR | S_IRUGO,
		show_pwm_tmin, set_pwm_tmin, 2);
static SENSOR_DEVICE_ATTR(temp4_auto_point1_temp, S_IWUSR | S_IRUGO,
		show_pwm_tmin, set_pwm_tmin, 3);
static SENSOR_DEVICE_ATTR(temp1_auto_point2_temp, S_IWUSR | S_IRUGO,
		show_pwm_tmax, set_pwm_tmax, 0);
static SENSOR_DEVICE_ATTR(temp2_auto_point2_temp, S_IWUSR | S_IRUGO,
		show_pwm_tmax, set_pwm_tmax, 1);
static SENSOR_DEVICE_ATTR(temp3_auto_point2_temp, S_IWUSR | S_IRUGO,
		show_pwm_tmax, set_pwm_tmax, 2);
static SENSOR_DEVICE_ATTR(temp4_auto_point2_temp, S_IWUSR | S_IRUGO,
		show_pwm_tmax, set_pwm_tmax, 3);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
		show_pwm_auto, set_pwm_auto, 0);
static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO,
		show_pwm_auto, set_pwm_auto, 1);
static SENSOR_DEVICE_ATTR(pwm3_enable, S_IWUSR | S_IRUGO,
		show_pwm_auto, set_pwm_auto, 2);
static SENSOR_DEVICE_ATTR(pwm4_enable, S_IWUSR | S_IRUGO,
		show_pwm_auto, set_pwm_auto, 3);
static SENSOR_DEVICE_ATTR(pwm1_auto_channels_temp, S_IWUSR | S_IRUGO,
		show_pwm_auto_temp, set_pwm_auto_temp, 0);
static SENSOR_DEVICE_ATTR(pwm2_auto_channels_temp, S_IWUSR | S_IRUGO,
		show_pwm_auto_temp, set_pwm_auto_temp, 1);
static SENSOR_DEVICE_ATTR(pwm3_auto_channels_temp, S_IWUSR | S_IRUGO,
		show_pwm_auto_temp, set_pwm_auto_temp, 2);
static SENSOR_DEVICE_ATTR(pwm4_auto_channels_temp, S_IWUSR | S_IRUGO,
show_pwm_auto_temp, set_pwm_auto_temp, 3); static struct attribute *adt7462_attr[] = { &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp3_max.dev_attr.attr, &sensor_dev_attr_temp4_max.dev_attr.attr, &sensor_dev_attr_temp1_min.dev_attr.attr, &sensor_dev_attr_temp2_min.dev_attr.attr, &sensor_dev_attr_temp3_min.dev_attr.attr, &sensor_dev_attr_temp4_min.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp3_input.dev_attr.attr, &sensor_dev_attr_temp4_input.dev_attr.attr, &sensor_dev_attr_temp1_label.dev_attr.attr, &sensor_dev_attr_temp2_label.dev_attr.attr, &sensor_dev_attr_temp3_label.dev_attr.attr, &sensor_dev_attr_temp4_label.dev_attr.attr, &sensor_dev_attr_temp1_alarm.dev_attr.attr, &sensor_dev_attr_temp2_alarm.dev_attr.attr, &sensor_dev_attr_temp3_alarm.dev_attr.attr, &sensor_dev_attr_temp4_alarm.dev_attr.attr, &sensor_dev_attr_in1_max.dev_attr.attr, &sensor_dev_attr_in2_max.dev_attr.attr, &sensor_dev_attr_in3_max.dev_attr.attr, &sensor_dev_attr_in4_max.dev_attr.attr, &sensor_dev_attr_in5_max.dev_attr.attr, &sensor_dev_attr_in6_max.dev_attr.attr, &sensor_dev_attr_in7_max.dev_attr.attr, &sensor_dev_attr_in8_max.dev_attr.attr, &sensor_dev_attr_in9_max.dev_attr.attr, &sensor_dev_attr_in10_max.dev_attr.attr, &sensor_dev_attr_in11_max.dev_attr.attr, &sensor_dev_attr_in12_max.dev_attr.attr, &sensor_dev_attr_in13_max.dev_attr.attr, &sensor_dev_attr_in1_min.dev_attr.attr, &sensor_dev_attr_in2_min.dev_attr.attr, &sensor_dev_attr_in3_min.dev_attr.attr, &sensor_dev_attr_in4_min.dev_attr.attr, &sensor_dev_attr_in5_min.dev_attr.attr, &sensor_dev_attr_in6_min.dev_attr.attr, &sensor_dev_attr_in7_min.dev_attr.attr, &sensor_dev_attr_in8_min.dev_attr.attr, &sensor_dev_attr_in9_min.dev_attr.attr, &sensor_dev_attr_in10_min.dev_attr.attr, &sensor_dev_attr_in11_min.dev_attr.attr, &sensor_dev_attr_in12_min.dev_attr.attr, &sensor_dev_attr_in13_min.dev_attr.attr, 
&sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in3_input.dev_attr.attr, &sensor_dev_attr_in4_input.dev_attr.attr, &sensor_dev_attr_in5_input.dev_attr.attr, &sensor_dev_attr_in6_input.dev_attr.attr, &sensor_dev_attr_in7_input.dev_attr.attr, &sensor_dev_attr_in8_input.dev_attr.attr, &sensor_dev_attr_in9_input.dev_attr.attr, &sensor_dev_attr_in10_input.dev_attr.attr, &sensor_dev_attr_in11_input.dev_attr.attr, &sensor_dev_attr_in12_input.dev_attr.attr, &sensor_dev_attr_in13_input.dev_attr.attr, &sensor_dev_attr_in1_label.dev_attr.attr, &sensor_dev_attr_in2_label.dev_attr.attr, &sensor_dev_attr_in3_label.dev_attr.attr, &sensor_dev_attr_in4_label.dev_attr.attr, &sensor_dev_attr_in5_label.dev_attr.attr, &sensor_dev_attr_in6_label.dev_attr.attr, &sensor_dev_attr_in7_label.dev_attr.attr, &sensor_dev_attr_in8_label.dev_attr.attr, &sensor_dev_attr_in9_label.dev_attr.attr, &sensor_dev_attr_in10_label.dev_attr.attr, &sensor_dev_attr_in11_label.dev_attr.attr, &sensor_dev_attr_in12_label.dev_attr.attr, &sensor_dev_attr_in13_label.dev_attr.attr, &sensor_dev_attr_in1_alarm.dev_attr.attr, &sensor_dev_attr_in2_alarm.dev_attr.attr, &sensor_dev_attr_in3_alarm.dev_attr.attr, &sensor_dev_attr_in4_alarm.dev_attr.attr, &sensor_dev_attr_in5_alarm.dev_attr.attr, &sensor_dev_attr_in6_alarm.dev_attr.attr, &sensor_dev_attr_in7_alarm.dev_attr.attr, &sensor_dev_attr_in8_alarm.dev_attr.attr, &sensor_dev_attr_in9_alarm.dev_attr.attr, &sensor_dev_attr_in10_alarm.dev_attr.attr, &sensor_dev_attr_in11_alarm.dev_attr.attr, &sensor_dev_attr_in12_alarm.dev_attr.attr, &sensor_dev_attr_in13_alarm.dev_attr.attr, &sensor_dev_attr_fan1_min.dev_attr.attr, &sensor_dev_attr_fan2_min.dev_attr.attr, &sensor_dev_attr_fan3_min.dev_attr.attr, &sensor_dev_attr_fan4_min.dev_attr.attr, &sensor_dev_attr_fan5_min.dev_attr.attr, &sensor_dev_attr_fan6_min.dev_attr.attr, &sensor_dev_attr_fan7_min.dev_attr.attr, &sensor_dev_attr_fan8_min.dev_attr.attr, 
&sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan2_input.dev_attr.attr, &sensor_dev_attr_fan3_input.dev_attr.attr, &sensor_dev_attr_fan4_input.dev_attr.attr, &sensor_dev_attr_fan5_input.dev_attr.attr, &sensor_dev_attr_fan6_input.dev_attr.attr, &sensor_dev_attr_fan7_input.dev_attr.attr, &sensor_dev_attr_fan8_input.dev_attr.attr, &sensor_dev_attr_fan1_alarm.dev_attr.attr, &sensor_dev_attr_fan2_alarm.dev_attr.attr, &sensor_dev_attr_fan3_alarm.dev_attr.attr, &sensor_dev_attr_fan4_alarm.dev_attr.attr, &sensor_dev_attr_fan5_alarm.dev_attr.attr, &sensor_dev_attr_fan6_alarm.dev_attr.attr, &sensor_dev_attr_fan7_alarm.dev_attr.attr, &sensor_dev_attr_fan8_alarm.dev_attr.attr, &sensor_dev_attr_force_pwm_max.dev_attr.attr, &sensor_dev_attr_pwm1.dev_attr.attr, &sensor_dev_attr_pwm2.dev_attr.attr, &sensor_dev_attr_pwm3.dev_attr.attr, &sensor_dev_attr_pwm4.dev_attr.attr, &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr, &sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr, &sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr, &sensor_dev_attr_pwm4_auto_point1_pwm.dev_attr.attr, &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr, &sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr, &sensor_dev_attr_pwm3_auto_point2_pwm.dev_attr.attr, &sensor_dev_attr_pwm4_auto_point2_pwm.dev_attr.attr, &sensor_dev_attr_temp1_auto_point1_hyst.dev_attr.attr, &sensor_dev_attr_temp2_auto_point1_hyst.dev_attr.attr, &sensor_dev_attr_temp3_auto_point1_hyst.dev_attr.attr, &sensor_dev_attr_temp4_auto_point1_hyst.dev_attr.attr, &sensor_dev_attr_temp1_auto_point2_hyst.dev_attr.attr, &sensor_dev_attr_temp2_auto_point2_hyst.dev_attr.attr, &sensor_dev_attr_temp3_auto_point2_hyst.dev_attr.attr, &sensor_dev_attr_temp4_auto_point2_hyst.dev_attr.attr, &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr, &sensor_dev_attr_temp2_auto_point1_temp.dev_attr.attr, &sensor_dev_attr_temp3_auto_point1_temp.dev_attr.attr, &sensor_dev_attr_temp4_auto_point1_temp.dev_attr.attr, 
&sensor_dev_attr_temp1_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_temp2_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_temp3_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_temp4_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_pwm1_enable.dev_attr.attr, &sensor_dev_attr_pwm2_enable.dev_attr.attr, &sensor_dev_attr_pwm3_enable.dev_attr.attr, &sensor_dev_attr_pwm4_enable.dev_attr.attr, &sensor_dev_attr_pwm1_auto_channels_temp.dev_attr.attr, &sensor_dev_attr_pwm2_auto_channels_temp.dev_attr.attr, &sensor_dev_attr_pwm3_auto_channels_temp.dev_attr.attr, &sensor_dev_attr_pwm4_auto_channels_temp.dev_attr.attr, NULL }; /* Return 0 if detection is successful, -ENODEV otherwise */ static int adt7462_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int vendor, device, revision; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; vendor = i2c_smbus_read_byte_data(client, ADT7462_REG_VENDOR); if (vendor != ADT7462_VENDOR) return -ENODEV; device = i2c_smbus_read_byte_data(client, ADT7462_REG_DEVICE); if (device != ADT7462_DEVICE) return -ENODEV; revision = i2c_smbus_read_byte_data(client, ADT7462_REG_REVISION); if (revision != ADT7462_REVISION) return -ENODEV; strlcpy(info->type, "adt7462", I2C_NAME_SIZE); return 0; } static int adt7462_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct adt7462_data *data; int err; data = kzalloc(sizeof(struct adt7462_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit; } i2c_set_clientdata(client, data); mutex_init(&data->lock); dev_info(&client->dev, "%s chip found\n", client->name); /* Register sysfs hooks */ data->attrs.attrs = adt7462_attr; err = sysfs_create_group(&client->dev.kobj, &data->attrs); if (err) goto exit_free; data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto exit_remove; } return 0; exit_remove: 
sysfs_remove_group(&client->dev.kobj, &data->attrs); exit_free: kfree(data); exit: return err; } static int adt7462_remove(struct i2c_client *client) { struct adt7462_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &data->attrs); kfree(data); return 0; } module_i2c_driver(adt7462_driver); MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>"); MODULE_DESCRIPTION("ADT7462 driver"); MODULE_LICENSE("GPL");
gpl-2.0
unitecontrol/linux-sunxi
drivers/hwmon/adcxx.c
4940
6781
/*
 * adcxx.c
 *
 * The adcxx4s is an AD converter family from National Semiconductor (NS).
 *
 * Copyright (c) 2008 Marc Pignat <marc.pignat@hevs.ch>
 *
 * The adcxx4s communicates with a host processor via an SPI/Microwire Bus
 * interface. This driver supports the whole family of devices with name
 * ADC<bb><c>S<sss>, where
 * * bb is the resolution in number of bits (8, 10, 12)
 * * c is the number of channels (1, 2, 4, 8)
 * * sss is the maximum conversion speed (021 for 200 kSPS, 051 for 500 kSPS
 *   and 101 for 1 MSPS)
 *
 * Complete datasheets are available at National's website here:
 * http://www.national.com/ds/DC/ADC<bb><c>S<sss>.pdf
 *
 * Handling of 8, 10 and 12 bits converters are the same, the
 * unavailable bits are 0 :)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/sysfs.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>

#define DRVNAME		"adcxx"

/* Per-device state, allocated in probe and stored as SPI drvdata. */
struct adcxx {
	struct device *hwmon_dev;	/* registered hwmon class device */
	struct mutex lock;		/* serializes SPI access and state */
	u32 channels;			/* number of input channels (1/2/4/8) */
	u32 reference;			/* full-scale reference, in millivolts */
};

/*
 * sysfs hook function: read one channel and return its value in
 * millivolts.  For single-channel parts a plain read suffices; for
 * multi-channel parts the channel number must first be written into
 * the control bits.
 */
static ssize_t adcxx_read(struct device *dev,
		struct device_attribute *devattr, char *buf)
{
	struct spi_device *spi = to_spi_device(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct adcxx *adc = spi_get_drvdata(spi);
	u8 tx_buf[2];
	u8 rx_buf[2];
	int status;
	u32 value;

	if (mutex_lock_interruptible(&adc->lock))
		return -ERESTARTSYS;

	if (adc->channels == 1) {
		status = spi_read(spi, rx_buf, sizeof(rx_buf));
	} else {
		tx_buf[0] = attr->index << 3; /* other bits are don't care */
		status = spi_write_then_read(spi, tx_buf, sizeof(tx_buf),
						rx_buf, sizeof(rx_buf));
	}
	if (status < 0) {
		dev_warn(dev, "SPI synch. transfer failed with status %d\n",
				status);
		goto out;
	}

	value = (rx_buf[0] << 8) + rx_buf[1];
	dev_dbg(dev, "raw value = 0x%x\n", value);

	/* Scale the (up to) 12-bit raw sample to millivolts. */
	value = value * adc->reference >> 12;
	/* Fix: value is u32, so use %u (the old %d was a format mismatch) */
	status = sprintf(buf, "%u\n", value);
out:
	mutex_unlock(&adc->lock);
	return status;
}

/* sysfs: lowest representable input; always 0 for this chip family. */
static ssize_t adcxx_show_min(struct device *dev,
		struct device_attribute *devattr, char *buf)
{
	/* The minimum reference is 0 for this chip family */
	return sprintf(buf, "0\n");
}

/* sysfs: current full-scale reference voltage in millivolts. */
static ssize_t adcxx_show_max(struct device *dev,
		struct device_attribute *devattr, char *buf)
{
	struct spi_device *spi = to_spi_device(dev);
	struct adcxx *adc = spi_get_drvdata(spi);
	u32 reference;

	if (mutex_lock_interruptible(&adc->lock))
		return -ERESTARTSYS;

	reference = adc->reference;

	mutex_unlock(&adc->lock);

	/* Fix: reference is u32, print with %u instead of %d */
	return sprintf(buf, "%u\n", reference);
}

/*
 * sysfs: set the full-scale reference voltage in millivolts.
 * Fix: parse directly as u32 (kstrtou32) so out-of-range input is
 * rejected with -EINVAL instead of being silently truncated on
 * 64-bit machines, as the old kstrtoul-into-u32 path could do.
 */
static ssize_t adcxx_set_max(struct device *dev,
	struct device_attribute *devattr, const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	struct adcxx *adc = spi_get_drvdata(spi);
	u32 value;

	if (kstrtou32(buf, 10, &value))
		return -EINVAL;

	if (mutex_lock_interruptible(&adc->lock))
		return -ERESTARTSYS;

	adc->reference = value;

	mutex_unlock(&adc->lock);

	return count;
}

/* sysfs: device name, e.g. "adcxx4s" for the four-channel variant. */
static ssize_t adcxx_show_name(struct device *dev, struct device_attribute
			      *devattr, char *buf)
{
	struct spi_device *spi = to_spi_device(dev);
	struct adcxx *adc = spi_get_drvdata(spi);

	/* Fix: channels is u32, use %u rather than %d */
	return sprintf(buf, "adcxx%us\n", adc->channels);
}

/*
 * Attribute table: the first three entries are common; in<N>_input
 * entries are created only up to the channel count of the probed part.
 */
static struct sensor_device_attribute ad_input[] = {
	SENSOR_ATTR(name, S_IRUGO, adcxx_show_name, NULL, 0),
	SENSOR_ATTR(in_min, S_IRUGO, adcxx_show_min, NULL, 0),
	SENSOR_ATTR(in_max, S_IWUSR | S_IRUGO, adcxx_show_max,
					adcxx_set_max, 0),
	SENSOR_ATTR(in0_input, S_IRUGO, adcxx_read, NULL, 0),
	SENSOR_ATTR(in1_input, S_IRUGO, adcxx_read, NULL, 1),
	SENSOR_ATTR(in2_input, S_IRUGO, adcxx_read, NULL, 2),
	SENSOR_ATTR(in3_input, S_IRUGO, adcxx_read, NULL, 3),
	SENSOR_ATTR(in4_input, S_IRUGO, adcxx_read, NULL, 4),
	SENSOR_ATTR(in5_input, S_IRUGO, adcxx_read, NULL, 5),
	SENSOR_ATTR(in6_input, S_IRUGO, adcxx_read, NULL, 6),
	SENSOR_ATTR(in7_input, S_IRUGO, adcxx_read, NULL, 7),
};

/*----------------------------------------------------------------------*/

/*
 * Probe: allocate state, create the sysfs files and register with the
 * hwmon core.  The lock is held across registration so that no sysfs
 * read can observe the device in a half-initialized state.
 */
static int __devinit adcxx_probe(struct spi_device *spi)
{
	int channels = spi_get_device_id(spi)->driver_data;
	struct adcxx *adc;
	int status;
	int i;

	adc = kzalloc(sizeof *adc, GFP_KERNEL);
	if (!adc)
		return -ENOMEM;

	/* set a default value for the reference */
	adc->reference = 3300;
	adc->channels = channels;
	mutex_init(&adc->lock);

	mutex_lock(&adc->lock);

	spi_set_drvdata(spi, adc);

	for (i = 0; i < 3 + adc->channels; i++) {
		status = device_create_file(&spi->dev, &ad_input[i].dev_attr);
		if (status) {
			dev_err(&spi->dev, "device_create_file failed.\n");
			goto out_err;
		}
	}

	adc->hwmon_dev = hwmon_device_register(&spi->dev);
	if (IS_ERR(adc->hwmon_dev)) {
		dev_err(&spi->dev, "hwmon_device_register failed.\n");
		status = PTR_ERR(adc->hwmon_dev);
		goto out_err;
	}

	mutex_unlock(&adc->lock);
	return 0;

out_err:
	/* unwind only the attribute files already created */
	for (i--; i >= 0; i--)
		device_remove_file(&spi->dev, &ad_input[i].dev_attr);

	spi_set_drvdata(spi, NULL);
	mutex_unlock(&adc->lock);
	kfree(adc);
	return status;
}

/* Remove: unregister from hwmon, drop sysfs files, free state. */
static int __devexit adcxx_remove(struct spi_device *spi)
{
	struct adcxx *adc = spi_get_drvdata(spi);
	int i;

	mutex_lock(&adc->lock);
	hwmon_device_unregister(adc->hwmon_dev);
	for (i = 0; i < 3 + adc->channels; i++)
		device_remove_file(&spi->dev, &ad_input[i].dev_attr);

	spi_set_drvdata(spi, NULL);
	mutex_unlock(&adc->lock);
	kfree(adc);

	return 0;
}

/* driver_data encodes the channel count of each supported variant. */
static const struct spi_device_id adcxx_ids[] = {
	{ "adcxx1s", 1 },
	{ "adcxx2s", 2 },
	{ "adcxx4s", 4 },
	{ "adcxx8s", 8 },
	{ },
};
MODULE_DEVICE_TABLE(spi, adcxx_ids);

static struct spi_driver adcxx_driver = {
	.driver = {
		.name	= "adcxx",
		.owner	= THIS_MODULE,
	},
	.id_table = adcxx_ids,
	.probe	= adcxx_probe,
	.remove	= __devexit_p(adcxx_remove),
};

module_spi_driver(adcxx_driver);

MODULE_AUTHOR("Marc Pignat");
MODULE_DESCRIPTION("National Semiconductor adcxx8sxxx Linux driver");
MODULE_LICENSE("GPL");
gpl-2.0
kaihua/Simple_kernel_jlo
drivers/staging/usbip/stub_main.c
5196
6523
/* * Copyright (C) 2003-2008 Takahiro Hirofuchi * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, * USA. */ #include <linux/string.h> #include <linux/module.h> #include "usbip_common.h" #include "stub.h" #define DRIVER_AUTHOR "Takahiro Hirofuchi" #define DRIVER_DESC "USB/IP Host Driver" struct kmem_cache *stub_priv_cache; /* * busid_tables defines matching busids that usbip can grab. A user can change * dynamically what device is locally used and what device is exported to a * remote host. */ #define MAX_BUSID 16 static struct bus_id_priv busid_table[MAX_BUSID]; static spinlock_t busid_table_lock; static void init_busid_table(void) { int i; memset(busid_table, 0, sizeof(busid_table)); for (i = 0; i < MAX_BUSID; i++) busid_table[i].status = STUB_BUSID_OTHER; spin_lock_init(&busid_table_lock); } /* * Find the index of the busid by name. * Must be called with busid_table_lock held. 
*/ static int get_busid_idx(const char *busid) { int i; int idx = -1; for (i = 0; i < MAX_BUSID; i++) if (busid_table[i].name[0]) if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) { idx = i; break; } return idx; } struct bus_id_priv *get_busid_priv(const char *busid) { int idx; struct bus_id_priv *bid = NULL; spin_lock(&busid_table_lock); idx = get_busid_idx(busid); if (idx >= 0) bid = &(busid_table[idx]); spin_unlock(&busid_table_lock); return bid; } static int add_match_busid(char *busid) { int i; int ret = -1; spin_lock(&busid_table_lock); /* already registered? */ if (get_busid_idx(busid) >= 0) { ret = 0; goto out; } for (i = 0; i < MAX_BUSID; i++) if (!busid_table[i].name[0]) { strncpy(busid_table[i].name, busid, BUSID_SIZE); if ((busid_table[i].status != STUB_BUSID_ALLOC) && (busid_table[i].status != STUB_BUSID_REMOV)) busid_table[i].status = STUB_BUSID_ADDED; ret = 0; break; } out: spin_unlock(&busid_table_lock); return ret; } int del_match_busid(char *busid) { int idx; int ret = -1; spin_lock(&busid_table_lock); idx = get_busid_idx(busid); if (idx < 0) goto out; /* found */ ret = 0; if (busid_table[idx].status == STUB_BUSID_OTHER) memset(busid_table[idx].name, 0, BUSID_SIZE); if ((busid_table[idx].status != STUB_BUSID_OTHER) && (busid_table[idx].status != STUB_BUSID_ADDED)) busid_table[idx].status = STUB_BUSID_REMOV; out: spin_unlock(&busid_table_lock); return ret; } static ssize_t show_match_busid(struct device_driver *drv, char *buf) { int i; char *out = buf; spin_lock(&busid_table_lock); for (i = 0; i < MAX_BUSID; i++) if (busid_table[i].name[0]) out += sprintf(out, "%s ", busid_table[i].name); spin_unlock(&busid_table_lock); out += sprintf(out, "\n"); return out - buf; } static ssize_t store_match_busid(struct device_driver *dev, const char *buf, size_t count) { int len; char busid[BUSID_SIZE]; if (count < 5) return -EINVAL; /* strnlen() does not include \0 */ len = strnlen(buf + 4, BUSID_SIZE); /* busid needs to include \0 termination */ if (!(len 
< BUSID_SIZE)) return -EINVAL; strncpy(busid, buf + 4, BUSID_SIZE); if (!strncmp(buf, "add ", 4)) { if (add_match_busid(busid) < 0) { return -ENOMEM; } else { pr_debug("add busid %s\n", busid); return count; } } else if (!strncmp(buf, "del ", 4)) { if (del_match_busid(busid) < 0) { return -ENODEV; } else { pr_debug("del busid %s\n", busid); return count; } } else { return -EINVAL; } } static DRIVER_ATTR(match_busid, S_IRUSR | S_IWUSR, show_match_busid, store_match_busid); static struct stub_priv *stub_priv_pop_from_listhead(struct list_head *listhead) { struct stub_priv *priv, *tmp; list_for_each_entry_safe(priv, tmp, listhead, list) { list_del(&priv->list); return priv; } return NULL; } static struct stub_priv *stub_priv_pop(struct stub_device *sdev) { unsigned long flags; struct stub_priv *priv; spin_lock_irqsave(&sdev->priv_lock, flags); priv = stub_priv_pop_from_listhead(&sdev->priv_init); if (priv) goto done; priv = stub_priv_pop_from_listhead(&sdev->priv_tx); if (priv) goto done; priv = stub_priv_pop_from_listhead(&sdev->priv_free); done: spin_unlock_irqrestore(&sdev->priv_lock, flags); return priv; } void stub_device_cleanup_urbs(struct stub_device *sdev) { struct stub_priv *priv; struct urb *urb; dev_dbg(&sdev->udev->dev, "free sdev %p\n", sdev); while ((priv = stub_priv_pop(sdev))) { urb = priv->urb; dev_dbg(&sdev->udev->dev, "free urb %p\n", urb); usb_kill_urb(urb); kmem_cache_free(stub_priv_cache, priv); kfree(urb->transfer_buffer); kfree(urb->setup_packet); usb_free_urb(urb); } } static int __init usbip_host_init(void) { int ret; init_busid_table(); stub_priv_cache = KMEM_CACHE(stub_priv, SLAB_HWCACHE_ALIGN); if (!stub_priv_cache) { pr_err("kmem_cache_create failed\n"); return -ENOMEM; } ret = usb_register(&stub_driver); if (ret < 0) { pr_err("usb_register failed %d\n", ret); goto err_usb_register; } ret = driver_create_file(&stub_driver.drvwrap.driver, &driver_attr_match_busid); if (ret < 0) { pr_err("driver_create_file failed\n"); goto 
err_create_file; } pr_info(DRIVER_DESC " v" USBIP_VERSION "\n"); return ret; err_create_file: usb_deregister(&stub_driver); err_usb_register: kmem_cache_destroy(stub_priv_cache); return ret; } static void __exit usbip_host_exit(void) { driver_remove_file(&stub_driver.drvwrap.driver, &driver_attr_match_busid); /* * deregister() calls stub_disconnect() for all devices. Device * specific data is cleared in stub_disconnect(). */ usb_deregister(&stub_driver); kmem_cache_destroy(stub_priv_cache); } module_init(usbip_host_init); module_exit(usbip_host_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_VERSION(USBIP_VERSION);
gpl-2.0
CM-Tab-S/android_kernel_samsung_chagallwifi
fs/ubifs/budget.c
5196
24369
/* * This file is part of UBIFS. * * Copyright (C) 2006-2008 Nokia Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Authors: Adrian Hunter * Artem Bityutskiy (Битюцкий Артём) */ /* * This file implements the budgeting sub-system which is responsible for UBIFS * space management. * * Factors such as compression, wasted space at the ends of LEBs, space in other * journal heads, the effect of updates on the index, and so on, make it * impossible to accurately predict the amount of space needed. Consequently * approximations are used. */ #include "ubifs.h" #include <linux/writeback.h> #include <linux/math64.h> /* * When pessimistic budget calculations say that there is no enough space, * UBIFS starts writing back dirty inodes and pages, doing garbage collection, * or committing. The below constant defines maximum number of times UBIFS * repeats the operations. */ #define MAX_MKSPC_RETRIES 3 /* * The below constant defines amount of dirty pages which should be written * back at when trying to shrink the liability. */ #define NR_TO_WRITE 16 /** * shrink_liability - write-back some dirty pages/inodes. * @c: UBIFS file-system description object * @nr_to_write: how many dirty pages to write-back * * This function shrinks UBIFS liability by means of writing back some amount * of dirty inodes and their pages. 
* * Note, this function synchronizes even VFS inodes which are locked * (@i_mutex) by the caller of the budgeting function, because write-back does * not touch @i_mutex. */ static void shrink_liability(struct ubifs_info *c, int nr_to_write) { down_read(&c->vfs_sb->s_umount); writeback_inodes_sb(c->vfs_sb, WB_REASON_FS_FREE_SPACE); up_read(&c->vfs_sb->s_umount); } /** * run_gc - run garbage collector. * @c: UBIFS file-system description object * * This function runs garbage collector to make some more free space. Returns * zero if a free LEB has been produced, %-EAGAIN if commit is required, and a * negative error code in case of failure. */ static int run_gc(struct ubifs_info *c) { int err, lnum; /* Make some free space by garbage-collecting dirty space */ down_read(&c->commit_sem); lnum = ubifs_garbage_collect(c, 1); up_read(&c->commit_sem); if (lnum < 0) return lnum; /* GC freed one LEB, return it to lprops */ dbg_budg("GC freed LEB %d", lnum); err = ubifs_return_leb(c, lnum); if (err) return err; return 0; } /** * get_liability - calculate current liability. * @c: UBIFS file-system description object * * This function calculates and returns current UBIFS liability, i.e. the * amount of bytes UBIFS has "promised" to write to the media. */ static long long get_liability(struct ubifs_info *c) { long long liab; spin_lock(&c->space_lock); liab = c->bi.idx_growth + c->bi.data_growth + c->bi.dd_growth; spin_unlock(&c->space_lock); return liab; } /** * make_free_space - make more free space on the file-system. * @c: UBIFS file-system description object * * This function is called when an operation cannot be budgeted because there * is supposedly no free space. 
But in most cases there is some free space: * o budgeting is pessimistic, so it always budgets more than it is actually * needed, so shrinking the liability is one way to make free space - the * cached data will take less space then it was budgeted for; * o GC may turn some dark space into free space (budgeting treats dark space * as not available); * o commit may free some LEB, i.e., turn freeable LEBs into free LEBs. * * So this function tries to do the above. Returns %-EAGAIN if some free space * was presumably made and the caller has to re-try budgeting the operation. * Returns %-ENOSPC if it couldn't do more free space, and other negative error * codes on failures. */ static int make_free_space(struct ubifs_info *c) { int err, retries = 0; long long liab1, liab2; do { liab1 = get_liability(c); /* * We probably have some dirty pages or inodes (liability), try * to write them back. */ dbg_budg("liability %lld, run write-back", liab1); shrink_liability(c, NR_TO_WRITE); liab2 = get_liability(c); if (liab2 < liab1) return -EAGAIN; dbg_budg("new liability %lld (not shrunk)", liab2); /* Liability did not shrink again, try GC */ dbg_budg("Run GC"); err = run_gc(c); if (!err) return -EAGAIN; if (err != -EAGAIN && err != -ENOSPC) /* Some real error happened */ return err; dbg_budg("Run commit (retries %d)", retries); err = ubifs_run_commit(c); if (err) return err; } while (retries++ < MAX_MKSPC_RETRIES); return -ENOSPC; } /** * ubifs_calc_min_idx_lebs - calculate amount of LEBs for the index. * @c: UBIFS file-system description object * * This function calculates and returns the number of LEBs which should be kept * for index usage. 
*/ int ubifs_calc_min_idx_lebs(struct ubifs_info *c) { int idx_lebs; long long idx_size; idx_size = c->bi.old_idx_sz + c->bi.idx_growth + c->bi.uncommitted_idx; /* And make sure we have thrice the index size of space reserved */ idx_size += idx_size << 1; /* * We do not maintain 'old_idx_size' as 'old_idx_lebs'/'old_idx_bytes' * pair, nor similarly the two variables for the new index size, so we * have to do this costly 64-bit division on fast-path. */ idx_lebs = div_u64(idx_size + c->idx_leb_size - 1, c->idx_leb_size); /* * The index head is not available for the in-the-gaps method, so add an * extra LEB to compensate. */ idx_lebs += 1; if (idx_lebs < MIN_INDEX_LEBS) idx_lebs = MIN_INDEX_LEBS; return idx_lebs; } /** * ubifs_calc_available - calculate available FS space. * @c: UBIFS file-system description object * @min_idx_lebs: minimum number of LEBs reserved for the index * * This function calculates and returns amount of FS space available for use. */ long long ubifs_calc_available(const struct ubifs_info *c, int min_idx_lebs) { int subtract_lebs; long long available; available = c->main_bytes - c->lst.total_used; /* * Now 'available' contains theoretically available flash space * assuming there is no index, so we have to subtract the space which * is reserved for the index. */ subtract_lebs = min_idx_lebs; /* Take into account that GC reserves one LEB for its own needs */ subtract_lebs += 1; /* * The GC journal head LEB is not really accessible. And since * different write types go to different heads, we may count only on * one head's space. */ subtract_lebs += c->jhead_cnt - 1; /* We also reserve one LEB for deletions, which bypass budgeting */ subtract_lebs += 1; available -= (long long)subtract_lebs * c->leb_size; /* Subtract the dead space which is not available for use */ available -= c->lst.total_dead; /* * Subtract dark space, which might or might not be usable - it depends * on the data which we have on the media and which will be written. 
If * this is a lot of uncompressed or not-compressible data, the dark * space cannot be used. */ available -= c->lst.total_dark; /* * However, there is more dark space. The index may be bigger than * @min_idx_lebs. Those extra LEBs are assumed to be available, but * their dark space is not included in total_dark, so it is subtracted * here. */ if (c->lst.idx_lebs > min_idx_lebs) { subtract_lebs = c->lst.idx_lebs - min_idx_lebs; available -= subtract_lebs * c->dark_wm; } /* The calculations are rough and may end up with a negative number */ return available > 0 ? available : 0; } /** * can_use_rp - check whether the user is allowed to use reserved pool. * @c: UBIFS file-system description object * * UBIFS has so-called "reserved pool" which is flash space reserved * for the superuser and for uses whose UID/GID is recorded in UBIFS superblock. * This function checks whether current user is allowed to use reserved pool. * Returns %1 current user is allowed to use reserved pool and %0 otherwise. */ static int can_use_rp(struct ubifs_info *c) { if (current_fsuid() == c->rp_uid || capable(CAP_SYS_RESOURCE) || (c->rp_gid != 0 && in_group_p(c->rp_gid))) return 1; return 0; } /** * do_budget_space - reserve flash space for index and data growth. * @c: UBIFS file-system description object * * This function makes sure UBIFS has enough free LEBs for index growth and * data. * * When budgeting index space, UBIFS reserves thrice as many LEBs as the index * would take if it was consolidated and written to the flash. This guarantees * that the "in-the-gaps" commit method always succeeds and UBIFS will always * be able to commit dirty index. So this function basically adds amount of * budgeted index space to the size of the current index, multiplies this by 3, * and makes sure this does not exceed the amount of free LEBs. * * Notes about @c->bi.min_idx_lebs and @c->lst.idx_lebs variables: * o @c->lst.idx_lebs is the number of LEBs the index currently uses. 
It might * be large, because UBIFS does not do any index consolidation as long as * there is free space. IOW, the index may take a lot of LEBs, but the LEBs * will contain a lot of dirt. * o @c->bi.min_idx_lebs is the number of LEBS the index presumably takes. IOW, * the index may be consolidated to take up to @c->bi.min_idx_lebs LEBs. * * This function returns zero in case of success, and %-ENOSPC in case of * failure. */ static int do_budget_space(struct ubifs_info *c) { long long outstanding, available; int lebs, rsvd_idx_lebs, min_idx_lebs; /* First budget index space */ min_idx_lebs = ubifs_calc_min_idx_lebs(c); /* Now 'min_idx_lebs' contains number of LEBs to reserve */ if (min_idx_lebs > c->lst.idx_lebs) rsvd_idx_lebs = min_idx_lebs - c->lst.idx_lebs; else rsvd_idx_lebs = 0; /* * The number of LEBs that are available to be used by the index is: * * @c->lst.empty_lebs + @c->freeable_cnt + @c->idx_gc_cnt - * @c->lst.taken_empty_lebs * * @c->lst.empty_lebs are available because they are empty. * @c->freeable_cnt are available because they contain only free and * dirty space, @c->idx_gc_cnt are available because they are index * LEBs that have been garbage collected and are awaiting the commit * before they can be used. And the in-the-gaps method will grab these * if it needs them. @c->lst.taken_empty_lebs are empty LEBs that have * already been allocated for some purpose. * * Note, @c->idx_gc_cnt is included to both @c->lst.empty_lebs (because * these LEBs are empty) and to @c->lst.taken_empty_lebs (because they * are taken until after the commit). * * Note, @c->lst.taken_empty_lebs may temporarily be higher by one * because of the way we serialize LEB allocations and budgeting. See a * comment in 'ubifs_find_free_space()'. 
*/ lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt - c->lst.taken_empty_lebs; if (unlikely(rsvd_idx_lebs > lebs)) { dbg_budg("out of indexing space: min_idx_lebs %d (old %d), " "rsvd_idx_lebs %d", min_idx_lebs, c->bi.min_idx_lebs, rsvd_idx_lebs); return -ENOSPC; } available = ubifs_calc_available(c, min_idx_lebs); outstanding = c->bi.data_growth + c->bi.dd_growth; if (unlikely(available < outstanding)) { dbg_budg("out of data space: available %lld, outstanding %lld", available, outstanding); return -ENOSPC; } if (available - outstanding <= c->rp_size && !can_use_rp(c)) return -ENOSPC; c->bi.min_idx_lebs = min_idx_lebs; return 0; } /** * calc_idx_growth - calculate approximate index growth from budgeting request. * @c: UBIFS file-system description object * @req: budgeting request * * For now we assume each new node adds one znode. But this is rather poor * approximation, though. */ static int calc_idx_growth(const struct ubifs_info *c, const struct ubifs_budget_req *req) { int znodes; znodes = req->new_ino + (req->new_page << UBIFS_BLOCKS_PER_PAGE_SHIFT) + req->new_dent; return znodes * c->max_idx_node_sz; } /** * calc_data_growth - calculate approximate amount of new data from budgeting * request. * @c: UBIFS file-system description object * @req: budgeting request */ static int calc_data_growth(const struct ubifs_info *c, const struct ubifs_budget_req *req) { int data_growth; data_growth = req->new_ino ? c->bi.inode_budget : 0; if (req->new_page) data_growth += c->bi.page_budget; if (req->new_dent) data_growth += c->bi.dent_budget; data_growth += req->new_ino_d; return data_growth; } /** * calc_dd_growth - calculate approximate amount of data which makes other data * dirty from budgeting request. * @c: UBIFS file-system description object * @req: budgeting request */ static int calc_dd_growth(const struct ubifs_info *c, const struct ubifs_budget_req *req) { int dd_growth; dd_growth = req->dirtied_page ? 
c->bi.page_budget : 0; if (req->dirtied_ino) dd_growth += c->bi.inode_budget << (req->dirtied_ino - 1); if (req->mod_dent) dd_growth += c->bi.dent_budget; dd_growth += req->dirtied_ino_d; return dd_growth; } /** * ubifs_budget_space - ensure there is enough space to complete an operation. * @c: UBIFS file-system description object * @req: budget request * * This function allocates budget for an operation. It uses pessimistic * approximation of how much flash space the operation needs. The goal of this * function is to make sure UBIFS always has flash space to flush all dirty * pages, dirty inodes, and dirty znodes (liability). This function may force * commit, garbage-collection or write-back. Returns zero in case of success, * %-ENOSPC if there is no free space and other negative error codes in case of * failures. */ int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req) { int uninitialized_var(cmt_retries), uninitialized_var(wb_retries); int err, idx_growth, data_growth, dd_growth, retried = 0; ubifs_assert(req->new_page <= 1); ubifs_assert(req->dirtied_page <= 1); ubifs_assert(req->new_dent <= 1); ubifs_assert(req->mod_dent <= 1); ubifs_assert(req->new_ino <= 1); ubifs_assert(req->new_ino_d <= UBIFS_MAX_INO_DATA); ubifs_assert(req->dirtied_ino <= 4); ubifs_assert(req->dirtied_ino_d <= UBIFS_MAX_INO_DATA * 4); ubifs_assert(!(req->new_ino_d & 7)); ubifs_assert(!(req->dirtied_ino_d & 7)); data_growth = calc_data_growth(c, req); dd_growth = calc_dd_growth(c, req); if (!data_growth && !dd_growth) return 0; idx_growth = calc_idx_growth(c, req); again: spin_lock(&c->space_lock); ubifs_assert(c->bi.idx_growth >= 0); ubifs_assert(c->bi.data_growth >= 0); ubifs_assert(c->bi.dd_growth >= 0); if (unlikely(c->bi.nospace) && (c->bi.nospace_rp || !can_use_rp(c))) { dbg_budg("no space"); spin_unlock(&c->space_lock); return -ENOSPC; } c->bi.idx_growth += idx_growth; c->bi.data_growth += data_growth; c->bi.dd_growth += dd_growth; err = do_budget_space(c); if 
(likely(!err)) { req->idx_growth = idx_growth; req->data_growth = data_growth; req->dd_growth = dd_growth; spin_unlock(&c->space_lock); return 0; } /* Restore the old values */ c->bi.idx_growth -= idx_growth; c->bi.data_growth -= data_growth; c->bi.dd_growth -= dd_growth; spin_unlock(&c->space_lock); if (req->fast) { dbg_budg("no space for fast budgeting"); return err; } err = make_free_space(c); cond_resched(); if (err == -EAGAIN) { dbg_budg("try again"); goto again; } else if (err == -ENOSPC) { if (!retried) { retried = 1; dbg_budg("-ENOSPC, but anyway try once again"); goto again; } dbg_budg("FS is full, -ENOSPC"); c->bi.nospace = 1; if (can_use_rp(c) || c->rp_size == 0) c->bi.nospace_rp = 1; smp_wmb(); } else ubifs_err("cannot budget space, error %d", err); return err; } /** * ubifs_release_budget - release budgeted free space. * @c: UBIFS file-system description object * @req: budget request * * This function releases the space budgeted by 'ubifs_budget_space()'. Note, * since the index changes (which were budgeted for in @req->idx_growth) will * only be written to the media on commit, this function moves the index budget * from @c->bi.idx_growth to @c->bi.uncommitted_idx. The latter will be zeroed * by the commit operation. 
*/ void ubifs_release_budget(struct ubifs_info *c, struct ubifs_budget_req *req) { ubifs_assert(req->new_page <= 1); ubifs_assert(req->dirtied_page <= 1); ubifs_assert(req->new_dent <= 1); ubifs_assert(req->mod_dent <= 1); ubifs_assert(req->new_ino <= 1); ubifs_assert(req->new_ino_d <= UBIFS_MAX_INO_DATA); ubifs_assert(req->dirtied_ino <= 4); ubifs_assert(req->dirtied_ino_d <= UBIFS_MAX_INO_DATA * 4); ubifs_assert(!(req->new_ino_d & 7)); ubifs_assert(!(req->dirtied_ino_d & 7)); if (!req->recalculate) { ubifs_assert(req->idx_growth >= 0); ubifs_assert(req->data_growth >= 0); ubifs_assert(req->dd_growth >= 0); } if (req->recalculate) { req->data_growth = calc_data_growth(c, req); req->dd_growth = calc_dd_growth(c, req); req->idx_growth = calc_idx_growth(c, req); } if (!req->data_growth && !req->dd_growth) return; c->bi.nospace = c->bi.nospace_rp = 0; smp_wmb(); spin_lock(&c->space_lock); c->bi.idx_growth -= req->idx_growth; c->bi.uncommitted_idx += req->idx_growth; c->bi.data_growth -= req->data_growth; c->bi.dd_growth -= req->dd_growth; c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c); ubifs_assert(c->bi.idx_growth >= 0); ubifs_assert(c->bi.data_growth >= 0); ubifs_assert(c->bi.dd_growth >= 0); ubifs_assert(c->bi.min_idx_lebs < c->main_lebs); ubifs_assert(!(c->bi.idx_growth & 7)); ubifs_assert(!(c->bi.data_growth & 7)); ubifs_assert(!(c->bi.dd_growth & 7)); spin_unlock(&c->space_lock); } /** * ubifs_convert_page_budget - convert budget of a new page. * @c: UBIFS file-system description object * * This function converts budget which was allocated for a new page of data to * the budget of changing an existing page of data. The latter is smaller than * the former, so this function only does simple re-calculation and does not * involve any write-back. 
*/ void ubifs_convert_page_budget(struct ubifs_info *c) { spin_lock(&c->space_lock); /* Release the index growth reservation */ c->bi.idx_growth -= c->max_idx_node_sz << UBIFS_BLOCKS_PER_PAGE_SHIFT; /* Release the data growth reservation */ c->bi.data_growth -= c->bi.page_budget; /* Increase the dirty data growth reservation instead */ c->bi.dd_growth += c->bi.page_budget; /* And re-calculate the indexing space reservation */ c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c); spin_unlock(&c->space_lock); } /** * ubifs_release_dirty_inode_budget - release dirty inode budget. * @c: UBIFS file-system description object * @ui: UBIFS inode to release the budget for * * This function releases budget corresponding to a dirty inode. It is usually * called when after the inode has been written to the media and marked as * clean. It also causes the "no space" flags to be cleared. */ void ubifs_release_dirty_inode_budget(struct ubifs_info *c, struct ubifs_inode *ui) { struct ubifs_budget_req req; memset(&req, 0, sizeof(struct ubifs_budget_req)); /* The "no space" flags will be cleared because dd_growth is > 0 */ req.dd_growth = c->bi.inode_budget + ALIGN(ui->data_len, 8); ubifs_release_budget(c, &req); } /** * ubifs_reported_space - calculate reported free space. * @c: the UBIFS file-system description object * @free: amount of free space * * This function calculates amount of free space which will be reported to * user-space. User-space application tend to expect that if the file-system * (e.g., via the 'statfs()' call) reports that it has N bytes available, they * are able to write a file of size N. UBIFS attaches node headers to each data * node and it has to write indexing nodes as well. This introduces additional * overhead, and UBIFS has to report slightly less free space to meet the above * expectations. 
* * This function assumes free space is made up of uncompressed data nodes and * full index nodes (one per data node, tripled because we always allow enough * space to write the index thrice). * * Note, the calculation is pessimistic, which means that most of the time * UBIFS reports less space than it actually has. */ long long ubifs_reported_space(const struct ubifs_info *c, long long free) { int divisor, factor, f; /* * Reported space size is @free * X, where X is UBIFS block size * divided by UBIFS block size + all overhead one data block * introduces. The overhead is the node header + indexing overhead. * * Indexing overhead calculations are based on the following formula: * I = N/(f - 1) + 1, where I - number of indexing nodes, N - number * of data nodes, f - fanout. Because effective UBIFS fanout is twice * as less than maximum fanout, we assume that each data node * introduces 3 * @c->max_idx_node_sz / (@c->fanout/2 - 1) bytes. * Note, the multiplier 3 is because UBIFS reserves thrice as more space * for the index. */ f = c->fanout > 3 ? c->fanout >> 1 : 2; factor = UBIFS_BLOCK_SIZE; divisor = UBIFS_MAX_DATA_NODE_SZ; divisor += (c->max_idx_node_sz * 3) / (f - 1); free *= factor; return div_u64(free, divisor); } /** * ubifs_get_free_space_nolock - return amount of free space. * @c: UBIFS file-system description object * * This function calculates amount of free space to report to user-space. * * Because UBIFS may introduce substantial overhead (the index, node headers, * alignment, wastage at the end of LEBs, etc), it cannot report real amount of * free flash space it has (well, because not all dirty space is reclaimable, * UBIFS does not actually know the real amount). If UBIFS did so, it would * bread user expectations about what free space is. Users seem to accustomed * to assume that if the file-system reports N bytes of free space, they would * be able to fit a file of N bytes to the FS. 
This almost works for * traditional file-systems, because they have way less overhead than UBIFS. * So, to keep users happy, UBIFS tries to take the overhead into account. */ long long ubifs_get_free_space_nolock(struct ubifs_info *c) { int rsvd_idx_lebs, lebs; long long available, outstanding, free; ubifs_assert(c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c)); outstanding = c->bi.data_growth + c->bi.dd_growth; available = ubifs_calc_available(c, c->bi.min_idx_lebs); /* * When reporting free space to user-space, UBIFS guarantees that it is * possible to write a file of free space size. This means that for * empty LEBs we may use more precise calculations than * 'ubifs_calc_available()' is using. Namely, we know that in empty * LEBs we would waste only @c->leb_overhead bytes, not @c->dark_wm. * Thus, amend the available space. * * Note, the calculations below are similar to what we have in * 'do_budget_space()', so refer there for comments. */ if (c->bi.min_idx_lebs > c->lst.idx_lebs) rsvd_idx_lebs = c->bi.min_idx_lebs - c->lst.idx_lebs; else rsvd_idx_lebs = 0; lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt - c->lst.taken_empty_lebs; lebs -= rsvd_idx_lebs; available += lebs * (c->dark_wm - c->leb_overhead); if (available > outstanding) free = ubifs_reported_space(c, available - outstanding); else free = 0; return free; } /** * ubifs_get_free_space - return amount of free space. * @c: UBIFS file-system description object * * This function calculates and returns amount of free space to report to * user-space. */ long long ubifs_get_free_space(struct ubifs_info *c) { long long free; spin_lock(&c->space_lock); free = ubifs_get_free_space_nolock(c); spin_unlock(&c->space_lock); return free; }
gpl-2.0
Kinoma/acorn_kernel
fs/nfsd/stats.c
7756
2777
/* * procfs-based user access to knfsd statistics * * /proc/net/rpc/nfsd * * Format: * rc <hits> <misses> <nocache> * Statistsics for the reply cache * fh <stale> <total-lookups> <anonlookups> <dir-not-in-dcache> <nondir-not-in-dcache> * statistics for filehandle lookup * io <bytes-read> <bytes-written> * statistics for IO throughput * th <threads> <fullcnt> <10%-20%> <20%-30%> ... <90%-100%> <100%> * time (seconds) when nfsd thread usage above thresholds * and number of times that all threads were in use * ra cache-size <10% <20% <30% ... <100% not-found * number of times that read-ahead entry was found that deep in * the cache. * plus generic RPC stats (see net/sunrpc/stats.c) * * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de> */ #include <linux/seq_file.h> #include <linux/module.h> #include <linux/sunrpc/stats.h> #include <linux/nfsd/stats.h> #include <net/net_namespace.h> #include "nfsd.h" struct nfsd_stats nfsdstats; struct svc_stat nfsd_svcstats = { .program = &nfsd_program, }; static int nfsd_proc_show(struct seq_file *seq, void *v) { int i; seq_printf(seq, "rc %u %u %u\nfh %u %u %u %u %u\nio %u %u\n", nfsdstats.rchits, nfsdstats.rcmisses, nfsdstats.rcnocache, nfsdstats.fh_stale, nfsdstats.fh_lookup, nfsdstats.fh_anon, nfsdstats.fh_nocache_dir, nfsdstats.fh_nocache_nondir, nfsdstats.io_read, nfsdstats.io_write); /* thread usage: */ seq_printf(seq, "th %u %u", nfsdstats.th_cnt, nfsdstats.th_fullcnt); for (i=0; i<10; i++) { unsigned int jifs = nfsdstats.th_usage[i]; unsigned int sec = jifs / HZ, msec = (jifs % HZ)*1000/HZ; seq_printf(seq, " %u.%03u", sec, msec); } /* newline and ra-cache */ seq_printf(seq, "\nra %u", nfsdstats.ra_size); for (i=0; i<11; i++) seq_printf(seq, " %u", nfsdstats.ra_depth[i]); seq_putc(seq, '\n'); /* show my rpc info */ svc_seq_show(seq, &nfsd_svcstats); #ifdef CONFIG_NFSD_V4 /* Show count for individual nfsv4 operations */ /* Writing operation numbers 0 1 2 also for maintaining uniformity */ seq_printf(seq,"proc4ops 
%u", LAST_NFS4_OP + 1); for (i = 0; i <= LAST_NFS4_OP; i++) seq_printf(seq, " %u", nfsdstats.nfs4_opcount[i]); seq_putc(seq, '\n'); #endif return 0; } static int nfsd_proc_open(struct inode *inode, struct file *file) { return single_open(file, nfsd_proc_show, NULL); } static const struct file_operations nfsd_proc_fops = { .owner = THIS_MODULE, .open = nfsd_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; void nfsd_stat_init(void) { svc_proc_register(&init_net, &nfsd_svcstats, &nfsd_proc_fops); } void nfsd_stat_shutdown(void) { svc_proc_unregister(&init_net, "nfsd"); }
gpl-2.0
todorez/galileo-linux-stable
arch/score/kernel/asm-offsets.c
12364
6742
/* * arch/score/kernel/asm-offsets.c * * Score Processor version. * * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. * Chen Liqin <liqin.chen@sunplusct.com> * Lennox Wu <lennox.wu@sunplusct.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/kbuild.h> #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/sched.h> #include <asm-generic/cmpxchg-local.h> void output_ptreg_defines(void) { COMMENT("SCORE pt_regs offsets."); OFFSET(PT_R0, pt_regs, regs[0]); OFFSET(PT_R1, pt_regs, regs[1]); OFFSET(PT_R2, pt_regs, regs[2]); OFFSET(PT_R3, pt_regs, regs[3]); OFFSET(PT_R4, pt_regs, regs[4]); OFFSET(PT_R5, pt_regs, regs[5]); OFFSET(PT_R6, pt_regs, regs[6]); OFFSET(PT_R7, pt_regs, regs[7]); OFFSET(PT_R8, pt_regs, regs[8]); OFFSET(PT_R9, pt_regs, regs[9]); OFFSET(PT_R10, pt_regs, regs[10]); OFFSET(PT_R11, pt_regs, regs[11]); OFFSET(PT_R12, pt_regs, regs[12]); OFFSET(PT_R13, pt_regs, regs[13]); OFFSET(PT_R14, pt_regs, regs[14]); OFFSET(PT_R15, pt_regs, regs[15]); OFFSET(PT_R16, pt_regs, regs[16]); OFFSET(PT_R17, pt_regs, regs[17]); OFFSET(PT_R18, pt_regs, regs[18]); OFFSET(PT_R19, pt_regs, regs[19]); OFFSET(PT_R20, pt_regs, regs[20]); OFFSET(PT_R21, pt_regs, regs[21]); OFFSET(PT_R22, pt_regs, regs[22]); OFFSET(PT_R23, pt_regs, regs[23]); OFFSET(PT_R24, pt_regs, regs[24]); 
OFFSET(PT_R25, pt_regs, regs[25]); OFFSET(PT_R26, pt_regs, regs[26]); OFFSET(PT_R27, pt_regs, regs[27]); OFFSET(PT_R28, pt_regs, regs[28]); OFFSET(PT_R29, pt_regs, regs[29]); OFFSET(PT_R30, pt_regs, regs[30]); OFFSET(PT_R31, pt_regs, regs[31]); OFFSET(PT_ORIG_R4, pt_regs, orig_r4); OFFSET(PT_ORIG_R7, pt_regs, orig_r7); OFFSET(PT_CEL, pt_regs, cel); OFFSET(PT_CEH, pt_regs, ceh); OFFSET(PT_SR0, pt_regs, sr0); OFFSET(PT_SR1, pt_regs, sr1); OFFSET(PT_SR2, pt_regs, sr2); OFFSET(PT_EPC, pt_regs, cp0_epc); OFFSET(PT_EMA, pt_regs, cp0_ema); OFFSET(PT_PSR, pt_regs, cp0_psr); OFFSET(PT_ECR, pt_regs, cp0_ecr); OFFSET(PT_CONDITION, pt_regs, cp0_condition); OFFSET(PT_IS_SYSCALL, pt_regs, is_syscall); DEFINE(PT_SIZE, sizeof(struct pt_regs)); BLANK(); } void output_task_defines(void) { COMMENT("SCORE task_struct offsets."); OFFSET(TASK_STATE, task_struct, state); OFFSET(TASK_THREAD_INFO, task_struct, stack); OFFSET(TASK_FLAGS, task_struct, flags); OFFSET(TASK_MM, task_struct, mm); OFFSET(TASK_PID, task_struct, pid); DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct)); BLANK(); } void output_thread_info_defines(void) { COMMENT("SCORE thread_info offsets."); OFFSET(TI_TASK, thread_info, task); OFFSET(TI_EXEC_DOMAIN, thread_info, exec_domain); OFFSET(TI_FLAGS, thread_info, flags); OFFSET(TI_TP_VALUE, thread_info, tp_value); OFFSET(TI_CPU, thread_info, cpu); OFFSET(TI_PRE_COUNT, thread_info, preempt_count); OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); OFFSET(TI_RESTART_BLOCK, thread_info, restart_block); OFFSET(TI_REGS, thread_info, regs); DEFINE(KERNEL_STACK_SIZE, THREAD_SIZE); DEFINE(KERNEL_STACK_MASK, THREAD_MASK); BLANK(); } void output_thread_defines(void) { COMMENT("SCORE specific thread_struct offsets."); OFFSET(THREAD_REG0, task_struct, thread.reg0); OFFSET(THREAD_REG2, task_struct, thread.reg2); OFFSET(THREAD_REG3, task_struct, thread.reg3); OFFSET(THREAD_REG12, task_struct, thread.reg12); OFFSET(THREAD_REG13, task_struct, thread.reg13); OFFSET(THREAD_REG14, task_struct, 
thread.reg14); OFFSET(THREAD_REG15, task_struct, thread.reg15); OFFSET(THREAD_REG16, task_struct, thread.reg16); OFFSET(THREAD_REG17, task_struct, thread.reg17); OFFSET(THREAD_REG18, task_struct, thread.reg18); OFFSET(THREAD_REG19, task_struct, thread.reg19); OFFSET(THREAD_REG20, task_struct, thread.reg20); OFFSET(THREAD_REG21, task_struct, thread.reg21); OFFSET(THREAD_REG29, task_struct, thread.reg29); OFFSET(THREAD_PSR, task_struct, thread.cp0_psr); OFFSET(THREAD_EMA, task_struct, thread.cp0_ema); OFFSET(THREAD_BADUADDR, task_struct, thread.cp0_baduaddr); OFFSET(THREAD_ECODE, task_struct, thread.error_code); OFFSET(THREAD_TRAPNO, task_struct, thread.trap_no); BLANK(); } void output_mm_defines(void) { COMMENT("Size of struct page"); DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page)); BLANK(); COMMENT("Linux mm_struct offsets."); OFFSET(MM_USERS, mm_struct, mm_users); OFFSET(MM_PGD, mm_struct, pgd); OFFSET(MM_CONTEXT, mm_struct, context); BLANK(); DEFINE(_PAGE_SIZE, PAGE_SIZE); DEFINE(_PAGE_SHIFT, PAGE_SHIFT); BLANK(); DEFINE(_PGD_T_SIZE, sizeof(pgd_t)); DEFINE(_PTE_T_SIZE, sizeof(pte_t)); BLANK(); DEFINE(_PGD_ORDER, PGD_ORDER); DEFINE(_PTE_ORDER, PTE_ORDER); BLANK(); DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT); BLANK(); DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD); DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE); BLANK(); } void output_sc_defines(void) { COMMENT("Linux sigcontext offsets."); OFFSET(SC_REGS, sigcontext, sc_regs); OFFSET(SC_MDCEH, sigcontext, sc_mdceh); OFFSET(SC_MDCEL, sigcontext, sc_mdcel); OFFSET(SC_PC, sigcontext, sc_pc); OFFSET(SC_PSR, sigcontext, sc_psr); OFFSET(SC_ECR, sigcontext, sc_ecr); OFFSET(SC_EMA, sigcontext, sc_ema); BLANK(); } void output_signal_defined(void) { COMMENT("Linux signal numbers."); DEFINE(_SIGHUP, SIGHUP); DEFINE(_SIGINT, SIGINT); DEFINE(_SIGQUIT, SIGQUIT); DEFINE(_SIGILL, SIGILL); DEFINE(_SIGTRAP, SIGTRAP); DEFINE(_SIGIOT, SIGIOT); DEFINE(_SIGABRT, SIGABRT); DEFINE(_SIGFPE, SIGFPE); DEFINE(_SIGKILL, SIGKILL); DEFINE(_SIGBUS, SIGBUS); DEFINE(_SIGSEGV, 
SIGSEGV); DEFINE(_SIGSYS, SIGSYS); DEFINE(_SIGPIPE, SIGPIPE); DEFINE(_SIGALRM, SIGALRM); DEFINE(_SIGTERM, SIGTERM); DEFINE(_SIGUSR1, SIGUSR1); DEFINE(_SIGUSR2, SIGUSR2); DEFINE(_SIGCHLD, SIGCHLD); DEFINE(_SIGPWR, SIGPWR); DEFINE(_SIGWINCH, SIGWINCH); DEFINE(_SIGURG, SIGURG); DEFINE(_SIGIO, SIGIO); DEFINE(_SIGSTOP, SIGSTOP); DEFINE(_SIGTSTP, SIGTSTP); DEFINE(_SIGCONT, SIGCONT); DEFINE(_SIGTTIN, SIGTTIN); DEFINE(_SIGTTOU, SIGTTOU); DEFINE(_SIGVTALRM, SIGVTALRM); DEFINE(_SIGPROF, SIGPROF); DEFINE(_SIGXCPU, SIGXCPU); DEFINE(_SIGXFSZ, SIGXFSZ); BLANK(); }
gpl-2.0
AdrianoMartins/android_kernel_semc_msm7x30
security/integrity/ima/ima_queue.c
77
3747
/* * Copyright (C) 2005,2006,2007,2008 IBM Corporation * * Authors: * Serge Hallyn <serue@us.ibm.com> * Reiner Sailer <sailer@watson.ibm.com> * Mimi Zohar <zohar@us.ibm.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. * * File: ima_queue.c * Implements queues that store template measurements and * maintains aggregate over the stored measurements * in the pre-configured TPM PCR (if available). * The measurement list is append-only. No entry is * ever removed or changed during the boot-cycle. */ #include <linux/module.h> #include <linux/rculist.h> #include "ima.h" LIST_HEAD(ima_measurements); /* list of all measurements */ /* key: inode (before secure-hashing a file) */ struct ima_h_table ima_htable = { .len = ATOMIC_LONG_INIT(0), .violations = ATOMIC_LONG_INIT(0), .queue[0 ... IMA_MEASURE_HTABLE_SIZE - 1] = HLIST_HEAD_INIT }; /* mutex protects atomicity of extending measurement list * and extending the TPM PCR aggregate. Since tpm_extend can take * long (and the tpm driver uses a mutex), we can't use the spinlock. */ static DEFINE_MUTEX(ima_extend_list_mutex); /* lookup up the digest value in the hash table, and return the entry */ static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value) { struct ima_queue_entry *qe, *ret = NULL; unsigned int key; struct hlist_node *pos; int rc; key = ima_hash_key(digest_value); rcu_read_lock(); hlist_for_each_entry_rcu(qe, pos, &ima_htable.queue[key], hnext) { rc = memcmp(qe->entry->digest, digest_value, IMA_DIGEST_SIZE); if (rc == 0) { ret = qe; break; } } rcu_read_unlock(); return ret; } /* ima_add_template_entry helper function: * - Add template entry to measurement list and hash table. * * (Called with ima_extend_list_mutex held.) 
*/ static int ima_add_digest_entry(struct ima_template_entry *entry) { struct ima_queue_entry *qe; unsigned int key; qe = kmalloc(sizeof(*qe), GFP_KERNEL); if (qe == NULL) { pr_err("OUT OF MEMORY ERROR creating queue entry.\n"); return -ENOMEM; } qe->entry = entry; INIT_LIST_HEAD(&qe->later); list_add_tail_rcu(&qe->later, &ima_measurements); atomic_long_inc(&ima_htable.len); key = ima_hash_key(entry->digest); hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]); return 0; } static int ima_pcr_extend(const u8 *hash) { int result = 0; if (!ima_used_chip) return result; result = tpm_pcr_extend(TPM_ANY_NUM, CONFIG_IMA_MEASURE_PCR_IDX, hash); if (result != 0) pr_err("Error Communicating to TPM chip\n"); return result; } /* Add template entry to the measurement list and hash table, * and extend the pcr. */ int ima_add_template_entry(struct ima_template_entry *entry, int violation, const char *op, struct inode *inode) { u8 digest[IMA_DIGEST_SIZE]; const char *audit_cause = "hash_added"; int audit_info = 1; int result = 0; mutex_lock(&ima_extend_list_mutex); if (!violation) { memcpy(digest, entry->digest, sizeof digest); if (ima_lookup_digest_entry(digest)) { audit_cause = "hash_exists"; result = -EEXIST; goto out; } } result = ima_add_digest_entry(entry); if (result < 0) { audit_cause = "ENOMEM"; audit_info = 0; goto out; } if (violation) /* invalidate pcr */ memset(digest, 0xff, sizeof digest); result = ima_pcr_extend(digest); if (result != 0) { audit_cause = "TPM error"; audit_info = 0; } out: mutex_unlock(&ima_extend_list_mutex); integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, entry->template.file_name, op, audit_cause, result, audit_info); return result; }
gpl-2.0
cosmoecho/linux_xenvnuma
drivers/usb/gadget/dummy_hcd.c
77
70051
/* * dummy_hcd.c -- Dummy/Loopback USB host and device emulator driver. * * Maintainer: Alan Stern <stern@rowland.harvard.edu> * * Copyright (C) 2003 David Brownell * Copyright (C) 2003-2005 Alan Stern * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ /* * This exposes a device side "USB gadget" API, driven by requests to a * Linux-USB host controller driver. USB traffic is simulated; there's * no need for USB hardware. Use this with two other drivers: * * - Gadget driver, responding to requests (slave); * - Host-side device driver, as already familiar in Linux. * * Having this all in one kernel can help some stages of development, * bypassing some hardware (and driver) issues. UML could help too. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/timer.h> #include <linux/list.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/usb.h> #include <linux/usb/gadget.h> #include <linux/usb/hcd.h> #include <linux/scatterlist.h> #include <asm/byteorder.h> #include <linux/io.h> #include <asm/irq.h> #include <asm/unaligned.h> #define DRIVER_DESC "USB Host+Gadget Emulator" #define DRIVER_VERSION "02 May 2005" #define POWER_BUDGET 500 /* in mA; use 8 for low-power port testing */ static const char driver_name[] = "dummy_hcd"; static const char driver_desc[] = "USB Host+Gadget Emulator"; static const char gadget_name[] = "dummy_udc"; MODULE_DESCRIPTION(DRIVER_DESC); MODULE_AUTHOR("David Brownell"); MODULE_LICENSE("GPL"); struct dummy_hcd_module_parameters { bool is_super_speed; bool is_high_speed; unsigned int num; }; static struct dummy_hcd_module_parameters mod_data = { .is_super_speed = false, 
.is_high_speed = true, .num = 1, }; module_param_named(is_super_speed, mod_data.is_super_speed, bool, S_IRUGO); MODULE_PARM_DESC(is_super_speed, "true to simulate SuperSpeed connection"); module_param_named(is_high_speed, mod_data.is_high_speed, bool, S_IRUGO); MODULE_PARM_DESC(is_high_speed, "true to simulate HighSpeed connection"); module_param_named(num, mod_data.num, uint, S_IRUGO); MODULE_PARM_DESC(num, "number of emulated controllers"); /*-------------------------------------------------------------------------*/ /* gadget side driver data structres */ struct dummy_ep { struct list_head queue; unsigned long last_io; /* jiffies timestamp */ struct usb_gadget *gadget; const struct usb_endpoint_descriptor *desc; struct usb_ep ep; unsigned halted:1; unsigned wedged:1; unsigned already_seen:1; unsigned setup_stage:1; unsigned stream_en:1; }; struct dummy_request { struct list_head queue; /* ep's requests */ struct usb_request req; }; static inline struct dummy_ep *usb_ep_to_dummy_ep(struct usb_ep *_ep) { return container_of(_ep, struct dummy_ep, ep); } static inline struct dummy_request *usb_request_to_dummy_request (struct usb_request *_req) { return container_of(_req, struct dummy_request, req); } /*-------------------------------------------------------------------------*/ /* * Every device has ep0 for control requests, plus up to 30 more endpoints, * in one of two types: * * - Configurable: direction (in/out), type (bulk, iso, etc), and endpoint * number can be changed. Names like "ep-a" are used for this type. * * - Fixed Function: in other cases. some characteristics may be mutable; * that'd be hardware-specific. Names like "ep12out-bulk" are used. * * Gadget drivers are responsible for not setting up conflicting endpoint * configurations, illegal or unsupported packet lengths, and so on. 
*/

/* name of the control endpoint; also used as a sentinel in ep checks */
static const char ep0name[] = "ep0";

/*
 * Endpoint name table; the name suffix ("-bulk", "-iso", "-int", or none)
 * encodes what transfer types dummy_enable() will accept on that endpoint.
 */
static const char *const ep_name[] = {
	ep0name,				/* everyone has ep0 */

	/* act like a pxa250: fifteen fixed function endpoints */
	"ep1in-bulk", "ep2out-bulk", "ep3in-iso", "ep4out-iso", "ep5in-int",
	"ep6in-bulk", "ep7out-bulk", "ep8in-iso", "ep9out-iso", "ep10in-int",
	"ep11in-bulk", "ep12out-bulk", "ep13in-iso", "ep14out-iso",
	"ep15in-int",

	/* or like sa1100: two fixed function endpoints */
	"ep1out-bulk", "ep2in-bulk",

	/* and now some generic EPs so we have enough in multi config */
	"ep3out", "ep4in", "ep5out", "ep6out", "ep7in", "ep8out", "ep9in",
	"ep10out", "ep11out", "ep12in", "ep13out", "ep14in", "ep15out",
};
#define DUMMY_ENDPOINTS	ARRAY_SIZE(ep_name)

/*-------------------------------------------------------------------------*/

/* byte capacity of the emulated single-request IN fifo (see dummy_queue) */
#define FIFO_SIZE		64

/* per-URB host-side bookkeeping, hung off urb->hcpriv */
struct urbp {
	struct urb		*urb;
	struct list_head	urbp_list;	/* linked into dummy_hcd->urbp_list */
	struct sg_mapping_iter	miter;		/* s/g cursor for dummy_perform_transfer */
	u32			miter_started;	/* nonzero once sg_miter_start() was done */
};


enum dummy_rh_state {
	DUMMY_RH_RESET,
	DUMMY_RH_SUSPENDED,
	DUMMY_RH_RUNNING
};

/* per-root-hub (one HS, one SS) host-side state */
struct dummy_hcd {
	struct dummy			*dum;
	enum dummy_rh_state		rh_state;
	struct timer_list		timer;		/* drives URB "scheduling" */
	u32				port_status;	/* wPortStatus | (wPortChange << 16) */
	u32				old_status;	/* previous port_status, for change bits */
	unsigned long			re_timeout;	/* resume-signalling deadline (jiffies) */

	struct usb_device		*udev;		/* the single attached device */
	struct list_head		urbp_list;	/* pending struct urbp entries */
	u32				stream_en_ep;	/* bitmap: stream-enabled bulk eps */
	u8				num_stream[30 / 2];	/* max-streams nibbles, 2 eps/byte */

	unsigned			active:1;
	unsigned			old_active:1;
	unsigned			resuming:1;
};

/* the whole emulated device: one gadget plus its HS/SS host sides */
struct dummy {
	spinlock_t			lock;	/* guards gadget and hcd state below */

	/*
	 * SLAVE/GADGET side support
	 */
	struct dummy_ep			ep[DUMMY_ENDPOINTS];
	int				address;
	struct usb_gadget		gadget;
	struct usb_gadget_driver	*driver;
	struct dummy_request		fifo_req;	/* backing request for the fifo below */
	u8				fifo_buf[FIFO_SIZE];
	u16				devstatus;	/* USB_DEVICE_* status feature bits */
	unsigned			udc_suspended:1;
	unsigned			pullup:1;

	/*
	 * MASTER/HOST side support
	 */
	struct dummy_hcd		*hs_hcd;
	struct dummy_hcd		*ss_hcd;
};

/* usb_hcd <-> dummy_hcd conversions; dummy_hcd lives in hcd->hcd_priv */
static inline struct dummy_hcd *hcd_to_dummy_hcd(struct usb_hcd *hcd)
{
	return (struct dummy_hcd *) (hcd->hcd_priv);
}

static inline struct usb_hcd *dummy_hcd_to_hcd(struct dummy_hcd *dum)
{
	return container_of((void *) dum, struct usb_hcd, hcd_priv);
}

/* device to log host-side messages against */
static inline struct device *dummy_dev(struct dummy_hcd *dum)
{
	return dummy_hcd_to_hcd(dum)->self.controller;
}

/* device to log gadget-side messages against */
static inline struct device *udc_dev(struct dummy *dum)
{
	return dum->gadget.dev.parent;
}

static inline struct dummy *ep_to_dummy(struct dummy_ep *ep)
{
	return container_of(ep->gadget, struct dummy, gadget);
}

/* pick the hcd matching the gadget's negotiated speed (SS vs HS/FS) */
static inline struct dummy_hcd *gadget_to_dummy_hcd(struct usb_gadget *gadget)
{
	struct dummy *dum = container_of(gadget, struct dummy, gadget);

	if (dum->gadget.speed == USB_SPEED_SUPER)
		return dum->ss_hcd;
	else
		return dum->hs_hcd;
}

static inline struct dummy *gadget_dev_to_dummy(struct device *dev)
{
	return container_of(dev, struct dummy, gadget.dev);
}

/*-------------------------------------------------------------------------*/

/* SLAVE/GADGET SIDE UTILITY ROUTINES */

/*
 * Fail every queued request on @ep with -ESHUTDOWN.
 * Called with the spinlock held; it is dropped around each completion
 * callback (gadget completion handlers may re-queue or take the lock).
 */
static void nuke(struct dummy *dum, struct dummy_ep *ep)
{
	while (!list_empty(&ep->queue)) {
		struct dummy_request	*req;

		req = list_entry(ep->queue.next, struct dummy_request, queue);
		list_del_init(&req->queue);
		req->req.status = -ESHUTDOWN;

		spin_unlock(&dum->lock);
		req->req.complete(&ep->ep, &req->req);
		spin_lock(&dum->lock);
	}
}

/* caller must hold lock */
static void stop_activity(struct dummy *dum)
{
	struct dummy_ep	*ep;

	/* prevent any more requests */
	dum->address = 0;

	/* The timer is left running so that outstanding URBs can fail */

	/* nuke any pending requests first, so driver i/o is quiesced */
	list_for_each_entry(ep, &dum->gadget.ep_list, ep.ep_list)
		nuke(dum, ep);

	/* driver now does any non-usb quiescing necessary */
}

/**
 * set_link_state_by_speed() - Sets the current state of the link according to
 *	the hcd speed
 * @dum_hcd: pointer to the dummy_hcd structure to update the link state for
 *
 * This function updates the port_status according to the link state and the
 * speed of the hcd.
 */
static void set_link_state_by_speed(struct dummy_hcd *dum_hcd)
{
	struct dummy *dum = dum_hcd->dum;

	if (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3) {
		/* SuperSpeed root hub: 5 Gbps status bits */
		if ((dum_hcd->port_status & USB_SS_PORT_STAT_POWER) == 0) {
			dum_hcd->port_status = 0;
		} else if (!dum->pullup || dum->udc_suspended) {
			/* UDC suspend must cause a disconnect */
			dum_hcd->port_status &= ~(USB_PORT_STAT_CONNECTION |
						USB_PORT_STAT_ENABLE);
			if ((dum_hcd->old_status &
			     USB_PORT_STAT_CONNECTION) != 0)
				dum_hcd->port_status |=
					(USB_PORT_STAT_C_CONNECTION << 16);
		} else {
			/* device is connected and not suspended */
			dum_hcd->port_status |= (USB_PORT_STAT_CONNECTION |
						 USB_PORT_STAT_SPEED_5GBPS) ;
			if ((dum_hcd->old_status &
			     USB_PORT_STAT_CONNECTION) == 0)
				dum_hcd->port_status |=
					(USB_PORT_STAT_C_CONNECTION << 16);
			/*
			 * NOTE(review): each comparison below masks
			 * port_status with a constant that is not bit 0 and
			 * then compares the result to 1, which can never be
			 * true -- so "active" is never set on this path.
			 * Presumably "!= 0" (or a link-state field compare
			 * for U0) was intended; confirm against the upstream
			 * dummy_hcd driver before changing.
			 */
			if ((dum_hcd->port_status &
			     USB_PORT_STAT_ENABLE) == 1 &&
				(dum_hcd->port_status &
				 USB_SS_PORT_LS_U0) == 1 &&
				dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
				dum_hcd->active = 1;
		}
	} else {
		/* USB 2.0 root hub */
		if ((dum_hcd->port_status & USB_PORT_STAT_POWER) == 0) {
			dum_hcd->port_status = 0;
		} else if (!dum->pullup || dum->udc_suspended) {
			/* UDC suspend must cause a disconnect */
			dum_hcd->port_status &= ~(USB_PORT_STAT_CONNECTION |
						USB_PORT_STAT_ENABLE |
						USB_PORT_STAT_LOW_SPEED |
						USB_PORT_STAT_HIGH_SPEED |
						USB_PORT_STAT_SUSPEND);
			if ((dum_hcd->old_status &
			     USB_PORT_STAT_CONNECTION) != 0)
				dum_hcd->port_status |=
					(USB_PORT_STAT_C_CONNECTION << 16);
		} else {
			dum_hcd->port_status |= USB_PORT_STAT_CONNECTION;
			if ((dum_hcd->old_status &
			     USB_PORT_STAT_CONNECTION) == 0)
				dum_hcd->port_status |=
					(USB_PORT_STAT_C_CONNECTION << 16);
			if ((dum_hcd->port_status &
			     USB_PORT_STAT_ENABLE) == 0)
				dum_hcd->port_status &= ~USB_PORT_STAT_SUSPEND;
			else if ((dum_hcd->port_status &
			     USB_PORT_STAT_SUSPEND) == 0 &&
					dum_hcd->rh_state !=
					DUMMY_RH_SUSPENDED)
				dum_hcd->active = 1;
		}
	}
}

/*
 * Recompute port status and notify the gadget driver of connect/suspend/
 * resume transitions.  Caller must hold the lock; it is dropped around the
 * gadget driver's disconnect/suspend/resume callbacks.
 */
static void set_link_state(struct dummy_hcd *dum_hcd)
{
	struct dummy *dum = dum_hcd->dum;

	dum_hcd->active = 0;
	/* ignore speed-mismatched hcd/gadget pairings (SS gadget on the
	 * HS hcd, or non-SS gadget on the SS hcd)
	 */
	if (dum->pullup)
		if ((dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3 &&
		     dum->gadget.speed != USB_SPEED_SUPER) ||
		    (dummy_hcd_to_hcd(dum_hcd)->speed != HCD_USB3 &&
		     dum->gadget.speed == USB_SPEED_SUPER))
			return;
	set_link_state_by_speed(dum_hcd);

	if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 ||
	     dum_hcd->active)
		dum_hcd->resuming = 0;

	/* if !connected or reset */
	if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0 ||
			(dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) {
		/*
		 * We're connected and not reset (reset occurred now),
		 * and driver attached - disconnect!
		 */
		if ((dum_hcd->old_status & USB_PORT_STAT_CONNECTION) != 0 &&
		    (dum_hcd->old_status & USB_PORT_STAT_RESET) == 0 &&
		    dum->driver) {
			stop_activity(dum);
			spin_unlock(&dum->lock);
			dum->driver->disconnect(&dum->gadget);
			spin_lock(&dum->lock);
		}
	} else if (dum_hcd->active != dum_hcd->old_active) {
		/* active flipped: forward suspend/resume to the gadget */
		if (dum_hcd->old_active && dum->driver->suspend) {
			spin_unlock(&dum->lock);
			dum->driver->suspend(&dum->gadget);
			spin_lock(&dum->lock);
		} else if (!dum_hcd->old_active && dum->driver->resume) {
			spin_unlock(&dum->lock);
			dum->driver->resume(&dum->gadget);
			spin_lock(&dum->lock);
		}
	}

	dum_hcd->old_status = dum_hcd->port_status;
	dum_hcd->old_active = dum_hcd->active;
}

/*-------------------------------------------------------------------------*/

/* SLAVE/GADGET SIDE DRIVER
 *
 * This only tracks gadget state. All the work is done when the host
 * side tries some (emulated) i/o operation. Real device controller
 * drivers would do real i/o using dma, fifos, irqs, timers, etc.
 */

/* "is this root hub port enabled" -- argument is a dummy_hcd */
#define is_enabled(dum) \
	(dum->port_status & USB_PORT_STAT_ENABLE)

/*
 * usb_ep_ops.enable: validate the descriptor against the endpoint's
 * name-encoded capabilities and the current gadget speed, then arm it.
 * Note the inner switches fall through deliberately ("save a return
 * statement" comments) -- do not "fix" them with breaks.
 */
static int dummy_enable(struct usb_ep *_ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dummy		*dum;
	struct dummy_hcd	*dum_hcd;
	struct dummy_ep		*ep;
	unsigned		max;
	int			retval;

	ep = usb_ep_to_dummy_ep(_ep);
	if (!_ep || !desc || ep->desc || _ep->name == ep0name
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;
	dum = ep_to_dummy(ep);
	if (!dum->driver)
		return -ESHUTDOWN;

	dum_hcd = gadget_to_dummy_hcd(&dum->gadget);
	if (!is_enabled(dum_hcd))
		return -ESHUTDOWN;

	/*
	 * For HS/FS devices only bits 0..10 of the wMaxPacketSize represent the
	 * maximum packet size.
	 * For SS devices the wMaxPacketSize is limited by 1024.
	 */
	max = usb_endpoint_maxp(desc) & 0x7ff;

	/* drivers must not request bad settings, since lower levels
	 * (hardware or its drivers) may not check. some endpoints
	 * can't do iso, many have maxpacket limitations, etc.
	 *
	 * since this "hardware" driver is here to help debugging, we
	 * have some extra sanity checks. (there could be more though,
	 * especially for "ep9out" style fixed function ones.)
	 */
	retval = -EINVAL;
	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_BULK:
		if (strstr(ep->ep.name, "-iso")
				|| strstr(ep->ep.name, "-int")) {
			goto done;
		}
		switch (dum->gadget.speed) {
		case USB_SPEED_SUPER:
			if (max == 1024)
				break;
			goto done;
		case USB_SPEED_HIGH:
			if (max == 512)
				break;
			goto done;
		case USB_SPEED_FULL:
			if (max == 8 || max == 16 || max == 32 || max == 64)
				/* we'll fake any legal size */
				break;
			/* save a return statement */
		default:
			goto done;
		}
		break;
	case USB_ENDPOINT_XFER_INT:
		if (strstr(ep->ep.name, "-iso")) /* bulk is ok */
			goto done;
		/* real hardware might not handle all packet sizes */
		switch (dum->gadget.speed) {
		case USB_SPEED_SUPER:
		case USB_SPEED_HIGH:
			if (max <= 1024)
				break;
			/* save a return statement */
		case USB_SPEED_FULL:
			if (max <= 64)
				break;
			/* save a return statement */
		default:
			if (max <= 8)
				break;
			goto done;
		}
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (strstr(ep->ep.name, "-bulk")
				|| strstr(ep->ep.name, "-int"))
			goto done;
		/* real hardware might not handle all packet sizes */
		switch (dum->gadget.speed) {
		case USB_SPEED_SUPER:
		case USB_SPEED_HIGH:
			if (max <= 1024)
				break;
			/* save a return statement */
		case USB_SPEED_FULL:
			if (max <= 1023)
				break;
			/* save a return statement */
		default:
			goto done;
		}
		break;
	default:
		/* few chips support control except on ep0 */
		goto done;
	}

	_ep->maxpacket = max;
	if (usb_ss_max_streams(_ep->comp_desc)) {
		/* streams are only defined for (SS) bulk endpoints */
		if (!usb_endpoint_xfer_bulk(desc)) {
			dev_err(udc_dev(dum), "Can't enable stream support on "
					"non-bulk ep %s\n", _ep->name);
			return -EINVAL;
		}
		ep->stream_en = 1;
	}
	ep->desc = desc;

	dev_dbg(udc_dev(dum), "enabled %s (ep%d%s-%s) maxpacket %d stream %s\n",
		_ep->name,
		desc->bEndpointAddress & 0x0f,
		(desc->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
		({ char *val;
		 switch (usb_endpoint_type(desc)) {
		 case USB_ENDPOINT_XFER_BULK:
			 val = "bulk";
			 break;
		 case USB_ENDPOINT_XFER_ISOC:
			 val = "iso";
			 break;
		 case USB_ENDPOINT_XFER_INT:
			 val = "intr";
			 break;
		 default:
			 val = "ctrl";
			 break;
		 } val; }),
		max, ep->stream_en ? "enabled" : "disabled");

	/* at this point real hardware should be NAKing transfers
	 * to that endpoint, until a buffer is queued to it.
	 */
	ep->halted = ep->wedged = 0;
	retval = 0;
done:
	return retval;
}

/* usb_ep_ops.disable: drop the descriptor and fail any queued requests */
static int dummy_disable(struct usb_ep *_ep)
{
	struct dummy_ep		*ep;
	struct dummy		*dum;
	unsigned long		flags;
	int			retval;

	ep = usb_ep_to_dummy_ep(_ep);
	if (!_ep || !ep->desc || _ep->name == ep0name)
		return -EINVAL;
	dum = ep_to_dummy(ep);

	spin_lock_irqsave(&dum->lock, flags);
	ep->desc = NULL;
	ep->stream_en = 0;
	retval = 0;
	nuke(dum, ep);
	spin_unlock_irqrestore(&dum->lock, flags);

	dev_dbg(udc_dev(dum), "disabled %s\n", _ep->name);
	return retval;
}

/* usb_ep_ops.alloc_request: plain kzalloc; freed by dummy_free_request() */
static struct usb_request *dummy_alloc_request(struct usb_ep *_ep,
		gfp_t mem_flags)
{
	struct dummy_ep		*ep;
	struct dummy_request	*req;

	if (!_ep)
		return NULL;
	ep = usb_ep_to_dummy_ep(_ep);

	req = kzalloc(sizeof(*req), mem_flags);
	if (!req)
		return NULL;
	INIT_LIST_HEAD(&req->queue);
	return &req->req;
}

/* usb_ep_ops.free_request: must not be called on a still-queued request */
static void dummy_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct dummy_request	*req;

	if (!_ep || !_req) {
		WARN_ON(1);
		return;
	}

	req = usb_request_to_dummy_request(_req);
	WARN_ON(!list_empty(&req->queue));
	kfree(req);
}

/* no-op completion for the internal fifo_req (see dummy_queue) */
static void fifo_complete(struct usb_ep *ep, struct usb_request *req)
{
}

/*
 * usb_ep_ops.queue: queue a request on the endpoint.  Small IN requests
 * may be copied into the single-request FIFO and completed immediately
 * (with the lock dropped around the completion callback).
 */
static int dummy_queue(struct usb_ep *_ep, struct usb_request *_req,
		gfp_t mem_flags)
{
	struct dummy_ep		*ep;
	struct dummy_request	*req;
	struct dummy		*dum;
	struct dummy_hcd	*dum_hcd;
	unsigned long		flags;

	req = usb_request_to_dummy_request(_req);
	if (!_req || !list_empty(&req->queue) || !_req->complete)
		return -EINVAL;

	ep = usb_ep_to_dummy_ep(_ep);
	if (!_ep || (!ep->desc && _ep->name != ep0name))
		return -EINVAL;

	dum = ep_to_dummy(ep);
	dum_hcd = gadget_to_dummy_hcd(&dum->gadget);
	if (!dum->driver || !is_enabled(dum_hcd))
		return -ESHUTDOWN;

#if 0
	dev_dbg(udc_dev(dum), "ep %p queue req %p to %s, len %d buf %p\n",
			ep, _req, _ep->name, _req->length, _req->buf);
#endif
	_req->status = -EINPROGRESS;
	_req->actual = 0;
	spin_lock_irqsave(&dum->lock, flags);

	/* implement an emulated single-request FIFO */
	if (ep->desc && (ep->desc->bEndpointAddress & USB_DIR_IN) &&
			list_empty(&dum->fifo_req.queue) &&
			list_empty(&ep->queue) &&
			_req->length <= FIFO_SIZE) {
		req = &dum->fifo_req;
		req->req = *_req;
		req->req.buf = dum->fifo_buf;
		memcpy(dum->fifo_buf, _req->buf, _req->length);
		req->req.context = dum;
		req->req.complete = fifo_complete;

		list_add_tail(&req->queue, &ep->queue);
		spin_unlock(&dum->lock);
		_req->actual = _req->length;
		_req->status = 0;
		_req->complete(_ep, _req);
		spin_lock(&dum->lock);
	} else
		list_add_tail(&req->queue, &ep->queue);
	spin_unlock_irqrestore(&dum->lock, flags);

	/* real hardware would likely enable transfers here, in case
	 * it'd been left NAKing.
	 */
	return 0;
}

/*
 * usb_ep_ops.dequeue: unlink a queued request and complete it with
 * -ECONNRESET.  NOTE(review): uses the legacy local_irq_save() +
 * spin_lock() pairing instead of spin_lock_irqsave().
 */
static int dummy_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct dummy_ep		*ep;
	struct dummy		*dum;
	int			retval = -EINVAL;
	unsigned long		flags;
	struct dummy_request	*req = NULL;

	if (!_ep || !_req)
		return retval;
	ep = usb_ep_to_dummy_ep(_ep);
	dum = ep_to_dummy(ep);

	if (!dum->driver)
		return -ESHUTDOWN;

	local_irq_save(flags);
	spin_lock(&dum->lock);
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req) {
			list_del_init(&req->queue);
			_req->status = -ECONNRESET;
			retval = 0;
			break;
		}
	}
	spin_unlock(&dum->lock);

	if (retval == 0) {
		dev_dbg(udc_dev(dum),
				"dequeued req %p from %s, len %d buf %p\n",
				req, _ep->name, _req->length, _req->buf);
		_req->complete(_ep, _req);
	}
	local_irq_restore(flags);
	return retval;
}

/*
 * Common implementation for set_halt and set_wedge.  Refuses to halt an
 * IN endpoint that still has queued data (-EAGAIN), matching real UDCs.
 */
static int
dummy_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
{
	struct dummy_ep		*ep;
	struct dummy		*dum;

	if (!_ep)
		return -EINVAL;
	ep = usb_ep_to_dummy_ep(_ep);
	dum = ep_to_dummy(ep);
	if (!dum->driver)
		return -ESHUTDOWN;
	if (!value)
		ep->halted = ep->wedged = 0;
	else if (ep->desc && (ep->desc->bEndpointAddress & USB_DIR_IN) &&
			!list_empty(&ep->queue))
		return -EAGAIN;
	else {
		ep->halted = 1;
		if (wedged)
			ep->wedged = 1;
	}
	/* FIXME clear emulated data toggle too */
	return 0;
}
/* usb_ep_ops.set_halt */
static int dummy_set_halt(struct usb_ep *_ep, int value)
{
	return dummy_set_halt_and_wedge(_ep, value, 0);
}

/* usb_ep_ops.set_wedge: halt that only the host can clear; not valid on ep0 */
static int dummy_set_wedge(struct usb_ep *_ep)
{
	if (!_ep || _ep->name == ep0name)
		return -EINVAL;
	return dummy_set_halt_and_wedge(_ep, 1, 1);
}

static const struct usb_ep_ops dummy_ep_ops = {
	.enable		= dummy_enable,
	.disable	= dummy_disable,

	.alloc_request	= dummy_alloc_request,
	.free_request	= dummy_free_request,

	.queue		= dummy_queue,
	.dequeue	= dummy_dequeue,

	.set_halt	= dummy_set_halt,
	.set_wedge	= dummy_set_wedge,
};

/*-------------------------------------------------------------------------*/

/* there are both host and device side versions of this call ... */
/*
 * usb_gadget_ops.get_frame: fake a frame number from wall-clock time
 * (milliseconds within the current second, 0..999).
 */
static int dummy_g_get_frame(struct usb_gadget *_gadget)
{
	struct timeval tv;

	do_gettimeofday(&tv);
	return tv.tv_usec / 1000;
}

/*
 * usb_gadget_ops.wakeup: emulate remote wakeup.  Only legal when the
 * host enabled it (devstatus) and the port is actually suspended.
 */
static int dummy_wakeup(struct usb_gadget *_gadget)
{
	struct dummy_hcd *dum_hcd;

	dum_hcd = gadget_to_dummy_hcd(_gadget);
	if (!(dum_hcd->dum->devstatus & ((1 << USB_DEVICE_B_HNP_ENABLE)
				| (1 << USB_DEVICE_REMOTE_WAKEUP))))
		return -EINVAL;
	if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0)
		return -ENOLINK;
	if ((dum_hcd->port_status & USB_PORT_STAT_SUSPEND) == 0 &&
			 dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
		return -EIO;

	/* FIXME: What if the root hub is suspended but the port isn't?
	 */

	/* hub notices our request, issues downstream resume, etc */
	dum_hcd->resuming = 1;
	dum_hcd->re_timeout = jiffies + msecs_to_jiffies(20);
	mod_timer(&dummy_hcd_to_hcd(dum_hcd)->rh_timer, dum_hcd->re_timeout);
	return 0;
}

/* usb_gadget_ops.set_selfpowered: track the status bit for GET_STATUS */
static int dummy_set_selfpowered(struct usb_gadget *_gadget, int value)
{
	struct dummy	*dum;

	dum = gadget_to_dummy_hcd(_gadget)->dum;
	if (value)
		dum->devstatus |= (1 << USB_DEVICE_SELF_POWERED);
	else
		dum->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
	return 0;
}

/*
 * Resize ep0 for the negotiated speed.  For SuperSpeed the gadget stack
 * stores the maxpacket exponent (2^9 = 512) rather than the byte count.
 */
static void dummy_udc_update_ep0(struct dummy *dum)
{
	if (dum->gadget.speed == USB_SPEED_SUPER)
		dum->ep[0].ep.maxpacket = 9;
	else
		dum->ep[0].ep.maxpacket = 64;
}

/*
 * usb_gadget_ops.pullup: (dis)connect the emulated device.  On connect,
 * pick the speed from the module parameters capped by the driver's
 * max_speed, then recompute the link state and poll the root hub.
 */
static int dummy_pullup(struct usb_gadget *_gadget, int value)
{
	struct dummy_hcd *dum_hcd;
	struct dummy	*dum;
	unsigned long	flags;

	dum = gadget_dev_to_dummy(&_gadget->dev);

	if (value && dum->driver) {
		if (mod_data.is_super_speed)
			dum->gadget.speed = dum->driver->max_speed;
		else if (mod_data.is_high_speed)
			dum->gadget.speed = min_t(u8, USB_SPEED_HIGH,
					dum->driver->max_speed);
		else
			dum->gadget.speed = USB_SPEED_FULL;
		dummy_udc_update_ep0(dum);

		if (dum->gadget.speed < dum->driver->max_speed)
			dev_dbg(udc_dev(dum), "This device can perform faster"
				" if you connect it to a %s port...\n",
				usb_speed_string(dum->driver->max_speed));
	}
	dum_hcd = gadget_to_dummy_hcd(_gadget);

	spin_lock_irqsave(&dum->lock, flags);
	dum->pullup = (value != 0);
	set_link_state(dum_hcd);
	spin_unlock_irqrestore(&dum->lock, flags);

	usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
	return 0;
}

static int dummy_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int dummy_udc_stop(struct usb_gadget *g,
		struct usb_gadget_driver *driver);

static const struct usb_gadget_ops dummy_ops = {
	.get_frame	= dummy_g_get_frame,
	.wakeup		= dummy_wakeup,
	.set_selfpowered = dummy_set_selfpowered,
	.pullup		= dummy_pullup,
	.udc_start	= dummy_udc_start,
	.udc_stop	= dummy_udc_stop,
};

/*-------------------------------------------------------------------------*/

/*
"function" sysfs attribute */

/* sysfs: expose the bound gadget driver's "function" string (read-only) */
static ssize_t function_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct dummy	*dum = gadget_dev_to_dummy(dev);

	if (!dum->driver || !dum->driver->function)
		return 0;
	return scnprintf(buf, PAGE_SIZE, "%s\n", dum->driver->function);
}
static DEVICE_ATTR_RO(function);

/*-------------------------------------------------------------------------*/

/*
 * Driver registration/unregistration.
 *
 * This is basically hardware-specific; there's usually only one real USB
 * device (not host) controller since that's how USB devices are intended
 * to work. So most implementations of these api calls will rely on the
 * fact that only one driver will ever bind to the hardware. But curious
 * hardware can be built with discrete components, so the gadget API doesn't
 * require that assumption.
 *
 * For this emulator, it might be convenient to create a usb slave device
 * for each driver that registers: just add to a big root hub.
 */

/* usb_gadget_ops.udc_start: bind a gadget driver to the emulated UDC */
static int dummy_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dummy_hcd	*dum_hcd = gadget_to_dummy_hcd(g);
	struct dummy		*dum = dum_hcd->dum;

	if (driver->max_speed == USB_SPEED_UNKNOWN)
		return -EINVAL;

	/*
	 * SLAVE side init ... the layer above hardware, which
	 * can't enumerate without help from the driver we're binding.
	 */

	dum->devstatus = 0;

	dum->driver = driver;
	dev_dbg(udc_dev(dum), "binding gadget driver '%s'\n",
			driver->driver.name);
	return 0;
}

/* usb_gadget_ops.udc_stop: unbind the gadget driver */
static int dummy_udc_stop(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dummy_hcd	*dum_hcd = gadget_to_dummy_hcd(g);
	struct dummy		*dum = dum_hcd->dum;

	if (driver)
		dev_dbg(udc_dev(dum), "unregister gadget driver '%s'\n",
				driver->driver.name);

	dum->driver = NULL;

	return 0;
}

#undef is_enabled

/* The gadget structure is stored inside the hcd structure and will be
 * released along with it.
 */

/* initialize every emulated endpoint and link all but ep0 into ep_list */
static void init_dummy_udc_hw(struct dummy *dum)
{
	int i;

	INIT_LIST_HEAD(&dum->gadget.ep_list);
	for (i = 0; i < DUMMY_ENDPOINTS; i++) {
		struct dummy_ep	*ep = &dum->ep[i];

		if (!ep_name[i])
			break;
		ep->ep.name = ep_name[i];
		ep->ep.ops = &dummy_ep_ops;
		list_add_tail(&ep->ep.ep_list, &dum->gadget.ep_list);
		ep->halted = ep->wedged = ep->already_seen =
				ep->setup_stage = 0;
		ep->ep.maxpacket = ~0;
		ep->ep.max_streams = 16;
		ep->last_io = jiffies;
		ep->gadget = &dum->gadget;
		ep->desc = NULL;
		INIT_LIST_HEAD(&ep->queue);
	}

	dum->gadget.ep0 = &dum->ep[0].ep;
	list_del_init(&dum->ep[0].ep.ep_list);
	INIT_LIST_HEAD(&dum->fifo_req.queue);

#ifdef CONFIG_USB_OTG
	dum->gadget.is_otg = 1;
#endif
}

/* platform probe: register the emulated UDC and its sysfs attribute */
static int dummy_udc_probe(struct platform_device *pdev)
{
	struct dummy	*dum;
	int		rc;

	dum = *((void **)dev_get_platdata(&pdev->dev));
	dum->gadget.name = gadget_name;
	dum->gadget.ops = &dummy_ops;
	dum->gadget.max_speed = USB_SPEED_SUPER;

	dum->gadget.dev.parent = &pdev->dev;
	init_dummy_udc_hw(dum);

	rc = usb_add_gadget_udc(&pdev->dev, &dum->gadget);
	if (rc < 0)
		goto err_udc;

	rc = device_create_file(&dum->gadget.dev, &dev_attr_function);
	if (rc < 0)
		goto err_dev;
	platform_set_drvdata(pdev, dum);
	return rc;

err_dev:
	usb_del_gadget_udc(&dum->gadget);
err_udc:
	return rc;
}

/* platform remove: tear down in reverse order of probe */
static int dummy_udc_remove(struct platform_device *pdev)
{
	struct dummy	*dum = platform_get_drvdata(pdev);

	device_remove_file(&dum->gadget.dev, &dev_attr_function);
	usb_del_gadget_udc(&dum->gadget);
	return 0;
}

/* record suspend state and recompute the link (drives a disconnect) */
static void dummy_udc_pm(struct dummy *dum, struct dummy_hcd *dum_hcd,
		int suspend)
{
	spin_lock_irq(&dum->lock);
	dum->udc_suspended = suspend;
	set_link_state(dum_hcd);
	spin_unlock_irq(&dum->lock);
}

static int dummy_udc_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct dummy		*dum = platform_get_drvdata(pdev);
	struct dummy_hcd	*dum_hcd = gadget_to_dummy_hcd(&dum->gadget);

	dev_dbg(&pdev->dev, "%s\n", __func__);
	dummy_udc_pm(dum, dum_hcd, 1);
	usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
	return 0;
}

static int dummy_udc_resume(struct platform_device *pdev)
{
	struct dummy		*dum = platform_get_drvdata(pdev);
	struct dummy_hcd	*dum_hcd = gadget_to_dummy_hcd(&dum->gadget);

	dev_dbg(&pdev->dev, "%s\n", __func__);
	dummy_udc_pm(dum, dum_hcd, 0);
	usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
	return 0;
}

static struct platform_driver dummy_udc_driver = {
	.probe		= dummy_udc_probe,
	.remove		= dummy_udc_remove,
	.suspend	= dummy_udc_suspend,
	.resume		= dummy_udc_resume,
	.driver		= {
		.name	= (char *) gadget_name,
		.owner	= THIS_MODULE,
	},
};

/*-------------------------------------------------------------------------*/

/* endpoint index for the stream bitmaps: (ep number << 1) | direction */
static unsigned int dummy_get_ep_idx(const struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	index = usb_endpoint_num(desc) << 1;
	if (usb_endpoint_dir_in(desc))
		index |= 1;
	return index;
}

/* MASTER/HOST SIDE DRIVER
 *
 * this uses the hcd framework to hook up to host side drivers.
 * its root hub will only have one device, otherwise it acts like
 * a normal host controller.
 *
 * when urbs are queued, they're just stuck on a list that we
 * scan in a timer callback. that callback connects writes from
 * the host with reads from the device, and so on, based on the
 * usb 2.0 rules.
 */

/* nonzero iff streams are enabled on the urb's (bulk) endpoint */
static int dummy_ep_stream_en(struct dummy_hcd *dum_hcd, struct urb *urb)
{
	const struct usb_endpoint_descriptor *desc = &urb->ep->desc;
	u32 index;

	if (!usb_endpoint_xfer_bulk(desc))
		return 0;

	index = dummy_get_ep_idx(desc);
	return (1 << index) & dum_hcd->stream_en_ep;
}

/*
 * The max stream number is saved as a nibble so for the 30 possible endpoints
 * we only need 15 bytes of memory. Therefore we are limited to max 16 streams
 * (0 means we use only 1 stream). The maximum according to the spec is 16bit
 * so if the 16 stream limit is about to go, the array size should be
 * incremented to 30 elements of type u16.
 */

/* read the stream-count nibble for @pipe; result is 1..16 (count, not max id) */
static int get_max_streams_for_pipe(struct dummy_hcd *dum_hcd,
		unsigned int pipe)
{
	int max_streams;

	max_streams = dum_hcd->num_stream[usb_pipeendpoint(pipe)];
	if (usb_pipeout(pipe))
		max_streams >>= 4;	/* OUT direction lives in the high nibble */
	else
		max_streams &= 0xf;
	max_streams++;
	return max_streams;
}

/* store @streams (1..16) into the direction nibble for @pipe */
static void set_max_streams_for_pipe(struct dummy_hcd *dum_hcd,
		unsigned int pipe, unsigned int streams)
{
	int max_streams;

	streams--;			/* stored biased by one, see getter */
	max_streams = dum_hcd->num_stream[usb_pipeendpoint(pipe)];
	if (usb_pipeout(pipe)) {
		streams <<= 4;
		max_streams &= 0xf;
	} else {
		max_streams &= 0xf0;
	}
	max_streams |= streams;
	dum_hcd->num_stream[usb_pipeendpoint(pipe)] = max_streams;
}

/*
 * Check that the urb's stream_id is consistent with the endpoint's stream
 * setup: zero iff streams are disabled, otherwise within the allocated
 * range.  An out-of-range id is a host-stack bug, hence the BUG().
 */
static int dummy_validate_stream(struct dummy_hcd *dum_hcd, struct urb *urb)
{
	unsigned int max_streams;
	int enabled;

	enabled = dummy_ep_stream_en(dum_hcd, urb);
	if (!urb->stream_id) {
		if (enabled)
			return -EINVAL;
		return 0;
	}
	if (!enabled)
		return -EINVAL;

	max_streams = get_max_streams_for_pipe(dum_hcd,
			usb_pipeendpoint(urb->pipe));
	if (urb->stream_id > max_streams) {
		dev_err(dummy_dev(dum_hcd), "Stream id %d is out of range.\n",
				urb->stream_id);
		BUG();
		return -EINVAL;
	}
	return 0;
}

/*
 * hc_driver.urb_enqueue: validate, link, and park the urb on urbp_list;
 * the timer callback does the actual "transfer" work later.
 */
static int dummy_urb_enqueue(
	struct usb_hcd			*hcd,
	struct urb			*urb,
	gfp_t				mem_flags
) {
	struct dummy_hcd *dum_hcd;
	struct urbp	*urbp;
	unsigned long	flags;
	int		rc;

	urbp = kmalloc(sizeof *urbp, mem_flags);
	if (!urbp)
		return -ENOMEM;
	urbp->urb = urb;
	urbp->miter_started = 0;

	dum_hcd = hcd_to_dummy_hcd(hcd);
	spin_lock_irqsave(&dum_hcd->dum->lock, flags);

	rc = dummy_validate_stream(dum_hcd, urb);
	if (rc) {
		kfree(urbp);
		goto done;
	}

	rc = usb_hcd_link_urb_to_ep(hcd, urb);
	if (rc) {
		kfree(urbp);
		goto done;
	}

	/* remember (and pin) the single attached device */
	if (!dum_hcd->udev) {
		dum_hcd->udev = urb->dev;
		usb_get_dev(dum_hcd->udev);
	} else if (unlikely(dum_hcd->udev != urb->dev))
		dev_err(dummy_dev(dum_hcd), "usb_device address has changed!\n");

	list_add_tail(&urbp->urbp_list, &dum_hcd->urbp_list);
	urb->hcpriv = urbp;
	if (usb_pipetype(urb->pipe) == PIPE_CONTROL)
		urb->error_count = 1;		/* mark as a new urb */

	/* kick the scheduler, it'll do the rest */
	if (!timer_pending(&dum_hcd->timer))
		mod_timer(&dum_hcd->timer, jiffies + 1);

 done:
	spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
	return rc;
}

/*
 * hc_driver.urb_dequeue: mark the urb unlinked; giveback happens in the
 * timer callback, which is rescheduled here if the rh isn't running.
 */
static int dummy_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct dummy_hcd *dum_hcd;
	unsigned long	flags;
	int		rc;

	/* giveback happens automatically in timer callback,
	 * so make sure the callback happens */
	dum_hcd = hcd_to_dummy_hcd(hcd);
	spin_lock_irqsave(&dum_hcd->dum->lock, flags);

	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (!rc && dum_hcd->rh_state != DUMMY_RH_RUNNING &&
			!list_empty(&dum_hcd->urbp_list))
		mod_timer(&dum_hcd->timer, jiffies);

	spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
	return rc;
}

/*
 * Copy @len bytes between the urb's buffer (linear or scatter/gather)
 * and the gadget request's buffer, direction per usb_pipein().
 * Returns bytes moved, or -EINVAL if the s/g list runs out early.
 */
static int dummy_perform_transfer(struct urb *urb, struct dummy_request *req,
		u32 len)
{
	void *ubuf, *rbuf;
	struct urbp *urbp = urb->hcpriv;
	int to_host;
	struct sg_mapping_iter *miter = &urbp->miter;
	u32 trans = 0;
	u32 this_sg;
	bool next_sg;

	to_host = usb_pipein(urb->pipe);
	rbuf = req->req.buf + req->req.actual;

	if (!urb->num_sgs) {
		/* simple contiguous transfer buffer */
		ubuf = urb->transfer_buffer + urb->actual_length;
		if (to_host)
			memcpy(ubuf, rbuf, len);
		else
			memcpy(rbuf, ubuf, len);
		return len;
	}

	/* scatter/gather: iterate segments, resuming across calls */
	if (!urbp->miter_started) {
		u32 flags = SG_MITER_ATOMIC;

		if (to_host)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(miter, urb->sg, urb->num_sgs, flags);
		urbp->miter_started = 1;
	}
	next_sg = sg_miter_next(miter);
	if (next_sg == false) {
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	do {
		ubuf = miter->addr;
		this_sg = min_t(u32, len, miter->length);
		miter->consumed = this_sg;
		trans += this_sg;

		if (to_host)
			memcpy(ubuf, rbuf, this_sg);
		else
			memcpy(rbuf, ubuf, this_sg);
		len -= this_sg;

		if (!len)
			break;
		next_sg = sg_miter_next(miter);
		if (next_sg == false) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}

		rbuf += this_sg;
	} while (1);

	sg_miter_stop(miter);
	return trans;
}

/* transfer up to a frame's worth; caller must own lock */
static int transfer(struct dummy_hcd *dum_hcd, struct urb *urb,
		struct dummy_ep *ep, int limit, int *status)
{
	struct dummy		*dum = dum_hcd->dum;
	struct dummy_request	*req;

top:
	/* if there's no request queued, the device is NAKing; return */
	list_for_each_entry(req, &ep->queue, queue) {
		unsigned	host_len, dev_len, len;
		int		is_short, to_host;
		int		rescan = 0;

		if (dummy_ep_stream_en(dum_hcd, urb)) {
			if ((urb->stream_id != req->req.stream_id))
				continue;
		}

		/* 1..N packets of ep->ep.maxpacket each ... the last one
		 * may be short (including zero length).
		 *
		 * writer can send a zlp explicitly (length 0) or implicitly
		 * (length mod maxpacket zero, and 'zero' flag); they always
		 * terminate reads.
		 */
		host_len = urb->transfer_buffer_length - urb->actual_length;
		dev_len = req->req.length - req->req.actual;
		len = min(host_len, dev_len);
		/* FIXME update emulated data toggle too */

		to_host = usb_pipein(urb->pipe);
		if (unlikely(len == 0))
			is_short = 1;
		else {
			/* not enough bandwidth left? */
			if (limit < ep->ep.maxpacket && limit < len)
				break;
			len = min_t(unsigned, len, limit);
			if (len == 0)
				break;

			/* use an extra pass for the final short packet */
			if (len > ep->ep.maxpacket) {
				rescan = 1;
				len -= (len % ep->ep.maxpacket);
			}
			is_short = (len % ep->ep.maxpacket) != 0;

			len = dummy_perform_transfer(urb, req, len);

			ep->last_io = jiffies;
			if ((int)len < 0) {
				req->req.status = len;
			} else {
				limit -= len;
				urb->actual_length += len;
				req->req.actual += len;
			}
		}

		/* short packets terminate, maybe with overflow/underflow.
		 * it's only really an error to write too much.
		 *
		 * partially filling a buffer optionally blocks queue advances
		 * (so completion handlers can clean up the queue) but we don't
		 * need to emulate such data-in-flight.
		 */
		if (is_short) {
			if (host_len == dev_len) {
				req->req.status = 0;
				*status = 0;
			} else if (to_host) {
				req->req.status = 0;
				if (dev_len > host_len)
					*status = -EOVERFLOW;
				else
					*status = 0;
			} else if (!to_host) {
				*status = 0;
				if (host_len > dev_len)
					req->req.status = -EOVERFLOW;
				else
					req->req.status = 0;
			}

		/* many requests terminate without a short packet */
		} else {
			if (req->req.length == req->req.actual
					&& !req->req.zero)
				req->req.status = 0;
			if (urb->transfer_buffer_length == urb->actual_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				*status = 0;
		}

		/* device side completion --> continuable */
		if (req->req.status != -EINPROGRESS) {
			list_del_init(&req->queue);

			spin_unlock(&dum->lock);
			req->req.complete(&ep->ep, &req->req);
			spin_lock(&dum->lock);

			/* requests might have been unlinked... */
			rescan = 1;
		}

		/* host side completion --> terminate */
		if (*status != -EINPROGRESS)
			break;

		/* rescan to continue with any other queued i/o */
		if (rescan)
			goto top;
	}
	return limit;
}

/* per-"frame" byte budget for a periodic endpoint at the current speed */
static int periodic_bytes(struct dummy *dum, struct dummy_ep *ep)
{
	int	limit = ep->ep.maxpacket;

	if (dum->gadget.speed == USB_SPEED_HIGH) {
		int	tmp;

		/* high bandwidth mode: extra transactions per microframe
		 * come from wMaxPacketSize bits 11..12
		 */
		tmp = usb_endpoint_maxp(ep->desc);
		tmp = (tmp >> 11) & 0x03;
		tmp *= 8 /* applies to entire frame */;
		limit += limit * tmp;
	}
	if (dum->gadget.speed == USB_SPEED_SUPER) {
		switch (usb_endpoint_type(ep->desc)) {
		case USB_ENDPOINT_XFER_ISOC:
			/* Sec. 4.4.8.2 USB3.0 Spec */
			limit = 3 * 16 * 1024 * 8;
			break;
		case USB_ENDPOINT_XFER_INT:
			/* Sec. 4.4.7.2 USB3.0 Spec */
			limit = 3 * 1024 * 8;
			break;
		case USB_ENDPOINT_XFER_BULK:
		default:
			break;
		}
	}
	return limit;
}

/* port is connected and enabled and not suspended */
#define is_active(dum_hcd)	((dum_hcd->port_status & \
		(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE | \
			USB_PORT_STAT_SUSPEND)) \
		== (USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE))

static struct dummy_ep *find_endpoint(struct dummy *dum, u8 address)
{
	int		i;

	if (!is_active((dum->gadget.speed == USB_SPEED_SUPER ?
dum->ss_hcd : dum->hs_hcd))) return NULL; if ((address & ~USB_DIR_IN) == 0) return &dum->ep[0]; for (i = 1; i < DUMMY_ENDPOINTS; i++) { struct dummy_ep *ep = &dum->ep[i]; if (!ep->desc) continue; if (ep->desc->bEndpointAddress == address) return ep; } return NULL; } #undef is_active #define Dev_Request (USB_TYPE_STANDARD | USB_RECIP_DEVICE) #define Dev_InRequest (Dev_Request | USB_DIR_IN) #define Intf_Request (USB_TYPE_STANDARD | USB_RECIP_INTERFACE) #define Intf_InRequest (Intf_Request | USB_DIR_IN) #define Ep_Request (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT) #define Ep_InRequest (Ep_Request | USB_DIR_IN) /** * handle_control_request() - handles all control transfers * @dum: pointer to dummy (the_controller) * @urb: the urb request to handle * @setup: pointer to the setup data for a USB device control * request * @status: pointer to request handling status * * Return 0 - if the request was handled * 1 - if the request wasn't handles * error code on error */ static int handle_control_request(struct dummy_hcd *dum_hcd, struct urb *urb, struct usb_ctrlrequest *setup, int *status) { struct dummy_ep *ep2; struct dummy *dum = dum_hcd->dum; int ret_val = 1; unsigned w_index; unsigned w_value; w_index = le16_to_cpu(setup->wIndex); w_value = le16_to_cpu(setup->wValue); switch (setup->bRequest) { case USB_REQ_SET_ADDRESS: if (setup->bRequestType != Dev_Request) break; dum->address = w_value; *status = 0; dev_dbg(udc_dev(dum), "set_address = %d\n", w_value); ret_val = 0; break; case USB_REQ_SET_FEATURE: if (setup->bRequestType == Dev_Request) { ret_val = 0; switch (w_value) { case USB_DEVICE_REMOTE_WAKEUP: break; case USB_DEVICE_B_HNP_ENABLE: dum->gadget.b_hnp_enable = 1; break; case USB_DEVICE_A_HNP_SUPPORT: dum->gadget.a_hnp_support = 1; break; case USB_DEVICE_A_ALT_HNP_SUPPORT: dum->gadget.a_alt_hnp_support = 1; break; case USB_DEVICE_U1_ENABLE: if (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3) w_value = USB_DEV_STAT_U1_ENABLED; else ret_val = -EOPNOTSUPP; break; case 
USB_DEVICE_U2_ENABLE: if (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3) w_value = USB_DEV_STAT_U2_ENABLED; else ret_val = -EOPNOTSUPP; break; case USB_DEVICE_LTM_ENABLE: if (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3) w_value = USB_DEV_STAT_LTM_ENABLED; else ret_val = -EOPNOTSUPP; break; default: ret_val = -EOPNOTSUPP; } if (ret_val == 0) { dum->devstatus |= (1 << w_value); *status = 0; } } else if (setup->bRequestType == Ep_Request) { /* endpoint halt */ ep2 = find_endpoint(dum, w_index); if (!ep2 || ep2->ep.name == ep0name) { ret_val = -EOPNOTSUPP; break; } ep2->halted = 1; ret_val = 0; *status = 0; } break; case USB_REQ_CLEAR_FEATURE: if (setup->bRequestType == Dev_Request) { ret_val = 0; switch (w_value) { case USB_DEVICE_REMOTE_WAKEUP: w_value = USB_DEVICE_REMOTE_WAKEUP; break; case USB_DEVICE_U1_ENABLE: if (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3) w_value = USB_DEV_STAT_U1_ENABLED; else ret_val = -EOPNOTSUPP; break; case USB_DEVICE_U2_ENABLE: if (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3) w_value = USB_DEV_STAT_U2_ENABLED; else ret_val = -EOPNOTSUPP; break; case USB_DEVICE_LTM_ENABLE: if (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3) w_value = USB_DEV_STAT_LTM_ENABLED; else ret_val = -EOPNOTSUPP; break; default: ret_val = -EOPNOTSUPP; break; } if (ret_val == 0) { dum->devstatus &= ~(1 << w_value); *status = 0; } } else if (setup->bRequestType == Ep_Request) { /* endpoint halt */ ep2 = find_endpoint(dum, w_index); if (!ep2) { ret_val = -EOPNOTSUPP; break; } if (!ep2->wedged) ep2->halted = 0; ret_val = 0; *status = 0; } break; case USB_REQ_GET_STATUS: if (setup->bRequestType == Dev_InRequest || setup->bRequestType == Intf_InRequest || setup->bRequestType == Ep_InRequest) { char *buf; /* * device: remote wakeup, selfpowered * interface: nothing * endpoint: halt */ buf = (char *)urb->transfer_buffer; if (urb->transfer_buffer_length > 0) { if (setup->bRequestType == Ep_InRequest) { ep2 = find_endpoint(dum, w_index); if (!ep2) { ret_val = -EOPNOTSUPP; 
break; } buf[0] = ep2->halted; } else if (setup->bRequestType == Dev_InRequest) { buf[0] = (u8)dum->devstatus; } else buf[0] = 0; } if (urb->transfer_buffer_length > 1) buf[1] = 0; urb->actual_length = min_t(u32, 2, urb->transfer_buffer_length); ret_val = 0; *status = 0; } break; } return ret_val; } /* drive both sides of the transfers; looks like irq handlers to * both drivers except the callbacks aren't in_irq(). */ static void dummy_timer(unsigned long _dum_hcd) { struct dummy_hcd *dum_hcd = (struct dummy_hcd *) _dum_hcd; struct dummy *dum = dum_hcd->dum; struct urbp *urbp, *tmp; unsigned long flags; int limit, total; int i; /* simplistic model for one frame's bandwidth */ switch (dum->gadget.speed) { case USB_SPEED_LOW: total = 8/*bytes*/ * 12/*packets*/; break; case USB_SPEED_FULL: total = 64/*bytes*/ * 19/*packets*/; break; case USB_SPEED_HIGH: total = 512/*bytes*/ * 13/*packets*/ * 8/*uframes*/; break; case USB_SPEED_SUPER: /* Bus speed is 500000 bytes/ms, so use a little less */ total = 490000; break; default: dev_err(dummy_dev(dum_hcd), "bogus device speed\n"); return; } /* FIXME if HZ != 1000 this will probably misbehave ... */ /* look at each urb queued by the host side driver */ spin_lock_irqsave(&dum->lock, flags); if (!dum_hcd->udev) { dev_err(dummy_dev(dum_hcd), "timer fired with no URBs pending?\n"); spin_unlock_irqrestore(&dum->lock, flags); return; } for (i = 0; i < DUMMY_ENDPOINTS; i++) { if (!ep_name[i]) break; dum->ep[i].already_seen = 0; } restart: list_for_each_entry_safe(urbp, tmp, &dum_hcd->urbp_list, urbp_list) { struct urb *urb; struct dummy_request *req; u8 address; struct dummy_ep *ep = NULL; int type; int status = -EINPROGRESS; urb = urbp->urb; if (urb->unlinked) goto return_urb; else if (dum_hcd->rh_state != DUMMY_RH_RUNNING) continue; type = usb_pipetype(urb->pipe); /* used up this frame's non-periodic bandwidth? * FIXME there's infinite bandwidth for control and * periodic transfers ... unrealistic. 
*/ if (total <= 0 && type == PIPE_BULK) continue; /* find the gadget's ep for this request (if configured) */ address = usb_pipeendpoint (urb->pipe); if (usb_pipein(urb->pipe)) address |= USB_DIR_IN; ep = find_endpoint(dum, address); if (!ep) { /* set_configuration() disagreement */ dev_dbg(dummy_dev(dum_hcd), "no ep configured for urb %p\n", urb); status = -EPROTO; goto return_urb; } if (ep->already_seen) continue; ep->already_seen = 1; if (ep == &dum->ep[0] && urb->error_count) { ep->setup_stage = 1; /* a new urb */ urb->error_count = 0; } if (ep->halted && !ep->setup_stage) { /* NOTE: must not be iso! */ dev_dbg(dummy_dev(dum_hcd), "ep %s halted, urb %p\n", ep->ep.name, urb); status = -EPIPE; goto return_urb; } /* FIXME make sure both ends agree on maxpacket */ /* handle control requests */ if (ep == &dum->ep[0] && ep->setup_stage) { struct usb_ctrlrequest setup; int value = 1; setup = *(struct usb_ctrlrequest *) urb->setup_packet; /* paranoia, in case of stale queued data */ list_for_each_entry(req, &ep->queue, queue) { list_del_init(&req->queue); req->req.status = -EOVERFLOW; dev_dbg(udc_dev(dum), "stale req = %p\n", req); spin_unlock(&dum->lock); req->req.complete(&ep->ep, &req->req); spin_lock(&dum->lock); ep->already_seen = 0; goto restart; } /* gadget driver never sees set_address or operations * on standard feature flags. some hardware doesn't * even expose them. */ ep->last_io = jiffies; ep->setup_stage = 0; ep->halted = 0; value = handle_control_request(dum_hcd, urb, &setup, &status); /* gadget driver handles all other requests. block * until setup() returns; no reentrancy issues etc. 
*/ if (value > 0) { spin_unlock(&dum->lock); value = dum->driver->setup(&dum->gadget, &setup); spin_lock(&dum->lock); if (value >= 0) { /* no delays (max 64KB data stage) */ limit = 64*1024; goto treat_control_like_bulk; } /* error, see below */ } if (value < 0) { if (value != -EOPNOTSUPP) dev_dbg(udc_dev(dum), "setup --> %d\n", value); status = -EPIPE; urb->actual_length = 0; } goto return_urb; } /* non-control requests */ limit = total; switch (usb_pipetype(urb->pipe)) { case PIPE_ISOCHRONOUS: /* FIXME is it urb->interval since the last xfer? * use urb->iso_frame_desc[i]. * complete whether or not ep has requests queued. * report random errors, to debug drivers. */ limit = max(limit, periodic_bytes(dum, ep)); status = -ENOSYS; break; case PIPE_INTERRUPT: /* FIXME is it urb->interval since the last xfer? * this almost certainly polls too fast. */ limit = max(limit, periodic_bytes(dum, ep)); /* FALLTHROUGH */ default: treat_control_like_bulk: ep->last_io = jiffies; total = transfer(dum_hcd, urb, ep, limit, &status); break; } /* incomplete transfer? 
*/ if (status == -EINPROGRESS) continue; return_urb: list_del(&urbp->urbp_list); kfree(urbp); if (ep) ep->already_seen = ep->setup_stage = 0; usb_hcd_unlink_urb_from_ep(dummy_hcd_to_hcd(dum_hcd), urb); spin_unlock(&dum->lock); usb_hcd_giveback_urb(dummy_hcd_to_hcd(dum_hcd), urb, status); spin_lock(&dum->lock); goto restart; } if (list_empty(&dum_hcd->urbp_list)) { usb_put_dev(dum_hcd->udev); dum_hcd->udev = NULL; } else if (dum_hcd->rh_state == DUMMY_RH_RUNNING) { /* want a 1 msec delay here */ mod_timer(&dum_hcd->timer, jiffies + msecs_to_jiffies(1)); } spin_unlock_irqrestore(&dum->lock, flags); } /*-------------------------------------------------------------------------*/ #define PORT_C_MASK \ ((USB_PORT_STAT_C_CONNECTION \ | USB_PORT_STAT_C_ENABLE \ | USB_PORT_STAT_C_SUSPEND \ | USB_PORT_STAT_C_OVERCURRENT \ | USB_PORT_STAT_C_RESET) << 16) static int dummy_hub_status(struct usb_hcd *hcd, char *buf) { struct dummy_hcd *dum_hcd; unsigned long flags; int retval = 0; dum_hcd = hcd_to_dummy_hcd(hcd); spin_lock_irqsave(&dum_hcd->dum->lock, flags); if (!HCD_HW_ACCESSIBLE(hcd)) goto done; if (dum_hcd->resuming && time_after_eq(jiffies, dum_hcd->re_timeout)) { dum_hcd->port_status |= (USB_PORT_STAT_C_SUSPEND << 16); dum_hcd->port_status &= ~USB_PORT_STAT_SUSPEND; set_link_state(dum_hcd); } if ((dum_hcd->port_status & PORT_C_MASK) != 0) { *buf = (1 << 1); dev_dbg(dummy_dev(dum_hcd), "port status 0x%08x has changes\n", dum_hcd->port_status); retval = 1; if (dum_hcd->rh_state == DUMMY_RH_SUSPENDED) usb_hcd_resume_root_hub(hcd); } done: spin_unlock_irqrestore(&dum_hcd->dum->lock, flags); return retval; } /* usb 3.0 root hub device descriptor */ static struct { struct usb_bos_descriptor bos; struct usb_ss_cap_descriptor ss_cap; } __packed usb3_bos_desc = { .bos = { .bLength = USB_DT_BOS_SIZE, .bDescriptorType = USB_DT_BOS, .wTotalLength = cpu_to_le16(sizeof(usb3_bos_desc)), .bNumDeviceCaps = 1, }, .ss_cap = { .bLength = USB_DT_USB_SS_CAP_SIZE, .bDescriptorType = 
USB_DT_DEVICE_CAPABILITY, .bDevCapabilityType = USB_SS_CAP_TYPE, .wSpeedSupported = cpu_to_le16(USB_5GBPS_OPERATION), .bFunctionalitySupport = ilog2(USB_5GBPS_OPERATION), }, }; static inline void ss_hub_descriptor(struct usb_hub_descriptor *desc) { memset(desc, 0, sizeof *desc); desc->bDescriptorType = 0x2a; desc->bDescLength = 12; desc->wHubCharacteristics = cpu_to_le16(0x0001); desc->bNbrPorts = 1; desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec*/ desc->u.ss.DeviceRemovable = 0xffff; } static inline void hub_descriptor(struct usb_hub_descriptor *desc) { memset(desc, 0, sizeof *desc); desc->bDescriptorType = 0x29; desc->bDescLength = 9; desc->wHubCharacteristics = cpu_to_le16(0x0001); desc->bNbrPorts = 1; desc->u.hs.DeviceRemovable[0] = 0xff; desc->u.hs.DeviceRemovable[1] = 0xff; } static int dummy_hub_control( struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength ) { struct dummy_hcd *dum_hcd; int retval = 0; unsigned long flags; if (!HCD_HW_ACCESSIBLE(hcd)) return -ETIMEDOUT; dum_hcd = hcd_to_dummy_hcd(hcd); spin_lock_irqsave(&dum_hcd->dum->lock, flags); switch (typeReq) { case ClearHubFeature: break; case ClearPortFeature: switch (wValue) { case USB_PORT_FEAT_SUSPEND: if (hcd->speed == HCD_USB3) { dev_dbg(dummy_dev(dum_hcd), "USB_PORT_FEAT_SUSPEND req not " "supported for USB 3.0 roothub\n"); goto error; } if (dum_hcd->port_status & USB_PORT_STAT_SUSPEND) { /* 20msec resume signaling */ dum_hcd->resuming = 1; dum_hcd->re_timeout = jiffies + msecs_to_jiffies(20); } break; case USB_PORT_FEAT_POWER: if (hcd->speed == HCD_USB3) { if (dum_hcd->port_status & USB_PORT_STAT_POWER) dev_dbg(dummy_dev(dum_hcd), "power-off\n"); } else if (dum_hcd->port_status & USB_SS_PORT_STAT_POWER) dev_dbg(dummy_dev(dum_hcd), "power-off\n"); /* FALLS THROUGH */ default: dum_hcd->port_status &= ~(1 << wValue); set_link_state(dum_hcd); } break; case GetHubDescriptor: if (hcd->speed == HCD_USB3 && (wLength < USB_DT_SS_HUB_SIZE || wValue != 
(USB_DT_SS_HUB << 8))) { dev_dbg(dummy_dev(dum_hcd), "Wrong hub descriptor type for " "USB 3.0 roothub.\n"); goto error; } if (hcd->speed == HCD_USB3) ss_hub_descriptor((struct usb_hub_descriptor *) buf); else hub_descriptor((struct usb_hub_descriptor *) buf); break; case DeviceRequest | USB_REQ_GET_DESCRIPTOR: if (hcd->speed != HCD_USB3) goto error; if ((wValue >> 8) != USB_DT_BOS) goto error; memcpy(buf, &usb3_bos_desc, sizeof(usb3_bos_desc)); retval = sizeof(usb3_bos_desc); break; case GetHubStatus: *(__le32 *) buf = cpu_to_le32(0); break; case GetPortStatus: if (wIndex != 1) retval = -EPIPE; /* whoever resets or resumes must GetPortStatus to * complete it!! */ if (dum_hcd->resuming && time_after_eq(jiffies, dum_hcd->re_timeout)) { dum_hcd->port_status |= (USB_PORT_STAT_C_SUSPEND << 16); dum_hcd->port_status &= ~USB_PORT_STAT_SUSPEND; } if ((dum_hcd->port_status & USB_PORT_STAT_RESET) != 0 && time_after_eq(jiffies, dum_hcd->re_timeout)) { dum_hcd->port_status |= (USB_PORT_STAT_C_RESET << 16); dum_hcd->port_status &= ~USB_PORT_STAT_RESET; if (dum_hcd->dum->pullup) { dum_hcd->port_status |= USB_PORT_STAT_ENABLE; if (hcd->speed < HCD_USB3) { switch (dum_hcd->dum->gadget.speed) { case USB_SPEED_HIGH: dum_hcd->port_status |= USB_PORT_STAT_HIGH_SPEED; break; case USB_SPEED_LOW: dum_hcd->dum->gadget.ep0-> maxpacket = 8; dum_hcd->port_status |= USB_PORT_STAT_LOW_SPEED; break; default: dum_hcd->dum->gadget.speed = USB_SPEED_FULL; break; } } } } set_link_state(dum_hcd); ((__le16 *) buf)[0] = cpu_to_le16(dum_hcd->port_status); ((__le16 *) buf)[1] = cpu_to_le16(dum_hcd->port_status >> 16); break; case SetHubFeature: retval = -EPIPE; break; case SetPortFeature: switch (wValue) { case USB_PORT_FEAT_LINK_STATE: if (hcd->speed != HCD_USB3) { dev_dbg(dummy_dev(dum_hcd), "USB_PORT_FEAT_LINK_STATE req not " "supported for USB 2.0 roothub\n"); goto error; } /* * Since this is dummy we don't have an actual link so * there is nothing to do for the SET_LINK_STATE cmd */ break; case 
USB_PORT_FEAT_U1_TIMEOUT: case USB_PORT_FEAT_U2_TIMEOUT: /* TODO: add suspend/resume support! */ if (hcd->speed != HCD_USB3) { dev_dbg(dummy_dev(dum_hcd), "USB_PORT_FEAT_U1/2_TIMEOUT req not " "supported for USB 2.0 roothub\n"); goto error; } break; case USB_PORT_FEAT_SUSPEND: /* Applicable only for USB2.0 hub */ if (hcd->speed == HCD_USB3) { dev_dbg(dummy_dev(dum_hcd), "USB_PORT_FEAT_SUSPEND req not " "supported for USB 3.0 roothub\n"); goto error; } if (dum_hcd->active) { dum_hcd->port_status |= USB_PORT_STAT_SUSPEND; /* HNP would happen here; for now we * assume b_bus_req is always true. */ set_link_state(dum_hcd); if (((1 << USB_DEVICE_B_HNP_ENABLE) & dum_hcd->dum->devstatus) != 0) dev_dbg(dummy_dev(dum_hcd), "no HNP yet!\n"); } break; case USB_PORT_FEAT_POWER: if (hcd->speed == HCD_USB3) dum_hcd->port_status |= USB_SS_PORT_STAT_POWER; else dum_hcd->port_status |= USB_PORT_STAT_POWER; set_link_state(dum_hcd); break; case USB_PORT_FEAT_BH_PORT_RESET: /* Applicable only for USB3.0 hub */ if (hcd->speed != HCD_USB3) { dev_dbg(dummy_dev(dum_hcd), "USB_PORT_FEAT_BH_PORT_RESET req not " "supported for USB 2.0 roothub\n"); goto error; } /* FALLS THROUGH */ case USB_PORT_FEAT_RESET: /* if it's already enabled, disable */ if (hcd->speed == HCD_USB3) { dum_hcd->port_status = 0; dum_hcd->port_status = (USB_SS_PORT_STAT_POWER | USB_PORT_STAT_CONNECTION | USB_PORT_STAT_RESET); } else dum_hcd->port_status &= ~(USB_PORT_STAT_ENABLE | USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED); /* * We want to reset device status. All but the * Self powered feature */ dum_hcd->dum->devstatus &= (1 << USB_DEVICE_SELF_POWERED); /* * FIXME USB3.0: what is the correct reset signaling * interval? Is it still 50msec as for HS? 
*/ dum_hcd->re_timeout = jiffies + msecs_to_jiffies(50); /* FALLS THROUGH */ default: if (hcd->speed == HCD_USB3) { if ((dum_hcd->port_status & USB_SS_PORT_STAT_POWER) != 0) { dum_hcd->port_status |= (1 << wValue); set_link_state(dum_hcd); } } else if ((dum_hcd->port_status & USB_PORT_STAT_POWER) != 0) { dum_hcd->port_status |= (1 << wValue); set_link_state(dum_hcd); } } break; case GetPortErrorCount: if (hcd->speed != HCD_USB3) { dev_dbg(dummy_dev(dum_hcd), "GetPortErrorCount req not " "supported for USB 2.0 roothub\n"); goto error; } /* We'll always return 0 since this is a dummy hub */ *(__le32 *) buf = cpu_to_le32(0); break; case SetHubDepth: if (hcd->speed != HCD_USB3) { dev_dbg(dummy_dev(dum_hcd), "SetHubDepth req not supported for " "USB 2.0 roothub\n"); goto error; } break; default: dev_dbg(dummy_dev(dum_hcd), "hub control req%04x v%04x i%04x l%d\n", typeReq, wValue, wIndex, wLength); error: /* "protocol stall" on error */ retval = -EPIPE; } spin_unlock_irqrestore(&dum_hcd->dum->lock, flags); if ((dum_hcd->port_status & PORT_C_MASK) != 0) usb_hcd_poll_rh_status(hcd); return retval; } static int dummy_bus_suspend(struct usb_hcd *hcd) { struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd); dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__); spin_lock_irq(&dum_hcd->dum->lock); dum_hcd->rh_state = DUMMY_RH_SUSPENDED; set_link_state(dum_hcd); hcd->state = HC_STATE_SUSPENDED; spin_unlock_irq(&dum_hcd->dum->lock); return 0; } static int dummy_bus_resume(struct usb_hcd *hcd) { struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd); int rc = 0; dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__); spin_lock_irq(&dum_hcd->dum->lock); if (!HCD_HW_ACCESSIBLE(hcd)) { rc = -ESHUTDOWN; } else { dum_hcd->rh_state = DUMMY_RH_RUNNING; set_link_state(dum_hcd); if (!list_empty(&dum_hcd->urbp_list)) mod_timer(&dum_hcd->timer, jiffies); hcd->state = HC_STATE_RUNNING; } spin_unlock_irq(&dum_hcd->dum->lock); return rc; } 
/*-------------------------------------------------------------------------*/ static inline ssize_t show_urb(char *buf, size_t size, struct urb *urb) { int ep = usb_pipeendpoint(urb->pipe); return snprintf(buf, size, "urb/%p %s ep%d%s%s len %d/%d\n", urb, ({ char *s; switch (urb->dev->speed) { case USB_SPEED_LOW: s = "ls"; break; case USB_SPEED_FULL: s = "fs"; break; case USB_SPEED_HIGH: s = "hs"; break; case USB_SPEED_SUPER: s = "ss"; break; default: s = "?"; break; } s; }), ep, ep ? (usb_pipein(urb->pipe) ? "in" : "out") : "", ({ char *s; \ switch (usb_pipetype(urb->pipe)) { \ case PIPE_CONTROL: \ s = ""; \ break; \ case PIPE_BULK: \ s = "-bulk"; \ break; \ case PIPE_INTERRUPT: \ s = "-int"; \ break; \ default: \ s = "-iso"; \ break; \ } s; }), urb->actual_length, urb->transfer_buffer_length); } static ssize_t urbs_show(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_hcd *hcd = dev_get_drvdata(dev); struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd); struct urbp *urbp; size_t size = 0; unsigned long flags; spin_lock_irqsave(&dum_hcd->dum->lock, flags); list_for_each_entry(urbp, &dum_hcd->urbp_list, urbp_list) { size_t temp; temp = show_urb(buf, PAGE_SIZE - size, urbp->urb); buf += temp; size += temp; } spin_unlock_irqrestore(&dum_hcd->dum->lock, flags); return size; } static DEVICE_ATTR_RO(urbs); static int dummy_start_ss(struct dummy_hcd *dum_hcd) { init_timer(&dum_hcd->timer); dum_hcd->timer.function = dummy_timer; dum_hcd->timer.data = (unsigned long)dum_hcd; dum_hcd->rh_state = DUMMY_RH_RUNNING; dum_hcd->stream_en_ep = 0; INIT_LIST_HEAD(&dum_hcd->urbp_list); dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET; dummy_hcd_to_hcd(dum_hcd)->state = HC_STATE_RUNNING; dummy_hcd_to_hcd(dum_hcd)->uses_new_polling = 1; #ifdef CONFIG_USB_OTG dummy_hcd_to_hcd(dum_hcd)->self.otg_port = 1; #endif return 0; /* FIXME 'urbs' should be a per-device thing, maybe in usbcore */ return device_create_file(dummy_dev(dum_hcd), &dev_attr_urbs); } static 
int dummy_start(struct usb_hcd *hcd) { struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd); /* * MASTER side init ... we emulate a root hub that'll only ever * talk to one device (the slave side). Also appears in sysfs, * just like more familiar pci-based HCDs. */ if (!usb_hcd_is_primary_hcd(hcd)) return dummy_start_ss(dum_hcd); spin_lock_init(&dum_hcd->dum->lock); init_timer(&dum_hcd->timer); dum_hcd->timer.function = dummy_timer; dum_hcd->timer.data = (unsigned long)dum_hcd; dum_hcd->rh_state = DUMMY_RH_RUNNING; INIT_LIST_HEAD(&dum_hcd->urbp_list); hcd->power_budget = POWER_BUDGET; hcd->state = HC_STATE_RUNNING; hcd->uses_new_polling = 1; #ifdef CONFIG_USB_OTG hcd->self.otg_port = 1; #endif /* FIXME 'urbs' should be a per-device thing, maybe in usbcore */ return device_create_file(dummy_dev(dum_hcd), &dev_attr_urbs); } static void dummy_stop(struct usb_hcd *hcd) { struct dummy *dum; dum = hcd_to_dummy_hcd(hcd)->dum; device_remove_file(dummy_dev(hcd_to_dummy_hcd(hcd)), &dev_attr_urbs); usb_gadget_unregister_driver(dum->driver); dev_info(dummy_dev(hcd_to_dummy_hcd(hcd)), "stopped\n"); } /*-------------------------------------------------------------------------*/ static int dummy_h_get_frame(struct usb_hcd *hcd) { return dummy_g_get_frame(NULL); } static int dummy_setup(struct usb_hcd *hcd) { struct dummy *dum; dum = *((void **)dev_get_platdata(hcd->self.controller)); hcd->self.sg_tablesize = ~0; if (usb_hcd_is_primary_hcd(hcd)) { dum->hs_hcd = hcd_to_dummy_hcd(hcd); dum->hs_hcd->dum = dum; /* * Mark the first roothub as being USB 2.0. 
* The USB 3.0 roothub will be registered later by * dummy_hcd_probe() */ hcd->speed = HCD_USB2; hcd->self.root_hub->speed = USB_SPEED_HIGH; } else { dum->ss_hcd = hcd_to_dummy_hcd(hcd); dum->ss_hcd->dum = dum; hcd->speed = HCD_USB3; hcd->self.root_hub->speed = USB_SPEED_SUPER; } return 0; } /* Change a group of bulk endpoints to support multiple stream IDs */ static int dummy_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint **eps, unsigned int num_eps, unsigned int num_streams, gfp_t mem_flags) { struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd); unsigned long flags; int max_stream; int ret_streams = num_streams; unsigned int index; unsigned int i; if (!num_eps) return -EINVAL; spin_lock_irqsave(&dum_hcd->dum->lock, flags); for (i = 0; i < num_eps; i++) { index = dummy_get_ep_idx(&eps[i]->desc); if ((1 << index) & dum_hcd->stream_en_ep) { ret_streams = -EINVAL; goto out; } max_stream = usb_ss_max_streams(&eps[i]->ss_ep_comp); if (!max_stream) { ret_streams = -EINVAL; goto out; } if (max_stream < ret_streams) { dev_dbg(dummy_dev(dum_hcd), "Ep 0x%x only supports %u " "stream IDs.\n", eps[i]->desc.bEndpointAddress, max_stream); ret_streams = max_stream; } } for (i = 0; i < num_eps; i++) { index = dummy_get_ep_idx(&eps[i]->desc); dum_hcd->stream_en_ep |= 1 << index; set_max_streams_for_pipe(dum_hcd, usb_endpoint_num(&eps[i]->desc), ret_streams); } out: spin_unlock_irqrestore(&dum_hcd->dum->lock, flags); return ret_streams; } /* Reverts a group of bulk endpoints back to not using stream IDs. 
*/ static int dummy_free_streams(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint **eps, unsigned int num_eps, gfp_t mem_flags) { struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd); unsigned long flags; int ret; unsigned int index; unsigned int i; spin_lock_irqsave(&dum_hcd->dum->lock, flags); for (i = 0; i < num_eps; i++) { index = dummy_get_ep_idx(&eps[i]->desc); if (!((1 << index) & dum_hcd->stream_en_ep)) { ret = -EINVAL; goto out; } } for (i = 0; i < num_eps; i++) { index = dummy_get_ep_idx(&eps[i]->desc); dum_hcd->stream_en_ep &= ~(1 << index); set_max_streams_for_pipe(dum_hcd, usb_endpoint_num(&eps[i]->desc), 0); } ret = 0; out: spin_unlock_irqrestore(&dum_hcd->dum->lock, flags); return ret; } static struct hc_driver dummy_hcd = { .description = (char *) driver_name, .product_desc = "Dummy host controller", .hcd_priv_size = sizeof(struct dummy_hcd), .flags = HCD_USB3 | HCD_SHARED, .reset = dummy_setup, .start = dummy_start, .stop = dummy_stop, .urb_enqueue = dummy_urb_enqueue, .urb_dequeue = dummy_urb_dequeue, .get_frame_number = dummy_h_get_frame, .hub_status_data = dummy_hub_status, .hub_control = dummy_hub_control, .bus_suspend = dummy_bus_suspend, .bus_resume = dummy_bus_resume, .alloc_streams = dummy_alloc_streams, .free_streams = dummy_free_streams, }; static int dummy_hcd_probe(struct platform_device *pdev) { struct dummy *dum; struct usb_hcd *hs_hcd; struct usb_hcd *ss_hcd; int retval; dev_info(&pdev->dev, "%s, driver " DRIVER_VERSION "\n", driver_desc); dum = *((void **)dev_get_platdata(&pdev->dev)); if (!mod_data.is_super_speed) dummy_hcd.flags = HCD_USB2; hs_hcd = usb_create_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev)); if (!hs_hcd) return -ENOMEM; hs_hcd->has_tt = 1; retval = usb_add_hcd(hs_hcd, 0, 0); if (retval) goto put_usb2_hcd; if (mod_data.is_super_speed) { ss_hcd = usb_create_shared_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev), hs_hcd); if (!ss_hcd) { retval = -ENOMEM; goto dealloc_usb2_hcd; } retval = 
usb_add_hcd(ss_hcd, 0, 0); if (retval) goto put_usb3_hcd; } return 0; put_usb3_hcd: usb_put_hcd(ss_hcd); dealloc_usb2_hcd: usb_remove_hcd(hs_hcd); put_usb2_hcd: usb_put_hcd(hs_hcd); dum->hs_hcd = dum->ss_hcd = NULL; return retval; } static int dummy_hcd_remove(struct platform_device *pdev) { struct dummy *dum; dum = hcd_to_dummy_hcd(platform_get_drvdata(pdev))->dum; if (dum->ss_hcd) { usb_remove_hcd(dummy_hcd_to_hcd(dum->ss_hcd)); usb_put_hcd(dummy_hcd_to_hcd(dum->ss_hcd)); } usb_remove_hcd(dummy_hcd_to_hcd(dum->hs_hcd)); usb_put_hcd(dummy_hcd_to_hcd(dum->hs_hcd)); dum->hs_hcd = NULL; dum->ss_hcd = NULL; return 0; } static int dummy_hcd_suspend(struct platform_device *pdev, pm_message_t state) { struct usb_hcd *hcd; struct dummy_hcd *dum_hcd; int rc = 0; dev_dbg(&pdev->dev, "%s\n", __func__); hcd = platform_get_drvdata(pdev); dum_hcd = hcd_to_dummy_hcd(hcd); if (dum_hcd->rh_state == DUMMY_RH_RUNNING) { dev_warn(&pdev->dev, "Root hub isn't suspended!\n"); rc = -EBUSY; } else clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); return rc; } static int dummy_hcd_resume(struct platform_device *pdev) { struct usb_hcd *hcd; dev_dbg(&pdev->dev, "%s\n", __func__); hcd = platform_get_drvdata(pdev); set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); usb_hcd_poll_rh_status(hcd); return 0; } static struct platform_driver dummy_hcd_driver = { .probe = dummy_hcd_probe, .remove = dummy_hcd_remove, .suspend = dummy_hcd_suspend, .resume = dummy_hcd_resume, .driver = { .name = (char *) driver_name, .owner = THIS_MODULE, }, }; /*-------------------------------------------------------------------------*/ #define MAX_NUM_UDC 2 static struct platform_device *the_udc_pdev[MAX_NUM_UDC]; static struct platform_device *the_hcd_pdev[MAX_NUM_UDC]; static int __init init(void) { int retval = -ENOMEM; int i; struct dummy *dum[MAX_NUM_UDC]; if (usb_disabled()) return -ENODEV; if (!mod_data.is_high_speed && mod_data.is_super_speed) return -EINVAL; if (mod_data.num < 1 || mod_data.num > MAX_NUM_UDC) { 
pr_err("Number of emulated UDC must be in range of 1…%d\n", MAX_NUM_UDC); return -EINVAL; } for (i = 0; i < mod_data.num; i++) { the_hcd_pdev[i] = platform_device_alloc(driver_name, i); if (!the_hcd_pdev[i]) { i--; while (i >= 0) platform_device_put(the_hcd_pdev[i--]); return retval; } } for (i = 0; i < mod_data.num; i++) { the_udc_pdev[i] = platform_device_alloc(gadget_name, i); if (!the_udc_pdev[i]) { i--; while (i >= 0) platform_device_put(the_udc_pdev[i--]); goto err_alloc_udc; } } for (i = 0; i < mod_data.num; i++) { dum[i] = kzalloc(sizeof(struct dummy), GFP_KERNEL); if (!dum[i]) { retval = -ENOMEM; goto err_add_pdata; } retval = platform_device_add_data(the_hcd_pdev[i], &dum[i], sizeof(void *)); if (retval) goto err_add_pdata; retval = platform_device_add_data(the_udc_pdev[i], &dum[i], sizeof(void *)); if (retval) goto err_add_pdata; } retval = platform_driver_register(&dummy_hcd_driver); if (retval < 0) goto err_add_pdata; retval = platform_driver_register(&dummy_udc_driver); if (retval < 0) goto err_register_udc_driver; for (i = 0; i < mod_data.num; i++) { retval = platform_device_add(the_hcd_pdev[i]); if (retval < 0) { i--; while (i >= 0) platform_device_del(the_hcd_pdev[i--]); goto err_add_hcd; } } for (i = 0; i < mod_data.num; i++) { if (!dum[i]->hs_hcd || (!dum[i]->ss_hcd && mod_data.is_super_speed)) { /* * The hcd was added successfully but its probe * function failed for some reason. */ retval = -EINVAL; goto err_add_udc; } } for (i = 0; i < mod_data.num; i++) { retval = platform_device_add(the_udc_pdev[i]); if (retval < 0) { i--; while (i >= 0) platform_device_del(the_udc_pdev[i]); goto err_add_udc; } } for (i = 0; i < mod_data.num; i++) { if (!platform_get_drvdata(the_udc_pdev[i])) { /* * The udc was added successfully but its probe * function failed for some reason. 
*/ retval = -EINVAL; goto err_probe_udc; } } return retval; err_probe_udc: for (i = 0; i < mod_data.num; i++) platform_device_del(the_udc_pdev[i]); err_add_udc: for (i = 0; i < mod_data.num; i++) platform_device_del(the_hcd_pdev[i]); err_add_hcd: platform_driver_unregister(&dummy_udc_driver); err_register_udc_driver: platform_driver_unregister(&dummy_hcd_driver); err_add_pdata: for (i = 0; i < mod_data.num; i++) kfree(dum[i]); for (i = 0; i < mod_data.num; i++) platform_device_put(the_udc_pdev[i]); err_alloc_udc: for (i = 0; i < mod_data.num; i++) platform_device_put(the_hcd_pdev[i]); return retval; } module_init(init); static void __exit cleanup(void) { int i; for (i = 0; i < mod_data.num; i++) { struct dummy *dum; dum = *((void **)dev_get_platdata(&the_udc_pdev[i]->dev)); platform_device_unregister(the_udc_pdev[i]); platform_device_unregister(the_hcd_pdev[i]); kfree(dum); } platform_driver_unregister(&dummy_udc_driver); platform_driver_unregister(&dummy_hcd_driver); } module_exit(cleanup);
gpl-2.0
chucktr/android_kernel_htc_msm8960
drivers/staging/prima/CORE/MAC/src/pe/lim/limSendManagementFrames.c
77
192033
/* * Copyright (c) 2012, Code Aurora Forum. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /** * \file limSendManagementFrames.c * * \brief Code for preparing and sending 802.11 Management frames * * Copyright (C) 2005-2007 Airgo Networks, Incorporated * */ #include "sirApi.h" #include "aniGlobal.h" #include "sirMacProtDef.h" #ifdef FEATURE_WLAN_NON_INTEGRATED_SOC #include "halDataStruct.h" #endif #include "cfgApi.h" #include "utilsApi.h" #include "limTypes.h" #include "limUtils.h" #include "limSecurityUtils.h" #include "dot11f.h" #include "limStaHashApi.h" #include "schApi.h" #include "limSendMessages.h" #if defined WLAN_FEATURE_VOWIFI #include "rrmApi.h" #endif #ifdef FEATURE_WLAN_CCX #include <limCcxparserApi.h> #endif #include "wlan_qct_wda.h" #ifdef WLAN_FEATURE_11W #include "dot11fdefs.h" #endif //////////////////////////////////////////////////////////////////////// /// Get an integral configuration item & check return status; if it /// fails, return. 
#define CFG_LIM_GET_INT_NO_STATUS(nStatus, pMac, nItem, cfg ) \ (nStatus) = wlan_cfgGetInt( (pMac), (nItem), & (cfg) ); \ if ( eSIR_SUCCESS != (nStatus) ) \ { \ limLog( (pMac), LOGP, FL("Failed to retrieve " \ #nItem " from CFG (%d).\n"), \ (nStatus) ); \ return; \ } /// Get an text configuration item & check return status; if it fails, /// return. #define CFG_LIM_GET_STR_NO_STATUS(nStatus, pMac, nItem, cfg, nCfg, \ nMaxCfg) \ (nCfg) = (nMaxCfg); \ (nStatus) = wlan_cfgGetStr( (pMac), (nItem), (cfg), & (nCfg) ); \ if ( eSIR_SUCCESS != (nStatus) ) \ { \ limLog( (pMac), LOGP, FL("Failed to retrieve " \ #nItem " from CFG (%d).\n"), \ (nStatus) ); \ return; \ } /** * * \brief This function is called by various LIM modules to prepare the * 802.11 frame MAC header * * * \param pMac Pointer to Global MAC structure * * \param pBD Pointer to the frame buffer that needs to be populate * * \param type Type of the frame * * \param subType Subtype of the frame * * \return eHalStatus * * * The pFrameBuf argument points to the beginning of the frame buffer to * which - a) The 802.11 MAC header is set b) Following this MAC header * will be the MGMT frame payload The payload itself is populated by the * caller API * * */ tSirRetStatus limPopulateMacHeader( tpAniSirGlobal pMac, tANI_U8* pBD, tANI_U8 type, tANI_U8 subType, tSirMacAddr peerAddr ,tSirMacAddr selfMacAddr) { tSirRetStatus statusCode = eSIR_SUCCESS; tpSirMacMgmtHdr pMacHdr; /// Prepare MAC management header pMacHdr = (tpSirMacMgmtHdr) (pBD); // Prepare FC pMacHdr->fc.protVer = SIR_MAC_PROTOCOL_VERSION; pMacHdr->fc.type = type; pMacHdr->fc.subType = subType; // Prepare Address 1 palCopyMemory( pMac->hHdd, (tANI_U8 *) pMacHdr->da, (tANI_U8 *) peerAddr, sizeof( tSirMacAddr )); // Prepare Address 2 #if 0 if ((statusCode = wlan_cfgGetStr(pMac, WNI_CFG_STA_ID, (tANI_U8 *) pMacHdr->sa, &cfgLen)) != eSIR_SUCCESS) { // Could not get STA_ID from CFG. Log error. 
limLog( pMac, LOGP, FL("Failed to retrive STA_ID\n")); return statusCode; } #endif// TO SUPPORT BT-AMP sirCopyMacAddr(pMacHdr->sa,selfMacAddr); // Prepare Address 3 palCopyMemory( pMac->hHdd, (tANI_U8 *) pMacHdr->bssId, (tANI_U8 *) peerAddr, sizeof( tSirMacAddr )); return statusCode; } /*** end limPopulateMacHeader() ***/ /** * \brief limSendProbeReqMgmtFrame * * * \param pMac Pointer to Global MAC structure * * \param pSsid SSID to be sent in Probe Request frame * * \param bssid BSSID to be sent in Probe Request frame * * \param nProbeDelay probe delay to be used before sending Probe Request * frame * * \param nChannelNum Channel # on which the Probe Request is going out * * \param nAdditionalIELen if non-zero, include pAdditionalIE in the Probe Request frame * * \param pAdditionalIE if nAdditionalIELen is non zero, include this field in the Probe Request frame * * This function is called by various LIM modules to send Probe Request frame * during active scan/learn phase. * Probe request is sent out in the following scenarios: * --heartbeat failure: session needed * --join req: session needed * --foreground scan: no session * --background scan: no session * --schBeaconProcessing: to get EDCA parameters: session needed * * */ tSirRetStatus limSendProbeReqMgmtFrame(tpAniSirGlobal pMac, tSirMacSSid *pSsid, tSirMacAddr bssid, tANI_U8 nChannelNum, tSirMacAddr SelfMacAddr, tANI_U32 dot11mode, tANI_U32 nAdditionalIELen, tANI_U8 *pAdditionalIE) { tDot11fProbeRequest pr; tANI_U32 nStatus, nBytes, nPayload; tSirRetStatus nSirStatus; tANI_U8 *pFrame; void *pPacket; eHalStatus halstatus; tpPESession psessionEntry; tANI_U8 sessionId; #ifdef WLAN_FEATURE_P2P tANI_U8 *p2pIe = NULL; #endif tANI_U8 txFlag = 0; #ifndef GEN4_SCAN return eSIR_FAILURE; #endif #if defined ( ANI_DVT_DEBUG ) return eSIR_FAILURE; #endif /* * session context may or may not be present, when probe request needs to be sent out. 
* following cases exist: * --heartbeat failure: session needed * --join req: session needed * --foreground scan: no session * --background scan: no session * --schBeaconProcessing: to get EDCA parameters: session needed * If session context does not exist, some IEs will be populated from CFGs, * e.g. Supported and Extended rate set IEs */ psessionEntry = peFindSessionByBssid(pMac,bssid,&sessionId); // The scheme here is to fill out a 'tDot11fProbeRequest' structure // and then hand it off to 'dot11fPackProbeRequest' (for // serialization). We start by zero-initializing the structure: palZeroMemory( pMac->hHdd, ( tANI_U8* )&pr, sizeof( pr ) ); // & delegating to assorted helpers: PopulateDot11fSSID( pMac, pSsid, &pr.SSID ); #ifdef WLAN_FEATURE_P2P if( nAdditionalIELen && pAdditionalIE ) { p2pIe = limGetP2pIEPtr(pMac, pAdditionalIE, nAdditionalIELen); } if( p2pIe != NULL) { /* In the below API pass channel number > 14, do that it fills only * 11a rates in supported rates */ PopulateDot11fSuppRates( pMac, 15, &pr.SuppRates,psessionEntry); } else { #endif PopulateDot11fSuppRates( pMac, nChannelNum, &pr.SuppRates,psessionEntry); if ( WNI_CFG_DOT11_MODE_11B != dot11mode ) { PopulateDot11fExtSuppRates1( pMac, nChannelNum, &pr.ExtSuppRates ); } #ifdef WLAN_FEATURE_P2P } #endif #if defined WLAN_FEATURE_VOWIFI //Table 7-14 in IEEE Std. 802.11k-2008 says //DS params "can" be present in RRM is disabled and "is" present if //RRM is enabled. It should be ok even if we add it into probe req when //RRM is not enabled. PopulateDot11fDSParams( pMac, &pr.DSParams, nChannelNum, psessionEntry ); //Call RRM module to get the tx power for management used. 
{ tANI_U8 txPower = (tANI_U8) rrmGetMgmtTxPower( pMac, psessionEntry ); PopulateDot11fWFATPC( pMac, &pr.WFATPC, txPower, 0 ); } #endif pMac->lim.htCapability = IS_DOT11_MODE_HT(dot11mode); if (psessionEntry != NULL ) { psessionEntry->htCapabality = IS_DOT11_MODE_HT(dot11mode); //Include HT Capability IE if (psessionEntry->htCapabality) { PopulateDot11fHTCaps( pMac, &pr.HTCaps ); } } else { if (pMac->lim.htCapability) { PopulateDot11fHTCaps( pMac, &pr.HTCaps ); } } // That's it-- now we pack it. First, how much space are we going to // need? nStatus = dot11fGetPackedProbeRequestSize( pMac, &pr, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGP, FL("Failed to calculate the packed size f" "or a Probe Request (0x%08x).\n"), nStatus ); // We'll fall back on the worst case scenario: nPayload = sizeof( tDot11fProbeRequest ); } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while calculating" "the packed size for a Probe Request (" "0x%08x).\n"), nStatus ); } nBytes = nPayload + sizeof( tSirMacMgmtHdr ) + nAdditionalIELen; // Ok-- try to allocate some memory: halstatus = palPktAlloc( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( tANI_U16 )nBytes, ( void** ) &pFrame, ( void** ) &pPacket ); if ( ! HAL_STATUS_SUCCESS ( halstatus ) ) { limLog( pMac, LOGP, FL("Failed to allocate %d bytes for a Pro" "be Request.\n"), nBytes ); return eSIR_MEM_ALLOC_FAILED; } // Paranoia: palZeroMemory( pMac->hHdd, pFrame, nBytes ); // Next, we fill out the buffer descriptor: nSirStatus = limPopulateMacHeader( pMac, pFrame, SIR_MAC_MGMT_FRAME, SIR_MAC_MGMT_PROBE_REQ, bssid ,SelfMacAddr); if ( eSIR_SUCCESS != nSirStatus ) { limLog( pMac, LOGE, FL("Failed to populate the buffer descrip" "tor for a Probe Request (%d).\n"), nSirStatus ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return nSirStatus; // allocated! 
} // That done, pack the Probe Request: nStatus = dot11fPackProbeRequest( pMac, &pr, pFrame + sizeof( tSirMacMgmtHdr ), nPayload, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGE, FL("Failed to pack a Probe Request (0x%08x).\n"), nStatus ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return eSIR_FAILURE; // allocated! } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while packing a P" "robe Request (0x%08x).\n") ); } // Append any AddIE if present. if( nAdditionalIELen ) { palCopyMemory( pMac->hHdd, pFrame+sizeof(tSirMacMgmtHdr)+nPayload, pAdditionalIE, nAdditionalIELen ); nPayload += nAdditionalIELen; } /* If this probe request is sent during P2P Search State, then we need * to send it at OFDM rate. */ if( ( SIR_BAND_5_GHZ == limGetRFBand(nChannelNum)) #ifdef WLAN_FEATURE_P2P || (( pMac->lim.gpLimMlmScanReq != NULL) && pMac->lim.gpLimMlmScanReq->p2pSearch ) #endif ) { txFlag |= HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME; } halstatus = halTxFrame( pMac, pPacket, ( tANI_U16 ) sizeof(tSirMacMgmtHdr) + nPayload, HAL_TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS, 7,//SMAC_SWBD_TX_TID_MGMT_HIGH, limTxComplete, pFrame, txFlag ); if ( ! HAL_STATUS_SUCCESS ( halstatus ) ) { limLog( pMac, LOGE, FL("could not send Probe Request frame!\n" )); //Pkt will be freed up by the callback return eSIR_FAILURE; } return eSIR_SUCCESS; } // End limSendProbeReqMgmtFrame. 
#ifdef WLAN_FEATURE_P2P tSirRetStatus limGetAddnIeForProbeResp(tpAniSirGlobal pMac, tANI_U8* addIE, tANI_U16 *addnIELen, tANI_U8 probeReqP2pIe) { /* If Probe request doesn't have P2P IE, then take out P2P IE from additional IE */ if(!probeReqP2pIe) { tANI_U8* tempbuf = NULL; tANI_U16 tempLen = 0; int left = *addnIELen; v_U8_t *ptr = addIE; v_U8_t elem_id, elem_len; if(NULL == addIE) { PELOGE(limLog(pMac, LOGE, FL(" NULL addIE pointer"));) return eSIR_FAILURE; } if( (palAllocateMemory(pMac->hHdd, (void**)&tempbuf, left)) != eHAL_STATUS_SUCCESS) { PELOGE(limLog(pMac, LOGE, FL("Unable to allocate memory to store addn IE"));) return eSIR_MEM_ALLOC_FAILED; } while(left >= 2) { elem_id = ptr[0]; elem_len = ptr[1]; left -= 2; if(elem_len > left) { limLog( pMac, LOGE, FL("****Invalid IEs eid = %d elem_len=%d left=%d*****\n"), elem_id,elem_len,left); palFreeMemory(pMac->hHdd, tempbuf); return eSIR_FAILURE; } if ( !( (SIR_MAC_EID_VENDOR == elem_id) && (memcmp(&ptr[2], SIR_MAC_P2P_OUI, SIR_MAC_P2P_OUI_SIZE)==0) ) ) { palCopyMemory ( pMac->hHdd, tempbuf + tempLen, &ptr[0], elem_len + 2); tempLen += (elem_len + 2); } left -= elem_len; ptr += (elem_len + 2); } palCopyMemory ( pMac->hHdd, addIE, tempbuf, tempLen); *addnIELen = tempLen; palFreeMemory(pMac->hHdd, tempbuf); } return eSIR_SUCCESS; } #endif void limSendProbeRspMgmtFrame(tpAniSirGlobal pMac, tSirMacAddr peerMacAddr, tpAniSSID pSsid, short nStaId, tANI_U8 nKeepAlive, tpPESession psessionEntry, tANI_U8 probeReqP2pIe) { tDot11fProbeResponse frm; tSirRetStatus nSirStatus; tANI_U32 cfg, nPayload, nBytes, nStatus; tpSirMacMgmtHdr pMacHdr; tANI_U8 *pFrame; void *pPacket; eHalStatus halstatus; tANI_U32 addnIEPresent; tANI_U32 addnIE1Len=0; tANI_U32 addnIE2Len=0; tANI_U32 addnIE3Len=0; tANI_U16 totalAddnIeLen = 0; tANI_U32 wpsApEnable=0, tmp; tANI_U8 txFlag = 0; tANI_U8 *addIE = NULL; #ifdef WLAN_FEATURE_P2P tANI_U8 *pP2pIe = NULL; tANI_U8 noaLen = 0; tANI_U8 total_noaLen = 0; tANI_U8 noaStream[SIR_MAX_NOA_ATTR_LEN + 
SIR_P2P_IE_HEADER_LEN]; tANI_U8 noaIe[SIR_MAX_NOA_ATTR_LEN + SIR_P2P_IE_HEADER_LEN]; #endif if(pMac->gDriverType == eDRIVER_TYPE_MFG) // We don't answer requests { return; // in this case. } if(NULL == psessionEntry) { return; } // Fill out 'frm', after which we'll just hand the struct off to // 'dot11fPackProbeResponse'. palZeroMemory( pMac->hHdd, ( tANI_U8* )&frm, sizeof( frm ) ); // Timestamp to be updated by TFP, below. // Beacon Interval: #ifdef WLAN_SOFTAP_FEATURE if(psessionEntry->limSystemRole == eLIM_AP_ROLE) { frm.BeaconInterval.interval = pMac->sch.schObject.gSchBeaconInterval; } else { #endif CFG_LIM_GET_INT_NO_STATUS( nSirStatus, pMac, WNI_CFG_BEACON_INTERVAL, cfg ); frm.BeaconInterval.interval = ( tANI_U16 ) cfg; #ifdef WLAN_SOFTAP_FEATURE } #endif PopulateDot11fCapabilities( pMac, &frm.Capabilities, psessionEntry ); PopulateDot11fSSID( pMac, ( tSirMacSSid* )pSsid, &frm.SSID ); PopulateDot11fSuppRates( pMac, POPULATE_DOT11F_RATES_OPERATIONAL, &frm.SuppRates,psessionEntry); PopulateDot11fDSParams( pMac, &frm.DSParams, psessionEntry->currentOperChannel, psessionEntry); PopulateDot11fIBSSParams( pMac, &frm.IBSSParams, psessionEntry ); #ifdef ANI_PRODUCT_TYPE_AP PopulateDot11fCFParams( pMac, &frm.Capabilities, &frm.CFParams ); #endif // AP Image #ifdef WLAN_SOFTAP_FEATURE if(psessionEntry->limSystemRole == eLIM_AP_ROLE) { if(psessionEntry->wps_state != SAP_WPS_DISABLED) { PopulateDot11fProbeResWPSIEs(pMac, &frm.WscProbeRes, psessionEntry); } } else { #endif if (wlan_cfgGetInt(pMac, (tANI_U16) WNI_CFG_WPS_ENABLE, &tmp) != eSIR_SUCCESS) limLog(pMac, LOGP,"Failed to cfg get id %d\n", WNI_CFG_WPS_ENABLE ); wpsApEnable = tmp & WNI_CFG_WPS_ENABLE_AP; if (wpsApEnable) { PopulateDot11fWscInProbeRes(pMac, &frm.WscProbeRes); } if (pMac->lim.wscIeInfo.probeRespWscEnrollmentState == eLIM_WSC_ENROLL_BEGIN) { PopulateDot11fWscRegistrarInfoInProbeRes(pMac, &frm.WscProbeRes); pMac->lim.wscIeInfo.probeRespWscEnrollmentState = eLIM_WSC_ENROLL_IN_PROGRESS; } if 
(pMac->lim.wscIeInfo.wscEnrollmentState == eLIM_WSC_ENROLL_END) { DePopulateDot11fWscRegistrarInfoInProbeRes(pMac, &frm.WscProbeRes); pMac->lim.wscIeInfo.probeRespWscEnrollmentState = eLIM_WSC_ENROLL_NOOP; } #ifdef WLAN_SOFTAP_FEATURE } #endif PopulateDot11fCountry( pMac, &frm.Country, psessionEntry); PopulateDot11fEDCAParamSet( pMac, &frm.EDCAParamSet, psessionEntry); #ifdef ANI_PRODUCT_TYPE_AP if( pMac->lim.gLim11hEnable ) { PopulateDot11fPowerConstraints( pMac, &frm.PowerConstraints ); PopulateDot11fTPCReport( pMac, &frm.TPCReport, psessionEntry); // If .11h isenabled & channel switching is not already started and // we're in either PRIMARY_ONLY or PRIMARY_AND_SECONDARY state, then // populate 802.11h channel switch IE if (( pMac->lim.gLimChannelSwitch.switchCount != 0 ) && ( pMac->lim.gLimChannelSwitch.state == eLIM_CHANNEL_SWITCH_PRIMARY_ONLY || pMac->lim.gLimChannelSwitch.state == eLIM_CHANNEL_SWITCH_PRIMARY_AND_SECONDARY ) ) { PopulateDot11fChanSwitchAnn( pMac, &frm.ChanSwitchAnn ); PopulateDot11fExtChanSwitchAnn(pMac, &frm.ExtChanSwitchAnn); } } #endif if (psessionEntry->dot11mode != WNI_CFG_DOT11_MODE_11B) PopulateDot11fERPInfo( pMac, &frm.ERPInfo, psessionEntry); // N.B. In earlier implementations, the RSN IE would be placed in // the frame here, before the WPA IE, if 'RSN_BEFORE_WPA' was defined. PopulateDot11fExtSuppRates( pMac, POPULATE_DOT11F_RATES_OPERATIONAL, &frm.ExtSuppRates, psessionEntry ); //Populate HT IEs, when operating in 11n or Taurus modes. 
if ( psessionEntry->htCapabality ) { PopulateDot11fHTCaps( pMac, &frm.HTCaps ); #ifdef WLAN_SOFTAP_FEATURE PopulateDot11fHTInfo( pMac, &frm.HTInfo, psessionEntry ); #else PopulateDot11fHTInfo( pMac, &frm.HTInfo ); #endif } if ( psessionEntry->pLimStartBssReq ) { PopulateDot11fWPA( pMac, &( psessionEntry->pLimStartBssReq->rsnIE ), &frm.WPA ); PopulateDot11fRSN( pMac, &( psessionEntry->pLimStartBssReq->rsnIE ), &frm.RSN ); } PopulateDot11fWMM( pMac, &frm.WMMInfoAp, &frm.WMMParams, &frm.WMMCaps, psessionEntry ); #if defined(FEATURE_WLAN_WAPI) if( psessionEntry->pLimStartBssReq ) { PopulateDot11fWAPI( pMac, &( psessionEntry->pLimStartBssReq->rsnIE ), &frm.WAPI ); } #endif // defined(FEATURE_WLAN_WAPI) nStatus = dot11fGetPackedProbeResponseSize( pMac, &frm, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGP, FL("Failed to calculate the packed size f" "or a Probe Response (0x%08x).\n"), nStatus ); // We'll fall back on the worst case scenario: nPayload = sizeof( tDot11fProbeResponse ); } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while calculating" "the packed size for a Probe Response " "(0x%08x).\n"), nStatus ); } nBytes = nPayload + sizeof( tSirMacMgmtHdr ); addnIEPresent = false; #ifdef WLAN_FEATURE_P2P if( pMac->lim.gpLimRemainOnChanReq ) { nBytes += (pMac->lim.gpLimRemainOnChanReq->length - sizeof( tSirRemainOnChnReq ) ); } //Only use CFG for non-listen mode. 
This CFG is not working for concurrency //In listening mode, probe rsp IEs is passed in the message from SME to PE else #endif { if (wlan_cfgGetInt(pMac, WNI_CFG_PROBE_RSP_ADDNIE_FLAG, &addnIEPresent) != eSIR_SUCCESS) { limLog(pMac, LOGP, FL("Unable to get WNI_CFG_PROBE_RSP_ADDNIE_FLAG")); return; } } if (addnIEPresent) { if( (palAllocateMemory(pMac->hHdd, (void**)&addIE, WNI_CFG_PROBE_RSP_ADDNIE_DATA1_LEN*3 )) != eHAL_STATUS_SUCCESS) { PELOGE(limLog(pMac, LOGE, FL("Unable to allocate memory to store addn IE"));) return; } //Probe rsp IE available if ( eSIR_SUCCESS != wlan_cfgGetStrLen(pMac, WNI_CFG_PROBE_RSP_ADDNIE_DATA1, &addnIE1Len) ) { limLog(pMac, LOGP, FL("Unable to get WNI_CFG_PROBE_RSP_ADDNIE_DATA1 length")); palFreeMemory(pMac->hHdd, addIE); return; } if (addnIE1Len <= WNI_CFG_PROBE_RSP_ADDNIE_DATA1_LEN && addnIE1Len && (nBytes + addnIE1Len) <= SIR_MAX_PACKET_SIZE) { if ( eSIR_SUCCESS != wlan_cfgGetStr(pMac, WNI_CFG_PROBE_RSP_ADDNIE_DATA1, &addIE[0], &addnIE1Len) ) { limLog(pMac, LOGP, FL("Unable to get WNI_CFG_PROBE_RSP_ADDNIE_DATA1 String")); palFreeMemory(pMac->hHdd, addIE); return; } } //Probe rsp IE available if ( eSIR_SUCCESS != wlan_cfgGetStrLen(pMac, WNI_CFG_PROBE_RSP_ADDNIE_DATA2, &addnIE2Len) ) { limLog(pMac, LOGP, FL("Unable to get WNI_CFG_PROBE_RSP_ADDNIE_DATA2 length")); palFreeMemory(pMac->hHdd, addIE); return; } if (addnIE2Len <= WNI_CFG_PROBE_RSP_ADDNIE_DATA2_LEN && addnIE2Len && (nBytes + addnIE2Len) <= SIR_MAX_PACKET_SIZE) { if ( eSIR_SUCCESS != wlan_cfgGetStr(pMac, WNI_CFG_PROBE_RSP_ADDNIE_DATA2, &addIE[addnIE1Len], &addnIE2Len) ) { limLog(pMac, LOGP, FL("Unable to get WNI_CFG_PROBE_RSP_ADDNIE_DATA2 String")); palFreeMemory(pMac->hHdd, addIE); return; } } //Probe rsp IE available if ( eSIR_SUCCESS != wlan_cfgGetStrLen(pMac, WNI_CFG_PROBE_RSP_ADDNIE_DATA3, &addnIE3Len) ) { limLog(pMac, LOGP, FL("Unable to get WNI_CFG_PROBE_RSP_ADDNIE_DATA3 length")); palFreeMemory(pMac->hHdd, addIE); return; } if (addnIE3Len <= 
WNI_CFG_PROBE_RSP_ADDNIE_DATA3_LEN && addnIE3Len && (nBytes + addnIE3Len) <= SIR_MAX_PACKET_SIZE) { if ( eSIR_SUCCESS != wlan_cfgGetStr(pMac, WNI_CFG_PROBE_RSP_ADDNIE_DATA3, &addIE[addnIE1Len + addnIE2Len], &addnIE3Len) ) { limLog(pMac, LOGP, FL("Unable to get WNI_CFG_PROBE_RSP_ADDNIE_DATA3 String")); palFreeMemory(pMac->hHdd, addIE); return; } } totalAddnIeLen = addnIE1Len + addnIE2Len + addnIE3Len; #ifdef WLAN_FEATURE_P2P if(eSIR_SUCCESS != limGetAddnIeForProbeResp(pMac, addIE, &totalAddnIeLen, probeReqP2pIe)) { limLog(pMac, LOGP, FL("Unable to get final Additional IE for Probe Req")); palFreeMemory(pMac->hHdd, addIE); return; } nBytes = nBytes + totalAddnIeLen; if (probeReqP2pIe) { pP2pIe = limGetP2pIEPtr(pMac, &addIE[0], totalAddnIeLen); if (pP2pIe != NULL) { //get NoA attribute stream P2P IE noaLen = limGetNoaAttrStream(pMac, noaStream, psessionEntry); if (noaLen != 0) { total_noaLen = limBuildP2pIe(pMac, &noaIe[0], &noaStream[0], noaLen); nBytes = nBytes + total_noaLen; } } } #endif } halstatus = palPktAlloc( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( tANI_U16 )nBytes, ( void** ) &pFrame, ( void** ) &pPacket ); if ( ! 
HAL_STATUS_SUCCESS ( halstatus ) ) { limLog( pMac, LOGP, FL("Failed to allocate %d bytes for a Pro" "be Response.\n"), nBytes ); if ( addIE != NULL ) { palFreeMemory(pMac->hHdd, addIE); } return; } // Paranoia: palZeroMemory( pMac->hHdd, pFrame, nBytes ); // Next, we fill out the buffer descriptor: nSirStatus = limPopulateMacHeader( pMac, pFrame, SIR_MAC_MGMT_FRAME, SIR_MAC_MGMT_PROBE_RSP, peerMacAddr,psessionEntry->selfMacAddr); if ( eSIR_SUCCESS != nSirStatus ) { limLog( pMac, LOGE, FL("Failed to populate the buffer descrip" "tor for a Probe Response (%d).\n"), nSirStatus ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); if ( addIE != NULL ) { palFreeMemory(pMac->hHdd, addIE); } return; } pMacHdr = ( tpSirMacMgmtHdr ) pFrame; sirCopyMacAddr(pMacHdr->bssId,psessionEntry->bssId); // That done, pack the Probe Response: nStatus = dot11fPackProbeResponse( pMac, &frm, pFrame + sizeof(tSirMacMgmtHdr), nPayload, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGE, FL("Failed to pack a Probe Response (0x%08x).\n"), nStatus ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); if ( addIE != NULL ) { palFreeMemory(pMac->hHdd, addIE); } return; // allocated! 
} else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while packing a P" "robe Response (0x%08x).\n") ); } PELOG3(limLog( pMac, LOG3, FL("Sending Probe Response frame to ") ); limPrintMacAddr( pMac, peerMacAddr, LOG3 );) pMac->sys.probeRespond++; #ifdef WLAN_FEATURE_P2P if( pMac->lim.gpLimRemainOnChanReq ) { palCopyMemory ( pMac->hHdd, pFrame+sizeof(tSirMacMgmtHdr)+nPayload, pMac->lim.gpLimRemainOnChanReq->probeRspIe, (pMac->lim.gpLimRemainOnChanReq->length - sizeof( tSirRemainOnChnReq )) ); } #endif if ( addnIEPresent ) { if (palCopyMemory ( pMac->hHdd, pFrame+sizeof(tSirMacMgmtHdr)+nPayload, &addIE[0], totalAddnIeLen) != eHAL_STATUS_SUCCESS) { limLog(pMac, LOGP, FL("Additional Probe Rp IE request failed while Appending: %x"),halstatus); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); if ( addIE != NULL ) { palFreeMemory(pMac->hHdd, addIE); } return; } } #ifdef WLAN_FEATURE_P2P if (noaLen != 0) { if (palCopyMemory ( pMac->hHdd, &pFrame[nBytes - (total_noaLen)], &noaIe[0], total_noaLen) != eHAL_STATUS_SUCCESS) { limLog(pMac, LOGE, FL("Not able to insert NoA because of length constraint")); } } #endif if( ( SIR_BAND_5_GHZ == limGetRFBand(psessionEntry->currentOperChannel)) #ifdef WLAN_FEATURE_P2P || ( psessionEntry->pePersona == VOS_P2P_CLIENT_MODE ) || ( psessionEntry->pePersona == VOS_P2P_GO_MODE) #endif ) { txFlag |= HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME; } // Queue Probe Response frame in high priority WQ halstatus = halTxFrame( ( tHalHandle ) pMac, pPacket, ( tANI_U16 ) nBytes, HAL_TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS, 7,//SMAC_SWBD_TX_TID_MGMT_LOW, limTxComplete, pFrame, txFlag ); if ( ! HAL_STATUS_SUCCESS ( halstatus ) ) { limLog( pMac, LOGE, FL("Could not send Probe Response.\n") ); //Pkt will be freed up by the callback } if ( addIE != NULL ) { palFreeMemory(pMac->hHdd, addIE); } } // End limSendProbeRspMgmtFrame. 
void limSendAddtsReqActionFrame(tpAniSirGlobal pMac, tSirMacAddr peerMacAddr, tSirAddtsReqInfo *pAddTS, tpPESession psessionEntry) { tANI_U16 i; tANI_U8 *pFrame; tSirRetStatus nSirStatus; tDot11fAddTSRequest AddTSReq; tDot11fWMMAddTSRequest WMMAddTSReq; tANI_U32 nPayload, nBytes, nStatus; tpSirMacMgmtHdr pMacHdr; void *pPacket; #ifdef FEATURE_WLAN_CCX tANI_U32 phyMode; #endif eHalStatus halstatus; tANI_U8 txFlag = 0; if(NULL == psessionEntry) { return; } if ( ! pAddTS->wmeTspecPresent ) { palZeroMemory( pMac->hHdd, ( tANI_U8* )&AddTSReq, sizeof( AddTSReq ) ); AddTSReq.Action.action = SIR_MAC_QOS_ADD_TS_REQ; AddTSReq.DialogToken.token = pAddTS->dialogToken; AddTSReq.Category.category = SIR_MAC_ACTION_QOS_MGMT; if ( pAddTS->lleTspecPresent ) { PopulateDot11fTSPEC( &pAddTS->tspec, &AddTSReq.TSPEC ); } else { PopulateDot11fWMMTSPEC( &pAddTS->tspec, &AddTSReq.WMMTSPEC ); } if ( pAddTS->lleTspecPresent ) { AddTSReq.num_WMMTCLAS = 0; AddTSReq.num_TCLAS = pAddTS->numTclas; for ( i = 0; i < pAddTS->numTclas; ++i) { PopulateDot11fTCLAS( pMac, &pAddTS->tclasInfo[i], &AddTSReq.TCLAS[i] ); } } else { AddTSReq.num_TCLAS = 0; AddTSReq.num_WMMTCLAS = pAddTS->numTclas; for ( i = 0; i < pAddTS->numTclas; ++i) { PopulateDot11fWMMTCLAS( pMac, &pAddTS->tclasInfo[i], &AddTSReq.WMMTCLAS[i] ); } } if ( pAddTS->tclasProcPresent ) { if ( pAddTS->lleTspecPresent ) { AddTSReq.TCLASSPROC.processing = pAddTS->tclasProc; AddTSReq.TCLASSPROC.present = 1; } else { AddTSReq.WMMTCLASPROC.version = 1; AddTSReq.WMMTCLASPROC.processing = pAddTS->tclasProc; AddTSReq.WMMTCLASPROC.present = 1; } } nStatus = dot11fGetPackedAddTSRequestSize( pMac, &AddTSReq, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGP, FL("Failed to calculate the packed size f" "or an Add TS Request (0x%08x).\n"), nStatus ); // We'll fall back on the worst case scenario: nPayload = sizeof( tDot11fAddTSRequest ); } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while calculating" "the 
packed size for an Add TS Request" " (0x%08x).\n"), nStatus ); } } else { palZeroMemory( pMac->hHdd, ( tANI_U8* )&WMMAddTSReq, sizeof( WMMAddTSReq ) ); WMMAddTSReq.Action.action = SIR_MAC_QOS_ADD_TS_REQ; WMMAddTSReq.DialogToken.token = pAddTS->dialogToken; WMMAddTSReq.Category.category = SIR_MAC_ACTION_WME; // WMM spec 2.2.10 - status code is only filled in for ADDTS response WMMAddTSReq.StatusCode.statusCode = 0; PopulateDot11fWMMTSPEC( &pAddTS->tspec, &WMMAddTSReq.WMMTSPEC ); #ifdef FEATURE_WLAN_CCX limGetPhyMode(pMac, &phyMode, psessionEntry); if( phyMode == WNI_CFG_PHY_MODE_11G || phyMode == WNI_CFG_PHY_MODE_11A) { pAddTS->tsrsIE.rates[0] = TSRS_11AG_RATE_6MBPS; } else { pAddTS->tsrsIE.rates[0] = TSRS_11B_RATE_5_5MBPS; } PopulateDot11TSRSIE(pMac,&pAddTS->tsrsIE, &WMMAddTSReq.CCXTrafStrmRateSet,sizeof(tANI_U8)); #endif // fillWmeTspecIE nStatus = dot11fGetPackedWMMAddTSRequestSize( pMac, &WMMAddTSReq, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGP, FL("Failed to calculate the packed size f" "or a WMM Add TS Request (0x%08x).\n"), nStatus ); // We'll fall back on the worst case scenario: nPayload = sizeof( tDot11fAddTSRequest ); } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while calculating" "the packed size for a WMM Add TS Requ" "est (0x%08x).\n"), nStatus ); } } nBytes = nPayload + sizeof( tSirMacMgmtHdr ); halstatus = palPktAlloc( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( tANI_U16 )nBytes, ( void** ) &pFrame, ( void** ) &pPacket ); if ( ! 
HAL_STATUS_SUCCESS ( halstatus ) ) { limLog( pMac, LOGP, FL("Failed to allocate %d bytes for an Ad" "d TS Request.\n"), nBytes ); return; } // Paranoia: palZeroMemory( pMac->hHdd, pFrame, nBytes ); // Next, we fill out the buffer descriptor: nSirStatus = limPopulateMacHeader( pMac, pFrame, SIR_MAC_MGMT_FRAME, SIR_MAC_MGMT_ACTION, peerMacAddr,psessionEntry->selfMacAddr); if ( eSIR_SUCCESS != nSirStatus ) { limLog( pMac, LOGE, FL("Failed to populate the buffer descrip" "tor for an Add TS Request (%d).\n"), nSirStatus ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return; } pMacHdr = ( tpSirMacMgmtHdr ) pFrame; #if 0 cfgLen = SIR_MAC_ADDR_LENGTH; if ( eSIR_SUCCESS != wlan_cfgGetStr( pMac, WNI_CFG_BSSID, ( tANI_U8* )pMacHdr->bssId, &cfgLen ) ) { limLog( pMac, LOGP, FL("Failed to retrieve WNI_CFG_BSSID whil" "e sending an Add TS Request.\n") ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return; } #endif //TO SUPPORT BT-AMP sirCopyMacAddr(pMacHdr->bssId,psessionEntry->bssId); // That done, pack the struct: if ( ! pAddTS->wmeTspecPresent ) { nStatus = dot11fPackAddTSRequest( pMac, &AddTSReq, pFrame + sizeof(tSirMacMgmtHdr), nPayload, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGE, FL("Failed to pack an Add TS Request " "(0x%08x).\n"), nStatus ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return; // allocated! } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while packing" "an Add TS Request (0x%08x).\n") ); } } else { nStatus = dot11fPackWMMAddTSRequest( pMac, &WMMAddTSReq, pFrame + sizeof(tSirMacMgmtHdr), nPayload, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGE, FL("Failed to pack a WMM Add TS Reque" "st (0x%08x).\n"), nStatus ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return; // allocated! 
} else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while packing" "a WMM Add TS Request (0x%08x).\n") ); } } PELOG3(limLog( pMac, LOG3, FL("Sending an Add TS Request frame to ") ); limPrintMacAddr( pMac, peerMacAddr, LOG3 );) if( ( SIR_BAND_5_GHZ == limGetRFBand(psessionEntry->currentOperChannel)) #ifdef WLAN_FEATURE_P2P || ( psessionEntry->pePersona == VOS_P2P_CLIENT_MODE ) || ( psessionEntry->pePersona == VOS_P2P_GO_MODE) #endif ) { txFlag |= HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME; } // Queue Addts Response frame in high priority WQ halstatus = halTxFrame( pMac, pPacket, ( tANI_U16 ) nBytes, HAL_TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS, 7,//SMAC_SWBD_TX_TID_MGMT_HIGH, limTxComplete, pFrame, txFlag ); if ( ! HAL_STATUS_SUCCESS ( halstatus ) ) { limLog( pMac, LOGE, FL( "*** Could not send an Add TS Request" " (%X) ***\n" ), halstatus ); //Pkt will be freed up by the callback } } // End limSendAddtsReqActionFrame. /* Added ANI_PRODUCT_TYPE_CLIENT for BT-AMP Support */ #ifdef ANI_PRODUCT_TYPE_AP void limSendAssocRspMgmtFrame(tpAniSirGlobal pMac, tANI_U16 statusCode, tANI_U16 aid, tSirMacAddr peerMacAddr, tANI_U8 subType, tpDphHashNode pSta, tpPESession psessionEntry) { tDot11fAssocResponse frm; tANI_U8 *pFrame, *macAddr; tpSirMacMgmtHdr pMacHdr; tSirRetStatus nSirStatus; tANI_U8 lleMode = 0, fAddTS, edcaInclude = 0; tHalBitVal qosMode, wmeMode; tANI_U32 nPayload, nBytes, nStatus, cfgLen; void *pPacket; eHalStatus halstatus; tUpdateBeaconParams beaconParams; tANI_U32 wpsApEnable=0, tmp; tANI_U8 txFlag = 0; palZeroMemory( pMac->hHdd, ( tANI_U8* )&frm, sizeof( frm ) ); limGetQosMode(pMac, &qosMode); limGetWmeMode(pMac, &wmeMode); // An Add TS IE is added only if the AP supports it and the requesting // STA sent a traffic spec. fAddTS = ( qosMode && pSta && pSta->qos.addtsPresent ) ? 
1 : 0; PopulateDot11fCapabilities( pMac, &frm.Capabilities, psessionEntry); frm.Status.status = statusCode; frm.AID.associd = aid | LIM_AID_MASK; PopulateDot11fSuppRates( pMac, POPULATE_DOT11F_RATES_OPERATIONAL, &frm.SuppRates ); if (wlan_cfgGetInt(pMac, (tANI_U16) WNI_CFG_WPS_ENABLE, &tmp) != eSIR_SUCCESS) limLog(pMac, LOGP,"Failed to cfg get id %d\n", WNI_CFG_WPS_ENABLE ); wpsApEnable = tmp & WNI_CFG_WPS_ENABLE_AP; if (wpsApEnable) { PopulateDot11fWscInAssocRes(pMac, &frm.WscAssocRes); } PopulateDot11fExtSuppRates( pMac, POPULATE_DOT11F_RATES_OPERATIONAL, &frm.ExtSuppRates, psessionEntry ); if ( NULL != pSta ) { if ( eHAL_SET == qosMode ) { if ( pSta->lleEnabled ) { lleMode = 1; if ( ( ! pSta->aniPeer ) || ( ! PROP_CAPABILITY_GET( 11EQOS, pSta->propCapability ) ) ) { PopulateDot11fEDCAParamSet( pMac, &frm.EDCAParamSet, psessionEntry); // FramesToDo:... // if ( fAddTS ) // { // tANI_U8 *pAf = pBody; // *pAf++ = SIR_MAC_QOS_ACTION_EID; // tANI_U32 tlen; // status = sirAddtsRspFill(pMac, pAf, statusCode, &pSta->qos.addts, NULL, // &tlen, bufLen - frameLen); // } // End if on Add TS. } } // End if on .11e enabled in 'pSta'. } // End if on QOS Mode on. if ( ( ! lleMode ) && ( eHAL_SET == wmeMode ) && pSta->wmeEnabled ) { if ( ( ! pSta->aniPeer ) || ( ! PROP_CAPABILITY_GET( WME, pSta->propCapability ) ) ) { PopulateDot11fWMMParams( pMac, &frm.WMMParams ); if ( pSta->wsmEnabled ) { PopulateDot11fWMMCaps(&frm.WMMCaps ); } } } if ( pSta->aniPeer ) { if ( ( lleMode && PROP_CAPABILITY_GET( 11EQOS, pSta->propCapability ) ) || ( pSta->wmeEnabled && PROP_CAPABILITY_GET( WME, pSta->propCapability ) ) ) { edcaInclude = 1; } } // End if on Airgo peer. if ( pSta->mlmStaContext.htCapability && pMac->lim.htCapability ) { PopulateDot11fHTCaps( pMac, &frm.HTCaps ); PopulateDot11fHTInfo( pMac, &frm.HTInfo ); } } // End if on non-NULL 'pSta'. 
if(pMac->lim.gLimProtectionControl != WNI_CFG_FORCE_POLICY_PROTECTION_DISABLE) limDecideApProtection(pMac, peerMacAddr, &beaconParams); limUpdateShortPreamble(pMac, peerMacAddr, &beaconParams); limUpdateShortSlotTime(pMac, peerMacAddr, &beaconParams); //Send message to HAL about beacon parameter change. if(beaconParams.paramChangeBitmap) { schSetFixedBeaconFields(pMac,psessionEntry); limSendBeaconParams(pMac, &beaconParams, psessionEntry ); } // Allocate a buffer for this frame: nStatus = dot11fGetPackedAssocResponseSize( pMac, &frm, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGE, FL("Failed to calculate the packed size f" "or an Association Response (0x%08x).\n"), nStatus ); return; } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while calculating" "the packed size for an Association Re" "sponse (0x%08x).\n"), nStatus ); } nBytes = sizeof( tSirMacMgmtHdr ) + nPayload; halstatus = palPktAlloc( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( tANI_U16 )nBytes, ( void** ) &pFrame, ( void** ) &pPacket ); if ( ! HAL_STATUS_SUCCESS ( halstatus ) ) { limLog(pMac, LOGP, FL("Call to bufAlloc failed for RE/ASSOC RSP.\n")); return; } // Paranoia: palZeroMemory( pMac->hHdd, pFrame, nBytes ); // Next, we fill out the buffer descriptor: nSirStatus = limPopulateMacHeader( pMac, pFrame, SIR_MAC_MGMT_FRAME, ( LIM_ASSOC == subType ) ? 
SIR_MAC_MGMT_ASSOC_RSP : SIR_MAC_MGMT_REASSOC_RSP, peerMacAddr); if ( eSIR_SUCCESS != nSirStatus ) { limLog( pMac, LOGE, FL("Failed to populate the buffer descrip" "tor for an Association Response (%d).\n"), nSirStatus ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return; } pMacHdr = ( tpSirMacMgmtHdr ) pFrame; cfgLen = SIR_MAC_ADDR_LENGTH; if ( eSIR_SUCCESS != wlan_cfgGetStr( pMac, WNI_CFG_BSSID, ( tANI_U8* )pMacHdr->bssId, &cfgLen ) ) { limLog( pMac, LOGP, FL("Failed to retrieve WNI_CFG_BSSID whil" "e sending an Association Response.\n") ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return; // allocated! } nStatus = dot11fPackAssocResponse( pMac, &frm, pFrame + sizeof( tSirMacMgmtHdr ), nPayload, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGE, FL("Failed to pack an Association Response (0x%08x).\n"), nStatus ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return; // allocated! } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while packing an " "Association Response (0x%08x).\n") ); } macAddr = pMacHdr->da; if (subType == LIM_ASSOC) limLog(pMac, LOG1, FL("*** Sending Assoc Resp status %d aid %d to "), statusCode, aid); else limLog(pMac, LOG1, FL("*** Sending ReAssoc Resp status %d aid %d to "), statusCode, aid); limPrintMacAddr(pMac, pMacHdr->da, LOG1); if( ( SIR_BAND_5_GHZ == limGetRFBand(psessionEntry->currentOperChannel)) #ifdef WLAN_FEATURE_P2P || ( psessionEntry->pePersona == VOS_P2P_CLIENT_MODE ) || ( psessionEntry->pePersona == VOS_P2P_GO_MODE) #endif ) { txFlag |= HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME; } /// Queue Association Response frame in high priority WQ halstatus = halTxFrame( pMac, pPacket, ( tANI_U16 ) nBytes, HAL_TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS, 7,//SMAC_SWBD_TX_TID_MGMT_HIGH, limTxComplete, pFrame, txFlag ); if ( ! 
HAL_STATUS_SUCCESS ( halstatus ) ) { limLog(pMac, LOGE, FL("*** Could not Send Re/AssocRsp, retCode=%X ***\n"), nSirStatus); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, (void *) pFrame, (void *) pPacket ); } // update the ANI peer station count //FIXME_PROTECTION : take care of different type of station // counter inside this function. limUtilCountStaAdd(pMac, pSta, psessionEntry); } // End limSendAssocRspMgmtFrame. #endif // ANI_PRODUCT_TYPE_AP void limSendAssocRspMgmtFrame(tpAniSirGlobal pMac, tANI_U16 statusCode, tANI_U16 aid, tSirMacAddr peerMacAddr, tANI_U8 subType, tpDphHashNode pSta,tpPESession psessionEntry) { static tDot11fAssocResponse frm; tANI_U8 *pFrame, *macAddr; tpSirMacMgmtHdr pMacHdr; tSirRetStatus nSirStatus; tANI_U8 lleMode = 0, fAddTS, edcaInclude = 0; tHalBitVal qosMode, wmeMode; tANI_U32 nPayload, nBytes, nStatus; void *pPacket; eHalStatus halstatus; tUpdateBeaconParams beaconParams; tANI_U8 txFlag = 0; tANI_U32 addnIEPresent = false; tANI_U32 addnIELen=0; tANI_U8 addIE[WNI_CFG_ASSOC_RSP_ADDNIE_DATA_LEN]; tpSirAssocReq pAssocReq = NULL; if(NULL == psessionEntry) { return; } palZeroMemory( pMac->hHdd, ( tANI_U8* )&frm, sizeof( frm ) ); limGetQosMode(psessionEntry, &qosMode); limGetWmeMode(psessionEntry, &wmeMode); // An Add TS IE is added only if the AP supports it and the requesting // STA sent a traffic spec. fAddTS = ( qosMode && pSta && pSta->qos.addtsPresent ) ? 
1 : 0; PopulateDot11fCapabilities( pMac, &frm.Capabilities, psessionEntry ); frm.Status.status = statusCode; frm.AID.associd = aid | LIM_AID_MASK; if ( NULL == pSta ) { PopulateDot11fSuppRates( pMac, POPULATE_DOT11F_RATES_OPERATIONAL, &frm.SuppRates,psessionEntry); PopulateDot11fExtSuppRates( pMac, POPULATE_DOT11F_RATES_OPERATIONAL, &frm.ExtSuppRates, psessionEntry ); } else { PopulateDot11fAssocRspRates( pMac, &frm.SuppRates, &frm.ExtSuppRates, pSta->supportedRates.llbRates, pSta->supportedRates.llaRates ); } #ifdef WLAN_SOFTAP_FEATURE if(psessionEntry->limSystemRole == eLIM_AP_ROLE) { if( pSta != NULL && eSIR_SUCCESS == statusCode ) { pAssocReq = (tpSirAssocReq) psessionEntry->parsedAssocReq[pSta->assocId]; #ifdef WLAN_FEATURE_P2P /* populate P2P IE in AssocRsp when assocReq from the peer includes P2P IE */ if( pAssocReq != NULL && pAssocReq->addIEPresent ) { PopulateDot11AssocResP2PIE(pMac, &frm.P2PAssocRes, pAssocReq); } #endif } } #endif if ( NULL != pSta ) { if ( eHAL_SET == qosMode ) { if ( pSta->lleEnabled ) { lleMode = 1; if ( ( ! pSta->aniPeer ) || ( ! PROP_CAPABILITY_GET( 11EQOS, pSta->propCapability ) ) ) { PopulateDot11fEDCAParamSet( pMac, &frm.EDCAParamSet, psessionEntry); // FramesToDo:... // if ( fAddTS ) // { // tANI_U8 *pAf = pBody; // *pAf++ = SIR_MAC_QOS_ACTION_EID; // tANI_U32 tlen; // status = sirAddtsRspFill(pMac, pAf, statusCode, &pSta->qos.addts, NULL, // &tlen, bufLen - frameLen); // } // End if on Add TS. } } // End if on .11e enabled in 'pSta'. } // End if on QOS Mode on. if ( ( ! lleMode ) && ( eHAL_SET == wmeMode ) && pSta->wmeEnabled ) { if ( ( ! pSta->aniPeer ) || ( ! 
PROP_CAPABILITY_GET( WME, pSta->propCapability ) ) ) { #ifdef WLAN_SOFTAP_FEATURE PopulateDot11fWMMParams( pMac, &frm.WMMParams, psessionEntry); #else PopulateDot11fWMMParams( pMac, &frm.WMMParams ); #endif if ( pSta->wsmEnabled ) { PopulateDot11fWMMCaps(&frm.WMMCaps ); } } } if ( pSta->aniPeer ) { if ( ( lleMode && PROP_CAPABILITY_GET( 11EQOS, pSta->propCapability ) ) || ( pSta->wmeEnabled && PROP_CAPABILITY_GET( WME, pSta->propCapability ) ) ) { edcaInclude = 1; } } // End if on Airgo peer. if ( pSta->mlmStaContext.htCapability && psessionEntry->htCapabality ) { PopulateDot11fHTCaps( pMac, &frm.HTCaps ); #ifdef WLAN_SOFTAP_FEATURE PopulateDot11fHTInfo( pMac, &frm.HTInfo, psessionEntry ); #else PopulateDot11fHTInfo( pMac, &frm.HTInfo ); #endif } } // End if on non-NULL 'pSta'. palZeroMemory( pMac->hHdd, ( tANI_U8* )&beaconParams, sizeof( tUpdateBeaconParams) ); #ifdef WLAN_SOFTAP_FEATURE if( psessionEntry->limSystemRole == eLIM_AP_ROLE ){ if(psessionEntry->gLimProtectionControl != WNI_CFG_FORCE_POLICY_PROTECTION_DISABLE) limDecideApProtection(pMac, peerMacAddr, &beaconParams,psessionEntry); } #endif limUpdateShortPreamble(pMac, peerMacAddr, &beaconParams, psessionEntry); limUpdateShortSlotTime(pMac, peerMacAddr, &beaconParams, psessionEntry); beaconParams.bssIdx = psessionEntry->bssIdx; //Send message to HAL about beacon parameter change. 
if(beaconParams.paramChangeBitmap) { schSetFixedBeaconFields(pMac,psessionEntry); limSendBeaconParams(pMac, &beaconParams, psessionEntry ); } // Allocate a buffer for this frame: nStatus = dot11fGetPackedAssocResponseSize( pMac, &frm, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGE, FL("Failed to calculate the packed size f" "or an Association Response (0x%08x).\n"), nStatus ); return; } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while calculating" "the packed size for an Association Re" "sponse (0x%08x).\n"), nStatus ); } nBytes = sizeof( tSirMacMgmtHdr ) + nPayload; if ( pAssocReq != NULL ) { if (wlan_cfgGetInt(pMac, WNI_CFG_ASSOC_RSP_ADDNIE_FLAG, &addnIEPresent) != eSIR_SUCCESS) { limLog(pMac, LOGP, FL("Unable to get WNI_CFG_ASSOC_RSP_ADDNIE_FLAG")); return; } if (addnIEPresent) { //Assoc rsp IE available if (wlan_cfgGetStrLen(pMac, WNI_CFG_ASSOC_RSP_ADDNIE_DATA, &addnIELen) != eSIR_SUCCESS) { limLog(pMac, LOGP, FL("Unable to get WNI_CFG_ASSOC_RSP_ADDNIE_DATA length")); return; } if (addnIELen <= WNI_CFG_ASSOC_RSP_ADDNIE_DATA_LEN && addnIELen && (nBytes + addnIELen) <= SIR_MAX_PACKET_SIZE) { if (wlan_cfgGetStr(pMac, WNI_CFG_ASSOC_RSP_ADDNIE_DATA, &addIE[0], &addnIELen) == eSIR_SUCCESS) { nBytes = nBytes + addnIELen; } } } } halstatus = palPktAlloc( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( tANI_U16 )nBytes, ( void** ) &pFrame, ( void** ) &pPacket ); if ( ! HAL_STATUS_SUCCESS ( halstatus ) ) { limLog(pMac, LOGP, FL("Call to bufAlloc failed for RE/ASSOC RSP.\n")); return; } // Paranoia: palZeroMemory( pMac->hHdd, pFrame, nBytes ); // Next, we fill out the buffer descriptor: nSirStatus = limPopulateMacHeader( pMac, pFrame, SIR_MAC_MGMT_FRAME, ( LIM_ASSOC == subType ) ? 
SIR_MAC_MGMT_ASSOC_RSP : SIR_MAC_MGMT_REASSOC_RSP, peerMacAddr,psessionEntry->selfMacAddr); if ( eSIR_SUCCESS != nSirStatus ) { limLog( pMac, LOGE, FL("Failed to populate the buffer descrip" "tor for an Association Response (%d).\n"), nSirStatus ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return; } pMacHdr = ( tpSirMacMgmtHdr ) pFrame; #if 0 cfgLen = SIR_MAC_ADDR_LENGTH; if ( eSIR_SUCCESS != cfgGetStr( pMac, WNI_CFG_BSSID, ( tANI_U8* )pMacHdr->bssId, &cfgLen ) ) { limLog( pMac, LOGP, FL("Failed to retrieve WNI_CFG_BSSID whil" "e sending an Association Response.\n") ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return; // allocated! } #endif //TO SUPPORT BT-AMP sirCopyMacAddr(pMacHdr->bssId,psessionEntry->bssId); nStatus = dot11fPackAssocResponse( pMac, &frm, pFrame + sizeof( tSirMacMgmtHdr ), nPayload, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGE, FL("Failed to pack an Association Response (0x%08x).\n"), nStatus ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return; // allocated! 
} else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while packing an " "Association Response (0x%08x).\n") ); } macAddr = pMacHdr->da; if (subType == LIM_ASSOC) { PELOG1(limLog(pMac, LOG1, FL("*** Sending Assoc Resp status %d aid %d to "), statusCode, aid);) } else{ PELOG1(limLog(pMac, LOG1, FL("*** Sending ReAssoc Resp status %d aid %d to "), statusCode, aid);) } PELOG1(limPrintMacAddr(pMac, pMacHdr->da, LOG1);) if ( addnIEPresent ) { if (palCopyMemory ( pMac->hHdd, pFrame+sizeof(tSirMacMgmtHdr)+nPayload, &addIE[0], addnIELen ) != eHAL_STATUS_SUCCESS) { limLog(pMac, LOGP, FL("Additional Assoc IEs request failed while Appending: %x\n"),halstatus); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return; } } if( ( SIR_BAND_5_GHZ == limGetRFBand(psessionEntry->currentOperChannel)) #ifdef WLAN_FEATURE_P2P || ( psessionEntry->pePersona == VOS_P2P_CLIENT_MODE ) || ( psessionEntry->pePersona == VOS_P2P_GO_MODE) #endif ) { txFlag |= HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME; } /// Queue Association Response frame in high priority WQ halstatus = halTxFrame( pMac, pPacket, ( tANI_U16 ) nBytes, HAL_TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS, 7,//SMAC_SWBD_TX_TID_MGMT_HIGH, limTxComplete, pFrame, txFlag ); if ( ! HAL_STATUS_SUCCESS ( halstatus ) ) { limLog(pMac, LOGE, FL("*** Could not Send Re/AssocRsp, retCode=%X ***\n"), nSirStatus); //Pkt will be freed up by the callback } // update the ANI peer station count //FIXME_PROTECTION : take care of different type of station // counter inside this function. limUtilCountStaAdd(pMac, pSta, psessionEntry); } // End limSendAssocRspMgmtFrame. 
void limSendAddtsRspActionFrame(tpAniSirGlobal pMac, tSirMacAddr peer, tANI_U16 nStatusCode, tSirAddtsReqInfo *pAddTS, tSirMacScheduleIE *pSchedule, tpPESession psessionEntry) { tANI_U8 *pFrame; tpSirMacMgmtHdr pMacHdr; tDot11fAddTSResponse AddTSRsp; tDot11fWMMAddTSResponse WMMAddTSRsp; tSirRetStatus nSirStatus; tANI_U32 i, nBytes, nPayload, nStatus; void *pPacket; eHalStatus halstatus; tANI_U8 txFlag = 0; if(NULL == psessionEntry) { return; } if ( ! pAddTS->wmeTspecPresent ) { palZeroMemory( pMac->hHdd, ( tANI_U8* )&AddTSRsp, sizeof( AddTSRsp ) ); AddTSRsp.Category.category = SIR_MAC_ACTION_QOS_MGMT; AddTSRsp.Action.action = SIR_MAC_QOS_ADD_TS_RSP; AddTSRsp.DialogToken.token = pAddTS->dialogToken; AddTSRsp.Status.status = nStatusCode; // The TsDelay information element is only filled in for a specific // status code: if ( eSIR_MAC_TS_NOT_CREATED_STATUS == nStatusCode ) { if ( pAddTS->wsmTspecPresent ) { AddTSRsp.WMMTSDelay.version = 1; AddTSRsp.WMMTSDelay.delay = 10; AddTSRsp.WMMTSDelay.present = 1; } else { AddTSRsp.TSDelay.delay = 10; AddTSRsp.TSDelay.present = 1; } } if ( pAddTS->wsmTspecPresent ) { PopulateDot11fWMMTSPEC( &pAddTS->tspec, &AddTSRsp.WMMTSPEC ); } else { PopulateDot11fTSPEC( &pAddTS->tspec, &AddTSRsp.TSPEC ); } if ( pAddTS->wsmTspecPresent ) { AddTSRsp.num_WMMTCLAS = 0; AddTSRsp.num_TCLAS = pAddTS->numTclas; for ( i = 0; i < AddTSRsp.num_TCLAS; ++i) { PopulateDot11fTCLAS( pMac, &pAddTS->tclasInfo[i], &AddTSRsp.TCLAS[i] ); } } else { AddTSRsp.num_TCLAS = 0; AddTSRsp.num_WMMTCLAS = pAddTS->numTclas; for ( i = 0; i < AddTSRsp.num_WMMTCLAS; ++i) { PopulateDot11fWMMTCLAS( pMac, &pAddTS->tclasInfo[i], &AddTSRsp.WMMTCLAS[i] ); } } if ( pAddTS->tclasProcPresent ) { if ( pAddTS->wsmTspecPresent ) { AddTSRsp.WMMTCLASPROC.version = 1; AddTSRsp.WMMTCLASPROC.processing = pAddTS->tclasProc; AddTSRsp.WMMTCLASPROC.present = 1; } else { AddTSRsp.TCLASSPROC.processing = pAddTS->tclasProc; AddTSRsp.TCLASSPROC.present = 1; } } // schedule element is included only if 
requested in the tspec and we are // using hcca (or both edca and hcca) // 11e-D8.0 is inconsistent on whether the schedule element is included // based on tspec schedule bit or not. Sec 7.4.2.2. says one thing but // pg 46, line 17-18 says something else. So just include it and let the // sta figure it out if ((pSchedule != NULL) && ((pAddTS->tspec.tsinfo.traffic.accessPolicy == SIR_MAC_ACCESSPOLICY_HCCA) || (pAddTS->tspec.tsinfo.traffic.accessPolicy == SIR_MAC_ACCESSPOLICY_BOTH))) { if ( pAddTS->wsmTspecPresent ) { PopulateDot11fWMMSchedule( pSchedule, &AddTSRsp.WMMSchedule ); } else { PopulateDot11fSchedule( pSchedule, &AddTSRsp.Schedule ); } } nStatus = dot11fGetPackedAddTSResponseSize( pMac, &AddTSRsp, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGP, FL("Failed to calculate the packed si" "ze for an Add TS Response (0x%08x).\n"), nStatus ); // We'll fall back on the worst case scenario: nPayload = sizeof( tDot11fAddTSResponse ); } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while calcula" "tingthe packed size for an Add TS" " Response (0x%08x).\n"), nStatus ); } } else { palZeroMemory( pMac->hHdd, ( tANI_U8* )&WMMAddTSRsp, sizeof( WMMAddTSRsp ) ); WMMAddTSRsp.Category.category = SIR_MAC_ACTION_WME; WMMAddTSRsp.Action.action = SIR_MAC_QOS_ADD_TS_RSP; WMMAddTSRsp.DialogToken.token = pAddTS->dialogToken; WMMAddTSRsp.StatusCode.statusCode = (tANI_U8)nStatusCode; PopulateDot11fWMMTSPEC( &pAddTS->tspec, &WMMAddTSRsp.WMMTSPEC ); nStatus = dot11fGetPackedWMMAddTSResponseSize( pMac, &WMMAddTSRsp, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGP, FL("Failed to calculate the packed si" "ze for a WMM Add TS Response (0x%08x).\n"), nStatus ); // We'll fall back on the worst case scenario: nPayload = sizeof( tDot11fWMMAddTSResponse ); } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while calcula" "tingthe packed size for a WMM Add" "TS Response (0x%08x).\n"), 
nStatus ); } } nBytes = nPayload + sizeof( tSirMacMgmtHdr ); halstatus = palPktAlloc( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( tANI_U16 )nBytes, ( void** ) &pFrame, ( void** ) &pPacket ); if ( ! HAL_STATUS_SUCCESS ( halstatus ) ) { limLog( pMac, LOGP, FL("Failed to allocate %d bytes for an Ad" "d TS Response.\n"), nBytes ); return; } // Paranoia: palZeroMemory( pMac->hHdd, pFrame, nBytes ); // Next, we fill out the buffer descriptor: nSirStatus = limPopulateMacHeader( pMac, pFrame, SIR_MAC_MGMT_FRAME, SIR_MAC_MGMT_ACTION, peer,psessionEntry->selfMacAddr); if ( eSIR_SUCCESS != nSirStatus ) { limLog( pMac, LOGE, FL("Failed to populate the buffer descrip" "tor for an Add TS Response (%d).\n"), nSirStatus ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return; // allocated! } pMacHdr = ( tpSirMacMgmtHdr ) pFrame; #if 0 if ( eSIR_SUCCESS != wlan_cfgGetStr( pMac, WNI_CFG_BSSID, ( tANI_U8* )pMacHdr->bssId, &cfgLen ) ) { limLog( pMac, LOGP, FL("Failed to retrieve WNI_CFG_BSSID whil" "e sending an Add TS Response.\n") ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return; // allocated! } #endif //TO SUPPORT BT-AMP sirCopyMacAddr(pMacHdr->bssId,psessionEntry->bssId); // That done, pack the struct: if ( ! 
pAddTS->wmeTspecPresent ) { nStatus = dot11fPackAddTSResponse( pMac, &AddTSRsp, pFrame + sizeof( tSirMacMgmtHdr ), nPayload, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGE, FL("Failed to pack an Add TS Response " "(0x%08x).\n"), nStatus ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return; } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while packing" "an Add TS Response (0x%08x).\n") ); } } else { nStatus = dot11fPackWMMAddTSResponse( pMac, &WMMAddTSRsp, pFrame + sizeof( tSirMacMgmtHdr ), nPayload, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGE, FL("Failed to pack a WMM Add TS Response " "(0x%08x).\n"), nStatus ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return; } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while packing" "a WMM Add TS Response (0x%08x).\n") ); } } PELOG1(limLog( pMac, LOG1, FL("Sending an Add TS Response (status %d) to "), nStatusCode ); limPrintMacAddr( pMac, pMacHdr->da, LOG1 );) if( ( SIR_BAND_5_GHZ == limGetRFBand(psessionEntry->currentOperChannel)) #ifdef WLAN_FEATURE_P2P || ( psessionEntry->pePersona == VOS_P2P_CLIENT_MODE ) || ( psessionEntry->pePersona == VOS_P2P_GO_MODE) #endif ) { txFlag |= HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME; } // Queue the frame in high priority WQ: halstatus = halTxFrame( pMac, pPacket, ( tANI_U16 ) nBytes, HAL_TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS, 7,//SMAC_SWBD_TX_TID_MGMT_HIGH, limTxComplete, pFrame, txFlag ); if ( ! HAL_STATUS_SUCCESS ( halstatus ) ) { limLog( pMac, LOGE, FL("Failed to send Add TS Response (%X)!\n"), nSirStatus ); //Pkt will be freed up by the callback } } // End limSendAddtsRspActionFrame. 
void limSendDeltsReqActionFrame(tpAniSirGlobal pMac, tSirMacAddr peer, tANI_U8 wmmTspecPresent, tSirMacTSInfo *pTsinfo, tSirMacTspecIE *pTspecIe, tpPESession psessionEntry) { tANI_U8 *pFrame; tpSirMacMgmtHdr pMacHdr; tDot11fDelTS DelTS; tDot11fWMMDelTS WMMDelTS; tSirRetStatus nSirStatus; tANI_U32 nBytes, nPayload, nStatus; void *pPacket; eHalStatus halstatus; tANI_U8 txFlag = 0; if(NULL == psessionEntry) { return; } if ( ! wmmTspecPresent ) { palZeroMemory( pMac->hHdd, ( tANI_U8* )&DelTS, sizeof( DelTS ) ); DelTS.Category.category = SIR_MAC_ACTION_QOS_MGMT; DelTS.Action.action = SIR_MAC_QOS_DEL_TS_REQ; PopulateDot11fTSInfo( pTsinfo, &DelTS.TSInfo ); nStatus = dot11fGetPackedDelTSSize( pMac, &DelTS, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGP, FL("Failed to calculate the packed si" "ze for a Del TS (0x%08x).\n"), nStatus ); // We'll fall back on the worst case scenario: nPayload = sizeof( tDot11fDelTS ); } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while calcula" "ting the packed size for a Del TS" " (0x%08x).\n"), nStatus ); } } else { palZeroMemory( pMac->hHdd, ( tANI_U8* )&WMMDelTS, sizeof( WMMDelTS ) ); WMMDelTS.Category.category = SIR_MAC_ACTION_WME; WMMDelTS.Action.action = SIR_MAC_QOS_DEL_TS_REQ; WMMDelTS.DialogToken.token = 0; WMMDelTS.StatusCode.statusCode = 0; PopulateDot11fWMMTSPEC( pTspecIe, &WMMDelTS.WMMTSPEC ); nStatus = dot11fGetPackedWMMDelTSSize( pMac, &WMMDelTS, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGP, FL("Failed to calculate the packed si" "ze for a WMM Del TS (0x%08x).\n"), nStatus ); // We'll fall back on the worst case scenario: nPayload = sizeof( tDot11fDelTS ); } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while calcula" "ting the packed size for a WMM De" "l TS (0x%08x).\n"), nStatus ); } } nBytes = nPayload + sizeof( tSirMacMgmtHdr ); halstatus = palPktAlloc( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( tANI_U16 )nBytes, 
( void** ) &pFrame, ( void** ) &pPacket ); if ( ! HAL_STATUS_SUCCESS ( halstatus ) ) { limLog( pMac, LOGP, FL("Failed to allocate %d bytes for an Ad" "d TS Response.\n"), nBytes ); return; } // Paranoia: palZeroMemory( pMac->hHdd, pFrame, nBytes ); // Next, we fill out the buffer descriptor: nSirStatus = limPopulateMacHeader( pMac, pFrame, SIR_MAC_MGMT_FRAME, SIR_MAC_MGMT_ACTION, peer, psessionEntry->selfMacAddr); if ( eSIR_SUCCESS != nSirStatus ) { limLog( pMac, LOGE, FL("Failed to populate the buffer descrip" "tor for an Add TS Response (%d).\n"), nSirStatus ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return; // allocated! } pMacHdr = ( tpSirMacMgmtHdr ) pFrame; #if 0 cfgLen = SIR_MAC_ADDR_LENGTH; if ( eSIR_SUCCESS != wlan_cfgGetStr( pMac, WNI_CFG_BSSID, ( tANI_U8* )pMacHdr->bssId, &cfgLen ) ) { limLog( pMac, LOGP, FL("Failed to retrieve WNI_CFG_BSSID whil" "e sending an Add TS Response.\n") ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return; // allocated! } #endif //TO SUPPORT BT-AMP sirCopyMacAddr(pMacHdr->bssId, psessionEntry->bssId); // That done, pack the struct: if ( !wmmTspecPresent ) { nStatus = dot11fPackDelTS( pMac, &DelTS, pFrame + sizeof( tSirMacMgmtHdr ), nPayload, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGE, FL("Failed to pack a Del TS frame (0x%08x).\n"), nStatus ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return; // allocated! 
} else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while packing" "a Del TS frame (0x%08x).\n") ); } } else { nStatus = dot11fPackWMMDelTS( pMac, &WMMDelTS, pFrame + sizeof( tSirMacMgmtHdr ), nPayload, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGE, FL("Failed to pack a WMM Del TS frame (0x%08x).\n"), nStatus ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return; // allocated! } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while packing" "a WMM Del TS frame (0x%08x).\n") ); } } PELOG1(limLog(pMac, LOG1, FL("Sending DELTS REQ (size %d) to "), nBytes); limPrintMacAddr(pMac, pMacHdr->da, LOG1);) if( ( SIR_BAND_5_GHZ == limGetRFBand(psessionEntry->currentOperChannel)) #ifdef WLAN_FEATURE_P2P || ( psessionEntry->pePersona == VOS_P2P_CLIENT_MODE ) || ( psessionEntry->pePersona == VOS_P2P_GO_MODE) #endif ) { txFlag |= HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME; } halstatus = halTxFrame( pMac, pPacket, ( tANI_U16 ) nBytes, HAL_TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS, 7,//SMAC_SWBD_TX_TID_MGMT_HIGH, limTxComplete, pFrame, txFlag ); if ( ! HAL_STATUS_SUCCESS ( halstatus ) ) { limLog( pMac, LOGE, FL("Failed to send Del TS (%X)!\n"), nSirStatus ); //Pkt will be freed up by the callback } } // End limSendDeltsReqActionFrame. 
void limSendAssocReqMgmtFrame(tpAniSirGlobal pMac, tLimMlmAssocReq *pMlmAssocReq, tpPESession psessionEntry) { tDot11fAssocRequest frm; tANI_U16 caps; tANI_U8 *pFrame; tSirRetStatus nSirStatus; tLimMlmAssocCnf mlmAssocCnf; tANI_U32 nBytes, nPayload, nStatus; tANI_U8 fQosEnabled, fWmeEnabled, fWsmEnabled; void *pPacket; eHalStatus halstatus; tANI_U16 nAddIELen; tANI_U8 *pAddIE; tANI_U8 *wpsIe = NULL; #if defined WLAN_FEATURE_VOWIFI tANI_U8 PowerCapsPopulated = FALSE; #endif tANI_U8 txFlag = 0; if(NULL == psessionEntry) { return; } if(NULL == psessionEntry->pLimJoinReq) { return; } /* check this early to avoid unncessary operation */ if(NULL == psessionEntry->pLimJoinReq) { return; } nAddIELen = psessionEntry->pLimJoinReq->addIEAssoc.length; pAddIE = psessionEntry->pLimJoinReq->addIEAssoc.addIEdata; palZeroMemory( pMac->hHdd, ( tANI_U8* )&frm, sizeof( frm ) ); caps = pMlmAssocReq->capabilityInfo; if ( PROP_CAPABILITY_GET( 11EQOS, psessionEntry->limCurrentBssPropCap ) ) ((tSirMacCapabilityInfo *) &caps)->qos = 0; #if defined(FEATURE_WLAN_WAPI) /* CR: 262463 : According to WAPI standard: 7.3.1.4 Capability Information field In WAPI, non-AP STAs within an ESS set the Privacy subfield to 0 in transmitted Association or Reassociation management frames. APs ignore the Privacy subfield within received Association and Reassociation management frames. 
*/ if ( psessionEntry->encryptType == eSIR_ED_WPI) ((tSirMacCapabilityInfo *) &caps)->privacy = 0; #endif swapBitField16(caps, ( tANI_U16* )&frm.Capabilities ); frm.ListenInterval.interval = pMlmAssocReq->listenInterval; PopulateDot11fSSID2( pMac, &frm.SSID ); PopulateDot11fSuppRates( pMac, POPULATE_DOT11F_RATES_OPERATIONAL, &frm.SuppRates,psessionEntry); fQosEnabled = ( psessionEntry->limQosEnabled) && SIR_MAC_GET_QOS( psessionEntry->limCurrentBssCaps ); fWmeEnabled = ( psessionEntry->limWmeEnabled ) && LIM_BSS_CAPS_GET( WME, psessionEntry->limCurrentBssQosCaps ); // We prefer .11e asociations: if ( fQosEnabled ) fWmeEnabled = false; fWsmEnabled = ( psessionEntry->limWsmEnabled ) && fWmeEnabled && LIM_BSS_CAPS_GET( WSM, psessionEntry->limCurrentBssQosCaps ); if ( psessionEntry->lim11hEnable && psessionEntry->pLimJoinReq->spectrumMgtIndicator == eSIR_TRUE ) { #if defined WLAN_FEATURE_VOWIFI PowerCapsPopulated = TRUE; PopulateDot11fPowerCaps( pMac, &frm.PowerCaps, LIM_ASSOC,psessionEntry); #endif PopulateDot11fSuppChannels( pMac, &frm.SuppChannels, LIM_ASSOC,psessionEntry); } #if defined WLAN_FEATURE_VOWIFI if( pMac->rrm.rrmPEContext.rrmEnable && SIR_MAC_GET_RRM( psessionEntry->limCurrentBssCaps ) ) { if (PowerCapsPopulated == FALSE) { PowerCapsPopulated = TRUE; PopulateDot11fPowerCaps(pMac, &frm.PowerCaps, LIM_ASSOC, psessionEntry); } } #endif if ( fQosEnabled && ( ! PROP_CAPABILITY_GET(11EQOS, psessionEntry->limCurrentBssPropCap))) PopulateDot11fQOSCapsStation( pMac, &frm.QOSCapsStation ); PopulateDot11fExtSuppRates( pMac, POPULATE_DOT11F_RATES_OPERATIONAL, &frm.ExtSuppRates, psessionEntry ); #if defined WLAN_FEATURE_VOWIFI if( pMac->rrm.rrmPEContext.rrmEnable && SIR_MAC_GET_RRM( psessionEntry->limCurrentBssCaps ) ) { PopulateDot11fRRMIe( pMac, &frm.RRMEnabledCap, psessionEntry ); } #endif // The join request *should* contain zero or one of the WPA and RSN // IEs. 
The payload send along with the request is a // 'tSirSmeJoinReq'; the IE portion is held inside a 'tSirRSNie': // typedef struct sSirRSNie // { // tANI_U16 length; // tANI_U8 rsnIEdata[SIR_MAC_MAX_IE_LENGTH+2]; // } tSirRSNie, *tpSirRSNie; // So, we should be able to make the following two calls harmlessly, // since they do nothing if they don't find the given IE in the // bytestream with which they're provided. // The net effect of this will be to faithfully transmit whatever // security IE is in the join request. // *However*, if we're associating for the purpose of WPS // enrollment, and we've been configured to indicate that by // eliding the WPA or RSN IE, we just skip this: if( nAddIELen && pAddIE ) { wpsIe = limGetWscIEPtr (pMac, pAddIE, nAddIELen); } if ( NULL == wpsIe ) { PopulateDot11fRSNOpaque( pMac, &( psessionEntry->pLimJoinReq->rsnIE ), &frm.RSNOpaque ); PopulateDot11fWPAOpaque( pMac, &( psessionEntry->pLimJoinReq->rsnIE ), &frm.WPAOpaque ); #if defined(FEATURE_WLAN_WAPI) PopulateDot11fWAPIOpaque( pMac, &( psessionEntry->pLimJoinReq->rsnIE ), &frm.WAPIOpaque ); #endif // defined(FEATURE_WLAN_WAPI) } // include WME EDCA IE as well if ( fWmeEnabled ) { if ( ! PROP_CAPABILITY_GET( WME, psessionEntry->limCurrentBssPropCap ) ) { PopulateDot11fWMMInfoStation( pMac, &frm.WMMInfoStation ); } if ( fWsmEnabled && ( ! PROP_CAPABILITY_GET(WSM, psessionEntry->limCurrentBssPropCap ))) { PopulateDot11fWMMCaps( &frm.WMMCaps ); } } //Populate HT IEs, when operating in 11n or Taurus modes AND //when AP is also operating in 11n mode. if ( psessionEntry->htCapabality && pMac->lim.htCapabilityPresentInBeacon) { PopulateDot11fHTCaps( pMac, &frm.HTCaps ); #ifdef DISABLE_GF_FOR_INTEROP /* * To resolve the interop problem with Broadcom AP, * where TQ STA could not pass traffic with GF enabled, * TQ STA will do Greenfield only with TQ AP, for * everybody else it will be turned off. 
*/ if( (psessionEntry->pLimJoinReq != NULL) && (!psessionEntry->pLimJoinReq->bssDescription.aniIndicator)) { limLog( pMac, LOG1, FL("Sending Assoc Req to Non-TQ AP, Turning off Greenfield")); frm.HTCaps.greenField = WNI_CFG_GREENFIELD_CAPABILITY_DISABLE; } #endif } #if defined WLAN_FEATURE_VOWIFI_11R if (psessionEntry->pLimJoinReq->is11Rconnection) { #if defined WLAN_FEATURE_VOWIFI_11R_DEBUG limLog( pMac, LOGE, FL("mdie = %02x %02x %02x\n"), (unsigned int)psessionEntry->pLimJoinReq->bssDescription.mdie[0], (unsigned int)psessionEntry->pLimJoinReq->bssDescription.mdie[1], (unsigned int)psessionEntry->pLimJoinReq->bssDescription.mdie[2]); #endif PopulateMDIE( pMac, &frm.MobilityDomain, psessionEntry->pLimJoinReq->bssDescription.mdie); } else { // No 11r IEs dont send any MDIE limLog( pMac, LOGE, FL("mdie not present\n")); } #endif #ifdef FEATURE_WLAN_CCX // For CCX Associations fill the CCX IEs if (psessionEntry->isCCXconnection) { PopulateDot11fCCXRadMgmtCap(&frm.CCXRadMgmtCap); PopulateDot11fCCXVersion(&frm.CCXVersion); } #endif nStatus = dot11fGetPackedAssocRequestSize( pMac, &frm, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGP, FL("Failed to calculate the packed size f" "or an Association Request (0x%08x).\n"), nStatus ); // We'll fall back on the worst case scenario: nPayload = sizeof( tDot11fAssocRequest ); } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while calculating" "the packed size for an Association Re " "quest(0x%08x).\n"), nStatus ); } nBytes = nPayload + sizeof( tSirMacMgmtHdr ) + nAddIELen; halstatus = palPktAlloc( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( tANI_U16 )nBytes, ( void** ) &pFrame, ( void** ) &pPacket ); if ( ! 
HAL_STATUS_SUCCESS ( halstatus ) ) { limLog( pMac, LOGP, FL("Failed to allocate %d bytes for an As" "sociation Request.\n"), nBytes ); psessionEntry->limMlmState = psessionEntry->limPrevMlmState; MTRACE(macTrace(pMac, TRACE_CODE_MLM_STATE, 0, pMac->lim.gLimMlmState)); /* Update PE session id*/ mlmAssocCnf.sessionId = psessionEntry->peSessionId; mlmAssocCnf.resultCode = eSIR_SME_RESOURCES_UNAVAILABLE; palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); limPostSmeMessage( pMac, LIM_MLM_ASSOC_CNF, ( tANI_U32* ) &mlmAssocCnf); return; } // Paranoia: palZeroMemory( pMac->hHdd, pFrame, nBytes ); // Next, we fill out the buffer descriptor: nSirStatus = limPopulateMacHeader( pMac, pFrame, SIR_MAC_MGMT_FRAME, SIR_MAC_MGMT_ASSOC_REQ, psessionEntry->bssId,psessionEntry->selfMacAddr); if ( eSIR_SUCCESS != nSirStatus ) { limLog( pMac, LOGE, FL("Failed to populate the buffer descrip" "tor for an Association Request (%d).\n"), nSirStatus ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return; } // That done, pack the Probe Request: nStatus = dot11fPackAssocRequest( pMac, &frm, pFrame + sizeof(tSirMacMgmtHdr), nPayload, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGE, FL("Failed to pack a Probe Response (0x%0" "8x).\n"), nStatus ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return; } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while packing a P" "robe Response (0x%08x).\n") ); } PELOG1(limLog( pMac, LOG1, FL("*** Sending Association Request length %d" "to \n"), nBytes );) // limPrintMacAddr( pMac, bssid, LOG1 ); if( psessionEntry->assocReq != NULL ) { palFreeMemory(pMac->hHdd, psessionEntry->assocReq); psessionEntry->assocReq = NULL; } if( nAddIELen ) { palCopyMemory( pMac->hHdd, pFrame + sizeof(tSirMacMgmtHdr) + nPayload, pAddIE, nAddIELen ); nPayload += nAddIELen; } if( 
(palAllocateMemory(pMac->hHdd, (void**)&psessionEntry->assocReq, nPayload)) != eHAL_STATUS_SUCCESS) { PELOGE(limLog(pMac, LOGE, FL("Unable to allocate memory to store assoc request"));) } else { //Store the Assoc request. This is sent to csr/hdd in join cnf response. palCopyMemory(pMac->hHdd, psessionEntry->assocReq, pFrame + sizeof(tSirMacMgmtHdr), nPayload); psessionEntry->assocReqLen = nPayload; } if( ( SIR_BAND_5_GHZ == limGetRFBand(psessionEntry->currentOperChannel)) #ifdef WLAN_FEATURE_P2P || ( psessionEntry->pePersona == VOS_P2P_CLIENT_MODE ) || ( psessionEntry->pePersona == VOS_P2P_GO_MODE) #endif ) { txFlag |= HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME; } halstatus = halTxFrame( pMac, pPacket, ( tANI_U16 ) (sizeof(tSirMacMgmtHdr) + nPayload), HAL_TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS, 7,//SMAC_SWBD_TX_TID_MGMT_HIGH, limTxComplete, pFrame, txFlag ); if ( ! HAL_STATUS_SUCCESS ( halstatus ) ) { limLog( pMac, LOGE, FL("Failed to send Association Request (%X)!\n"), halstatus ); //Pkt will be freed up by the callback return; } // Free up buffer allocated for mlmAssocReq palFreeMemory( pMac->hHdd, ( tANI_U8* ) pMlmAssocReq ); } // End limSendAssocReqMgmtFrame #if defined WLAN_FEATURE_VOWIFI_11R || defined FEATURE_WLAN_CCX /*------------------------------------------------------------------------------------ * * Send Reassoc Req with FTIEs. 
 *
 *-----------------------------------------------------------------------------------
 */
/**
 * \brief Build and transmit an 802.11 Reassociation Request carrying 11r
 *        Fast Transition IEs (and optionally CCX IEs).
 *
 * \param pMac           Pointer to the global MAC structure.
 * \param pMlmReassocReq MLM reassociation request (capability info, listen
 *                       interval); freed at the 'end' label in the function
 *                       tail (outside this excerpt).
 * \param psessionEntry  PE session being reassociated.
 *
 * The FT IEs cached in pMac->ft.ftSmeContext are appended verbatim after the
 * dot11f-packed payload and the additional IEs, then the whole buffer is
 * handed to halTxFrame().
 *
 * NOTE(review): 'frm' is static, presumably to keep this large struct off
 * the stack -- this makes the function non-reentrant; confirm all callers
 * run on the single PE/MLM context.
 */
void limSendReassocReqWithFTIEsMgmtFrame(tpAniSirGlobal pMac,
    tLimMlmReassocReq *pMlmReassocReq,tpPESession psessionEntry)
{
    static tDot11fReAssocRequest frm;
    tANI_U16 caps;
    tANI_U8 *pFrame;
    tSirRetStatus nSirStatus;
    tANI_U32 nBytes, nPayload, nStatus;
    tANI_U8 fQosEnabled, fWmeEnabled, fWsmEnabled;
    void *pPacket;
    eHalStatus halstatus;
#if defined WLAN_FEATURE_VOWIFI
    tANI_U8 PowerCapsPopulated = FALSE;
#endif
    tANI_U16 ft_ies_length = 0;
    tANI_U8 *pBody;
    tANI_U16 nAddIELen;
    tANI_U8 *pAddIE;
#ifdef FEATURE_WLAN_CCX
    tANI_U8 *wpsIe = NULL;
#endif
    tANI_U8 txFlag = 0;

    if (NULL == psessionEntry)
    {
        return;
    }

#if defined WLAN_FEATURE_VOWIFI_11R
    // An 11r reassociation without cached FT IEs cannot be sent.
    if (psessionEntry->is11Rconnection)
    {
        if (pMac->ft.ftSmeContext.reassoc_ft_ies_length == 0)
        {
            return;
        }
    }
#endif

    /* check this early to avoid unncessary operation */
    if(NULL == psessionEntry->pLimReAssocReq)
    {
        return;
    }
    nAddIELen = psessionEntry->pLimReAssocReq->addIEAssoc.length;
    pAddIE = psessionEntry->pLimReAssocReq->addIEAssoc.addIEdata;
    limLog( pMac, LOGE, FL("limSendReassocReqWithFTIEsMgmtFrame received in "
                           "state (%d).\n"), psessionEntry->limMlmState);

    palZeroMemory( pMac->hHdd, ( tANI_U8* )&frm, sizeof( frm ) );

    caps = pMlmReassocReq->capabilityInfo;
    if (PROP_CAPABILITY_GET(11EQOS, psessionEntry->limReassocBssPropCap))
        ((tSirMacCapabilityInfo *) &caps)->qos = 0;
#if defined(FEATURE_WLAN_WAPI)
    /* CR: 262463 :
       According to WAPI standard:
       7.3.1.4 Capability Information field
       In WAPI, non-AP STAs within an ESS set the Privacy subfield to 0 in
       transmitted Association or Reassociation management frames. APs ignore
       the Privacy subfield within received Association and Reassociation
       management frames. */
    if ( psessionEntry->encryptType == eSIR_ED_WPI)
        ((tSirMacCapabilityInfo *) &caps)->privacy = 0;
#endif
    swapBitField16(caps, ( tANI_U16* )&frm.Capabilities );

    frm.ListenInterval.interval = pMlmReassocReq->listenInterval;

    // Get the old bssid of the older AP.
    palCopyMemory( pMac->hHdd, ( tANI_U8* )frm.CurrentAPAddress.mac,
            pMac->ft.ftPEContext.pFTPreAuthReq->currbssId, 6);

    PopulateDot11fSSID2( pMac, &frm.SSID );
    PopulateDot11fSuppRates( pMac, POPULATE_DOT11F_RATES_OPERATIONAL,
            &frm.SuppRates,psessionEntry);

    fQosEnabled = ( psessionEntry->limQosEnabled) &&
        SIR_MAC_GET_QOS( psessionEntry->limReassocBssCaps );

    fWmeEnabled = ( psessionEntry->limWmeEnabled ) &&
        LIM_BSS_CAPS_GET( WME, psessionEntry->limReassocBssQosCaps );

    fWsmEnabled = ( psessionEntry->limWsmEnabled ) && fWmeEnabled &&
        LIM_BSS_CAPS_GET( WSM, psessionEntry->limReassocBssQosCaps );

    if ( psessionEntry->lim11hEnable &&
         psessionEntry->pLimReAssocReq->spectrumMgtIndicator == eSIR_TRUE )
    {
#if defined WLAN_FEATURE_VOWIFI
        PowerCapsPopulated = TRUE;
        PopulateDot11fPowerCaps( pMac, &frm.PowerCaps, LIM_REASSOC,psessionEntry);
        PopulateDot11fSuppChannels( pMac, &frm.SuppChannels, LIM_REASSOC,psessionEntry);
#endif
    }

#if defined WLAN_FEATURE_VOWIFI
    if( pMac->rrm.rrmPEContext.rrmEnable &&
        SIR_MAC_GET_RRM( psessionEntry->limCurrentBssCaps ) )
    {
        if (PowerCapsPopulated == FALSE)
        {
            PowerCapsPopulated = TRUE;
            PopulateDot11fPowerCaps(pMac, &frm.PowerCaps, LIM_REASSOC, psessionEntry);
        }
    }
#endif

    if ( fQosEnabled &&
         ( ! PROP_CAPABILITY_GET(11EQOS, psessionEntry->limReassocBssPropCap ) ))
    {
        PopulateDot11fQOSCapsStation( pMac, &frm.QOSCapsStation );
    }

    PopulateDot11fExtSuppRates( pMac, POPULATE_DOT11F_RATES_OPERATIONAL,
            &frm.ExtSuppRates, psessionEntry );

#if defined WLAN_FEATURE_VOWIFI
    // NOTE(review): this check uses limReassocBssCaps while the PowerCaps
    // check above uses limCurrentBssCaps -- verify the asymmetry is intended.
    if( pMac->rrm.rrmPEContext.rrmEnable &&
        SIR_MAC_GET_RRM( psessionEntry->limReassocBssCaps ) )
    {
        PopulateDot11fRRMIe( pMac, &frm.RRMEnabledCap, psessionEntry );
    }
#endif

    // Ideally this should be enabled for 11r also. But 11r does
    // not follow the usual norm of using the Opaque object
    // for rsnie and fties. Instead we just add
    // the rsnie and fties at the end of the pack routine for 11r.
    // This should ideally! be fixed.
#ifdef FEATURE_WLAN_CCX
    //
    // The join request *should* contain zero or one of the WPA and RSN
    // IEs. The payload send along with the request is a
    // 'tSirSmeJoinReq'; the IE portion is held inside a 'tSirRSNie':
    //     typedef struct sSirRSNie
    //     {
    //         tANI_U16       length;
    //         tANI_U8        rsnIEdata[SIR_MAC_MAX_IE_LENGTH+2];
    //     } tSirRSNie, *tpSirRSNie;
    // So, we should be able to make the following two calls harmlessly,
    // since they do nothing if they don't find the given IE in the
    // bytestream with which they're provided.
    // The net effect of this will be to faithfully transmit whatever
    // security IE is in the join request.
    // *However*, if we're associating for the purpose of WPS
    // enrollment, and we've been configured to indicate that by
    // eliding the WPA or RSN IE, we just skip this:
    if (!psessionEntry->is11Rconnection)
    {
        if( nAddIELen && pAddIE )
        {
            wpsIe = limGetWscIEPtr(pMac, pAddIE, nAddIELen);
        }
        if ( NULL == wpsIe )
        {
            PopulateDot11fRSNOpaque( pMac, &( psessionEntry->pLimReAssocReq->rsnIE ),
                    &frm.RSNOpaque );
            PopulateDot11fWPAOpaque( pMac, &( psessionEntry->pLimReAssocReq->rsnIE ),
                    &frm.WPAOpaque );
        }

        if(psessionEntry->pLimReAssocReq->cckmIE.length)
        {
            PopulateDot11fCCXCckmOpaque( pMac, &( psessionEntry->pLimReAssocReq->cckmIE ),
                    &frm.CCXCckmOpaque );
        }
    }

    // For CCX Associations fill the CCX IEs
    if (psessionEntry->isCCXconnection)
    {
        PopulateDot11fCCXRadMgmtCap(&frm.CCXRadMgmtCap);
        PopulateDot11fCCXVersion(&frm.CCXVersion);
    }
#endif

    // include WME EDCA IE as well
    if ( fWmeEnabled )
    {
        if ( ! PROP_CAPABILITY_GET( WME, psessionEntry->limReassocBssPropCap ) )
        {
            PopulateDot11fWMMInfoStation( pMac, &frm.WMMInfoStation );
        }

        if ( fWsmEnabled &&
             ( ! PROP_CAPABILITY_GET(WSM, psessionEntry->limReassocBssPropCap )))
        {
            PopulateDot11fWMMCaps( &frm.WMMCaps );
        }
#ifdef FEATURE_WLAN_CCX
        if (psessionEntry->isCCXconnection)
        {
            PopulateDot11fReAssocTspec(pMac, &frm, psessionEntry);

            // Populate the TSRS IE if TSPEC is included in the reassoc request
            if (psessionEntry->pLimReAssocReq->ccxTspecInfo.numTspecs)
            {
                tANI_U32 phyMode;
                tSirMacCCXTSRSIE tsrsIE;

                limGetPhyMode(pMac, &phyMode, psessionEntry);

                tsrsIE.tsid = 0;
                // Minimum rate depends on the PHY: 6 Mbps for 11a/g,
                // 5.5 Mbps otherwise (11b).
                if( phyMode == WNI_CFG_PHY_MODE_11G || phyMode == WNI_CFG_PHY_MODE_11A)
                {
                    tsrsIE.rates[0] = TSRS_11AG_RATE_6MBPS;
                }
                else
                {
                    tsrsIE.rates[0] = TSRS_11B_RATE_5_5MBPS;
                }
                PopulateDot11TSRSIE(pMac,&tsrsIE, &frm.CCXTrafStrmRateSet, sizeof(tANI_U8));
            }
        }
#endif
    }

    if ( psessionEntry->htCapabality &&
         pMac->lim.htCapabilityPresentInBeacon)
    {
        PopulateDot11fHTCaps( pMac, &frm.HTCaps );
    }

    nStatus = dot11fGetPackedReAssocRequestSize( pMac, &frm, &nPayload );
    if ( DOT11F_FAILED( nStatus ) )
    {
        limLog( pMac, LOGP, FL("Failed to calculate the packed size f"
                    "or a Re-Association Request (0x%08x).\n"),
                nStatus );
        // We'll fall back on the worst case scenario:
        nPayload = sizeof( tDot11fReAssocRequest );
    }
    else if ( DOT11F_WARNED( nStatus ) )
    {
        limLog( pMac, LOGW, FL("There were warnings while calculating"
                    "the packed size for a Re-Association Re "
                    "quest(0x%08x).\n"), nStatus );
    }

    // NOTE(review): stray second ';' below -- harmless but should be removed.
    nBytes = nPayload + sizeof( tSirMacMgmtHdr ) + nAddIELen;
    ;

#ifdef WLAN_FEATURE_VOWIFI_11R_DEBUG
    limLog( pMac, LOGE, FL("FT IE Reassoc Req (%d).\n"),
            pMac->ft.ftSmeContext.reassoc_ft_ies_length);
#endif

#if defined WLAN_FEATURE_VOWIFI_11R
    if (psessionEntry->is11Rconnection)
    {
        ft_ies_length = pMac->ft.ftSmeContext.reassoc_ft_ies_length;
    }
#endif

    // The FT IEs ride at the tail of the buffer, after payload + add-IEs.
    halstatus = palPktAlloc( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT,
            ( tANI_U16 )nBytes+ft_ies_length, ( void** ) &pFrame,
            ( void** ) &pPacket );
    if ( !
HAL_STATUS_SUCCESS ( halstatus ) )
    {
        // NOTE(review): this logs nSirStatus, but the failing call set
        // halstatus -- the wrong status is reported.
        limLog( pMac, LOGE, FL("Failed to send Re-Association Request"
                    "(%X)!\n"),
                nSirStatus );
        //Pkt will be freed up by the callback
        goto end;
    }

end:
    // Free up buffer allocated for mlmAssocReq
    palFreeMemory( pMac->hHdd, ( tANI_U8* ) pMlmReassocReq );
    psessionEntry->pLimMlmReassocReq = NULL;
}
#endif /* WLAN_FEATURE_VOWIFI_11R */

/**
 * \brief Build and transmit a plain (non-FT) 802.11 Reassociation Request.
 *
 * \param pMac           Pointer to the global MAC structure.
 * \param pMlmReassocReq MLM reassociation request (capability info, listen
 *                       interval); freed at the 'end' label.
 * \param psessionEntry  PE session being reassociated.
 *
 * Mirrors limSendAssocReqMgmtFrame() but packs a tDot11fReAssocRequest and
 * fills CurrentAPAddress from the session's current BSSID.
 *
 * NOTE(review): 'frm' is static, presumably to keep this large struct off
 * the stack -- not reentrant; confirm single-threaded PE usage.
 */
void limSendReassocReqMgmtFrame(tpAniSirGlobal pMac,
        tLimMlmReassocReq *pMlmReassocReq,tpPESession psessionEntry)
{
    static tDot11fReAssocRequest frm;
    tANI_U16 caps;
    tANI_U8 *pFrame;
    tSirRetStatus nSirStatus;
    tANI_U32 nBytes, nPayload, nStatus;
    tANI_U8 fQosEnabled, fWmeEnabled, fWsmEnabled;
    void *pPacket;
    eHalStatus halstatus;
    tANI_U16 nAddIELen;
    tANI_U8 *pAddIE;
    tANI_U8 *wpsIe = NULL;
    tANI_U8 txFlag = 0;
#if defined WLAN_FEATURE_VOWIFI
    tANI_U8 PowerCapsPopulated = FALSE;
#endif

    if(NULL == psessionEntry)
    {
        return;
    }

    /* check this early to avoid unncessary operation */
    if(NULL == psessionEntry->pLimReAssocReq)
    {
        return;
    }
    nAddIELen = psessionEntry->pLimReAssocReq->addIEAssoc.length;
    pAddIE = psessionEntry->pLimReAssocReq->addIEAssoc.addIEdata;

    palZeroMemory( pMac->hHdd, ( tANI_U8* )&frm, sizeof( frm ) );

    caps = pMlmReassocReq->capabilityInfo;
    if (PROP_CAPABILITY_GET(11EQOS, psessionEntry->limReassocBssPropCap))
        ((tSirMacCapabilityInfo *) &caps)->qos = 0;
#if defined(FEATURE_WLAN_WAPI)
    /* CR: 262463 :
       According to WAPI standard:
       7.3.1.4 Capability Information field
       In WAPI, non-AP STAs within an ESS set the Privacy subfield to 0 in
       transmitted Association or Reassociation management frames. APs ignore
       the Privacy subfield within received Association and Reassociation
       management frames. */
    if ( psessionEntry->encryptType == eSIR_ED_WPI)
        ((tSirMacCapabilityInfo *) &caps)->privacy = 0;
#endif
    swapBitField16(caps, ( tANI_U16* )&frm.Capabilities );

    frm.ListenInterval.interval = pMlmReassocReq->listenInterval;

    // Current AP address is the BSSID of the session we are leaving.
    palCopyMemory( pMac->hHdd, ( tANI_U8* )frm.CurrentAPAddress.mac,
            ( tANI_U8* )psessionEntry->bssId, 6 );

    PopulateDot11fSSID2( pMac, &frm.SSID );
    PopulateDot11fSuppRates( pMac, POPULATE_DOT11F_RATES_OPERATIONAL,
            &frm.SuppRates,psessionEntry);

    fQosEnabled = ( psessionEntry->limQosEnabled ) &&
        SIR_MAC_GET_QOS( psessionEntry->limReassocBssCaps );

    fWmeEnabled = ( psessionEntry->limWmeEnabled ) &&
        LIM_BSS_CAPS_GET( WME, psessionEntry->limReassocBssQosCaps );

    fWsmEnabled = ( psessionEntry->limWsmEnabled ) && fWmeEnabled &&
        LIM_BSS_CAPS_GET( WSM, psessionEntry->limReassocBssQosCaps );

    if ( psessionEntry->lim11hEnable &&
         psessionEntry->pLimReAssocReq->spectrumMgtIndicator == eSIR_TRUE )
    {
#if defined WLAN_FEATURE_VOWIFI
        PowerCapsPopulated = TRUE;
        PopulateDot11fPowerCaps( pMac, &frm.PowerCaps, LIM_REASSOC,psessionEntry);
        PopulateDot11fSuppChannels( pMac, &frm.SuppChannels, LIM_REASSOC,psessionEntry);
#endif
    }

#if defined WLAN_FEATURE_VOWIFI
    if( pMac->rrm.rrmPEContext.rrmEnable &&
        SIR_MAC_GET_RRM( psessionEntry->limCurrentBssCaps ) )
    {
        if (PowerCapsPopulated == FALSE)
        {
            PowerCapsPopulated = TRUE;
            PopulateDot11fPowerCaps(pMac, &frm.PowerCaps, LIM_REASSOC, psessionEntry);
        }
    }
#endif

    if ( fQosEnabled &&
         ( ! PROP_CAPABILITY_GET(11EQOS, psessionEntry->limReassocBssPropCap ) ))
    {
        PopulateDot11fQOSCapsStation( pMac, &frm.QOSCapsStation );
    }

    PopulateDot11fExtSuppRates( pMac, POPULATE_DOT11F_RATES_OPERATIONAL,
            &frm.ExtSuppRates, psessionEntry );

#if defined WLAN_FEATURE_VOWIFI
    if( pMac->rrm.rrmPEContext.rrmEnable &&
        SIR_MAC_GET_RRM( psessionEntry->limReassocBssCaps ) )
    {
        PopulateDot11fRRMIe( pMac, &frm.RRMEnabledCap, psessionEntry );
    }
#endif

    // The join request *should* contain zero or one of the WPA and RSN
    // IEs. The payload send along with the request is a
    // 'tSirSmeJoinReq'; the IE portion is held inside a 'tSirRSNie':
    //     typedef struct sSirRSNie
    //     {
    //         tANI_U16       length;
    //         tANI_U8        rsnIEdata[SIR_MAC_MAX_IE_LENGTH+2];
    //     } tSirRSNie, *tpSirRSNie;
    // So, we should be able to make the following two calls harmlessly,
    // since they do nothing if they don't find the given IE in the
    // bytestream with which they're provided.
    // The net effect of this will be to faithfully transmit whatever
    // security IE is in the join request.
    // *However*, if we're associating for the purpose of WPS
    // enrollment, and we've been configured to indicate that by
    // eliding the WPA or RSN IE, we just skip this:
    if( nAddIELen && pAddIE )
    {
        wpsIe = limGetWscIEPtr(pMac, pAddIE, nAddIELen);
    }
    if ( NULL == wpsIe )
    {
        PopulateDot11fRSNOpaque( pMac, &( psessionEntry->pLimReAssocReq->rsnIE ),
                &frm.RSNOpaque );
        PopulateDot11fWPAOpaque( pMac, &( psessionEntry->pLimReAssocReq->rsnIE ),
                &frm.WPAOpaque );
#if defined(FEATURE_WLAN_WAPI)
        PopulateDot11fWAPIOpaque( pMac, &( psessionEntry->pLimReAssocReq->rsnIE ),
                &frm.WAPIOpaque );
#endif // defined(FEATURE_WLAN_WAPI)
    }

    // include WME EDCA IE as well
    if ( fWmeEnabled )
    {
        if ( ! PROP_CAPABILITY_GET( WME, psessionEntry->limReassocBssPropCap ) )
        {
            PopulateDot11fWMMInfoStation( pMac, &frm.WMMInfoStation );
        }

        if ( fWsmEnabled &&
             ( ! PROP_CAPABILITY_GET(WSM, psessionEntry->limReassocBssPropCap )))
        {
            PopulateDot11fWMMCaps( &frm.WMMCaps );
        }
    }

    if ( psessionEntry->htCapabality &&
         pMac->lim.htCapabilityPresentInBeacon)
    {
        PopulateDot11fHTCaps( pMac, &frm.HTCaps );
    }

    nStatus = dot11fGetPackedReAssocRequestSize( pMac, &frm, &nPayload );
    if ( DOT11F_FAILED( nStatus ) )
    {
        limLog( pMac, LOGP, FL("Failed to calculate the packed size f"
                    "or a Re-Association Request (0x%08x).\n"),
                nStatus );
        // We'll fall back on the worst case scenario:
        nPayload = sizeof( tDot11fReAssocRequest );
    }
    else if ( DOT11F_WARNED( nStatus ) )
    {
        limLog( pMac, LOGW, FL("There were warnings while calculating"
                    "the packed size for a Re-Association Re "
                    "quest(0x%08x).\n"), nStatus );
    }

    nBytes = nPayload + sizeof( tSirMacMgmtHdr ) + nAddIELen;

    halstatus = palPktAlloc( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT,
            ( tANI_U16 )nBytes, ( void** ) &pFrame,
            ( void** ) &pPacket );
    if ( ! HAL_STATUS_SUCCESS ( halstatus ) )
    {
        psessionEntry->limMlmState = psessionEntry->limPrevMlmState;
        MTRACE(macTrace(pMac, TRACE_CODE_MLM_STATE, 0, pMac->lim.gLimMlmState));
        limLog( pMac, LOGP, FL("Failed to allocate %d bytes for a Re-As"
                    "sociation Request.\n"), nBytes );
        goto end;
    }

    // Paranoia:
    palZeroMemory( pMac->hHdd, pFrame, nBytes );

    // Next, we fill out the buffer descriptor:
    nSirStatus = limPopulateMacHeader( pMac, pFrame, SIR_MAC_MGMT_FRAME,
            SIR_MAC_MGMT_REASSOC_REQ,
            psessionEntry->limReAssocbssId,psessionEntry->selfMacAddr);
    if ( eSIR_SUCCESS != nSirStatus )
    {
        limLog( pMac, LOGE, FL("Failed to populate the buffer descrip"
                    "tor for an Association Request (%d).\n"),
                nSirStatus );
        palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT,
                ( void* ) pFrame, ( void* ) pPacket );
        goto end;
    }

    // That done, pack the Probe Request:
    // NOTE(review): stale comment -- this packs a Re-Association Request.
    nStatus = dot11fPackReAssocRequest( pMac, &frm, pFrame +
            sizeof(tSirMacMgmtHdr),
            nPayload, &nPayload );
    if ( DOT11F_FAILED( nStatus ) )
    {
        limLog( pMac, LOGE, FL("Failed to pack a Re-Association Reque"
                    "st (0x%08x).\n"), nStatus );
        palPktFree(
HAL_STATUS_SUCCESS ( halstatus ) )
    {
        // NOTE(review): this logs nSirStatus, but the failing call set
        // halstatus -- the wrong status is reported.
        limLog( pMac, LOGE, FL("Failed to send Re-Association Request"
                    "(%X)!\n"),
                nSirStatus );
        //Pkt will be freed up by the callback
        goto end;
    }

end:
    // Free up buffer allocated for mlmAssocReq
    palFreeMemory( pMac->hHdd, ( tANI_U8* ) pMlmReassocReq );
    psessionEntry->pLimMlmReassocReq = NULL;
} // limSendReassocReqMgmtFrame

/**
 * \brief Send an Authentication frame
 *
 *
 * \param pMac Pointer to Global MAC structure
 *
 * \param pAuthFrameBody Pointer to Authentication frame structure that need
 * to be sent
 *
 * \param peerMacAddr MAC address of the peer entity to which Authentication
 * frame is destined
 *
 * \param wepBit Indicates whether wep bit to be set in FC while sending
 * Authentication frame3
 *
 *
 * This function is called by limProcessMlmMessages(). Authentication frame
 * is formatted and sent when this function is called.
 *
 *
 */
void limSendAuthMgmtFrame(tpAniSirGlobal pMac,
                          tpSirMacAuthFrameBody pAuthFrameBody,
                          tSirMacAddr peerMacAddr,
                          tANI_U8 wepBit,
                          tpPESession psessionEntry
                                                       )
{
    tANI_U8            *pFrame, *pBody;
    tANI_U32            frameLen = 0, bodyLen = 0;
    tpSirMacMgmtHdr     pMacHdr;
    tANI_U16            i;
    void               *pPacket;
    eHalStatus          halstatus;
    tANI_U8             txFlag = 0;

    if(NULL == psessionEntry)
    {
        return;
    }

    // frameLen/bodyLen depend on the auth sequence number and algorithm;
    // for WEP-encrypted frame 3 the body is copied opaquely.
    if (wepBit == LIM_WEP_IN_FC)
    {
        /// Auth frame3 to be sent with encrypted framebody
        /**
         * Allocate buffer for Authenticaton frame of size equal
         * to management frame header length plus 2 bytes each for
         * auth algorithm number, transaction number, status code,
         * 128 bytes for challenge text and 4 bytes each for
         * IV & ICV.
         */
        frameLen = sizeof(tSirMacMgmtHdr) + LIM_ENCR_AUTH_BODY_LEN;
        bodyLen = LIM_ENCR_AUTH_BODY_LEN;
    } // if (wepBit == LIM_WEP_IN_FC)
    else
    {
        switch (pAuthFrameBody->authTransactionSeqNumber)
        {
            case SIR_MAC_AUTH_FRAME_1:
                /**
                 * Allocate buffer for Authenticaton frame of size
                 * equal to management frame header length plus 2 bytes
                 * each for auth algorithm number, transaction number
                 * and status code.
                 */
                frameLen = sizeof(tSirMacMgmtHdr) + SIR_MAC_AUTH_CHALLENGE_OFFSET;
                bodyLen = SIR_MAC_AUTH_CHALLENGE_OFFSET;

#if defined WLAN_FEATURE_VOWIFI_11R
                // 11r: FT IEs from the pre-auth request ride after the
                // fixed auth fields in frame 1.
                // NOTE(review): pFTPreAuthReq is dereferenced without a NULL
                // check here -- confirm it is guaranteed set for eSIR_FT_AUTH.
                if (pAuthFrameBody->authAlgoNumber == eSIR_FT_AUTH)
                {
                    if (pMac->ft.ftPEContext.pFTPreAuthReq->ft_ies)
                    {
                        frameLen += pMac->ft.ftPEContext.pFTPreAuthReq->ft_ies_length;
                        limLog(pMac, LOG3, FL("Auth frame, FTIES length added=%d\n"),
                        pMac->ft.ftPEContext.pFTPreAuthReq->ft_ies_length);
                    }
                    else
                        limLog(pMac, LOG3, FL("Auth frame, Does not contain FTIES!!!\n"));
                }
#endif
                break;

            case SIR_MAC_AUTH_FRAME_2:
                if ((pAuthFrameBody->authAlgoNumber == eSIR_OPEN_SYSTEM) ||
                    ((pAuthFrameBody->authAlgoNumber == eSIR_SHARED_KEY) &&
                     (pAuthFrameBody->authStatusCode != eSIR_MAC_SUCCESS_STATUS)))
                {
                    /**
                     * Allocate buffer for Authenticaton frame of size
                     * equal to management frame header length plus
                     * 2 bytes each for auth algorithm number,
                     * transaction number and status code.
                     */
                    frameLen = sizeof(tSirMacMgmtHdr) + SIR_MAC_AUTH_CHALLENGE_OFFSET;
                    bodyLen = SIR_MAC_AUTH_CHALLENGE_OFFSET;
                }
                else
                {
                    // Shared Key algorithm with challenge text
                    // to be sent
                    /**
                     * Allocate buffer for Authenticaton frame of size
                     * equal to management frame header length plus
                     * 2 bytes each for auth algorithm number,
                     * transaction number, status code and 128 bytes
                     * for challenge text.
                     */
                    frameLen = sizeof(tSirMacMgmtHdr) + sizeof(tSirMacAuthFrame);
                    bodyLen = sizeof(tSirMacAuthFrameBody);
                }
                break;

            case SIR_MAC_AUTH_FRAME_3:
                /// Auth frame3 to be sent without encrypted framebody
                /**
                 * Allocate buffer for Authenticaton frame of size equal
                 * to management frame header length plus 2 bytes each
                 * for auth algorithm number, transaction number and
                 * status code.
                 */
                frameLen = sizeof(tSirMacMgmtHdr) + SIR_MAC_AUTH_CHALLENGE_OFFSET;
                bodyLen = SIR_MAC_AUTH_CHALLENGE_OFFSET;
                break;

            case SIR_MAC_AUTH_FRAME_4:
                /**
                 * Allocate buffer for Authenticaton frame of size equal
                 * to management frame header length plus 2 bytes each
                 * for auth algorithm number, transaction number and
                 * status code.
                 */
                frameLen = sizeof(tSirMacMgmtHdr) + SIR_MAC_AUTH_CHALLENGE_OFFSET;
                bodyLen = SIR_MAC_AUTH_CHALLENGE_OFFSET;
                break;
        } // switch (pAuthFrameBody->authTransactionSeqNumber)
    } // end if (wepBit == LIM_WEP_IN_FC)

    halstatus = palPktAlloc( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT,
            ( tANI_U16 )frameLen, ( void** ) &pFrame,
            ( void** ) &pPacket );
    if ( ! HAL_STATUS_SUCCESS ( halstatus ) )
    {
        // Log error
        limLog(pMac, LOGP, FL("call to bufAlloc failed for AUTH frame\n"));

        return;
    }

    // Zero the whole frame before filling it in.
    for (i = 0; i < frameLen; i++)
        pFrame[i] = 0;

    // Prepare BD
    if (limPopulateMacHeader(pMac, pFrame, SIR_MAC_MGMT_FRAME,
                SIR_MAC_MGMT_AUTH, peerMacAddr,psessionEntry->selfMacAddr) != eSIR_SUCCESS)
    {
        palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT,
                ( void* ) pFrame, ( void* ) pPacket );
        return;
    }

    pMacHdr = ( tpSirMacMgmtHdr ) pFrame;
    // Protected-frame bit is set only for encrypted auth frame 3.
    pMacHdr->fc.wep = wepBit;

    // Prepare BSSId
    if( (psessionEntry->limSystemRole == eLIM_AP_ROLE)||
        (psessionEntry->limSystemRole == eLIM_BT_AMP_AP_ROLE) )
    {
        palCopyMemory( pMac->hHdd,(tANI_U8 *) pMacHdr->bssId,
                       (tANI_U8 *) psessionEntry->bssId,
                       sizeof( tSirMacAddr ));
    }

    /// Prepare Authentication frame body
    pBody = pFrame + sizeof(tSirMacMgmtHdr);

    if (wepBit == LIM_WEP_IN_FC)
    {
        // Encrypted body: copy the caller-prepared bytes verbatim.
        palCopyMemory( pMac->hHdd, pBody, (tANI_U8 *) pAuthFrameBody, bodyLen);

        PELOG1(limLog(pMac, LOG1,
           FL("*** Sending Auth seq# 3 status %d (%d) to\n"),
           pAuthFrameBody->authStatusCode,
           (pAuthFrameBody->authStatusCode == eSIR_MAC_SUCCESS_STATUS));
        limPrintMacAddr(pMac, pMacHdr->da, LOG1);)
    }
    else
    {
        // Serialize the three fixed tANI_U16 fields with byte-order
        // conversion, then copy the remainder of the body starting at
        // 'type'.
        // NOTE(review): the trailing palCopyMemory assumes 'type' is laid
        // out immediately after the three U16s in tSirMacAuthFrameBody and
        // that bodyLen was sized accordingly -- verify against the struct.
        *((tANI_U16 *)(pBody)) = sirSwapU16ifNeeded(pAuthFrameBody->authAlgoNumber);
        pBody   += sizeof(tANI_U16);
        bodyLen -= sizeof(tANI_U16);

        *((tANI_U16 *)(pBody)) = sirSwapU16ifNeeded(pAuthFrameBody->authTransactionSeqNumber);
        pBody   += sizeof(tANI_U16);
        bodyLen -= sizeof(tANI_U16);

        *((tANI_U16 *)(pBody)) = sirSwapU16ifNeeded(pAuthFrameBody->authStatusCode);
        pBody   += sizeof(tANI_U16);
        bodyLen -= sizeof(tANI_U16);

        palCopyMemory( pMac->hHdd, pBody, (tANI_U8 *) &pAuthFrameBody->type, bodyLen);

#if defined WLAN_FEATURE_VOWIFI_11R
        // 11r FT auth frame 1: append the cached FT IEs after the fixed
        // fields (space was reserved in frameLen above).
        if ((pAuthFrameBody->authAlgoNumber == eSIR_FT_AUTH) &&
            (pAuthFrameBody->authTransactionSeqNumber == SIR_MAC_AUTH_FRAME_1))
        {
            {
                int i = 0;
#if defined WLAN_FEATURE_VOWIFI_11R_DEBUG
                if (pMac->ft.ftPEContext.pFTPreAuthReq->ft_ies_length)
                {
                    PELOGE(limLog(pMac, LOGE, FL("Auth1 Frame FTIE is: "));
                    sirDumpBuf(pMac, SIR_LIM_MODULE_ID, LOGE,
                        (tANI_U8 *)pBody,
                        (pMac->ft.ftPEContext.pFTPreAuthReq->ft_ies_length));)
                }
#endif
                for (i=0; i<pMac->ft.ftPEContext.pFTPreAuthReq->ft_ies_length; i++)
                {
                    *pBody = pMac->ft.ftPEContext.pFTPreAuthReq->ft_ies[i];
                    pBody++;
                }
            }
        }
#endif

        PELOG1(limLog(pMac, LOG1,
           FL("*** Sending Auth seq# %d status %d (%d) to "),
           pAuthFrameBody->authTransactionSeqNumber,
           pAuthFrameBody->authStatusCode,
           (pAuthFrameBody->authStatusCode == eSIR_MAC_SUCCESS_STATUS));
        limPrintMacAddr(pMac, pMacHdr->da, LOG1);)
    }
    PELOG2(sirDumpBuf(pMac, SIR_LIM_MODULE_ID, LOG2, pFrame, frameLen);)

    // Use the alternate BD rate for 5 GHz and P2P personas.
    if( ( SIR_BAND_5_GHZ == limGetRFBand(psessionEntry->currentOperChannel))
#ifdef WLAN_FEATURE_P2P
        || ( psessionEntry->pePersona == VOS_P2P_CLIENT_MODE ) ||
        ( psessionEntry->pePersona == VOS_P2P_GO_MODE)
#endif
      )
    {
        txFlag |= HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME;
    }

    /// Queue Authentication frame in high priority WQ
    halstatus = halTxFrame( pMac, pPacket, ( tANI_U16 ) frameLen,
                            HAL_TXRX_FRM_802_11_MGMT,
                            ANI_TXDIR_TODS,
                            7,//SMAC_SWBD_TX_TID_MGMT_HIGH,
                            limTxComplete, pFrame, txFlag );
    if ( ! HAL_STATUS_SUCCESS ( halstatus ) )
    {
        limLog(pMac, LOGE,
               FL("*** Could not send Auth frame, retCode=%X ***\n"),
               halstatus);
        //Pkt will be freed up by the callback
    }

    return;
} /*** end limSendAuthMgmtFrame() ***/

/**
 * \brief This function is called to send Disassociate frame.
* * * \param pMac Pointer to Global MAC structure * * \param nReason Indicates the reason that need to be sent in * Disassociation frame * * \param peerMacAddr MAC address of the STA to which Disassociation frame is * sent * * */ void limSendDisassocMgmtFrame(tpAniSirGlobal pMac, tANI_U16 nReason, tSirMacAddr peer,tpPESession psessionEntry) { tDot11fDisassociation frm; tANI_U8 *pFrame; tSirRetStatus nSirStatus; tpSirMacMgmtHdr pMacHdr; tANI_U32 nBytes, nPayload, nStatus; void *pPacket; eHalStatus halstatus; tANI_U8 txFlag = 0; if(NULL == psessionEntry) { return; } palZeroMemory( pMac->hHdd, ( tANI_U8* )&frm, sizeof( frm ) ); frm.Reason.code = nReason; nStatus = dot11fGetPackedDisassociationSize( pMac, &frm, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGP, FL("Failed to calculate the packed size f" "or a Disassociation (0x%08x).\n"), nStatus ); // We'll fall back on the worst case scenario: nPayload = sizeof( tDot11fDisassociation ); } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while calculating" "the packed size for a Disassociation " "(0x%08x).\n"), nStatus ); } nBytes = nPayload + sizeof( tSirMacMgmtHdr ); halstatus = palPktAlloc( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( tANI_U16 )nBytes, ( void** ) &pFrame, ( void** ) &pPacket ); if ( ! HAL_STATUS_SUCCESS ( halstatus ) ) { limLog( pMac, LOGP, FL("Failed to allocate %d bytes for a Dis" "association.\n"), nBytes ); return; } // Paranoia: palZeroMemory( pMac->hHdd, pFrame, nBytes ); // Next, we fill out the buffer descriptor: nSirStatus = limPopulateMacHeader( pMac, pFrame, SIR_MAC_MGMT_FRAME, SIR_MAC_MGMT_DISASSOC, peer,psessionEntry->selfMacAddr); if ( eSIR_SUCCESS != nSirStatus ) { limLog( pMac, LOGE, FL("Failed to populate the buffer descrip" "tor for a Disassociation (%d).\n"), nSirStatus ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return; // just allocated... 
} pMacHdr = ( tpSirMacMgmtHdr ) pFrame; // Prepare the BSSID sirCopyMacAddr(pMacHdr->bssId,psessionEntry->bssId); nStatus = dot11fPackDisassociation( pMac, &frm, pFrame + sizeof(tSirMacMgmtHdr), nPayload, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGE, FL("Failed to pack a Disassociation (0x%08x).\n"), nStatus ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return; // allocated! } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while packing a D" "isassociation (0x%08x).\n") ); } PELOG1(limLog( pMac, LOG1, FL("*** Sending Disassociation frame with rea" "son %d to\n"), nReason ); limPrintMacAddr( pMac, pMacHdr->da, LOG1 );) if( ( SIR_BAND_5_GHZ == limGetRFBand(psessionEntry->currentOperChannel)) #ifdef WLAN_FEATURE_P2P || ( psessionEntry->pePersona == VOS_P2P_CLIENT_MODE ) || ( psessionEntry->pePersona == VOS_P2P_GO_MODE) #endif ) { txFlag |= HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME; } // Queue Disassociation frame in high priority WQ halstatus = halTxFrame( pMac, pPacket, ( tANI_U16 ) nBytes, HAL_TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS, 7,//SMAC_SWBD_TX_TID_MGMT_HIGH, limTxComplete, pFrame, txFlag ); if ( ! HAL_STATUS_SUCCESS ( halstatus ) ) { limLog( pMac, LOGE, FL("Failed to send Disassociation " "(%X)!\n"), nSirStatus ); //Pkt will be freed up by the callback return; } } // End limSendDisassocMgmtFrame. 
/**
 * \brief This function is called to send a Deauthenticate frame
 *
 *
 * \param pMac Pointer to global MAC structure
 *
 * \param nReason Indicates the reason that need to be sent in the
 * Deauthenticate frame
 *
 * \param peer address of the STA to which the frame is to be sent
 *
 * \param psessionEntry PE session on whose BSS the frame is sent; the
 * function is a no-op if this is NULL
 *
 */
void
limSendDeauthMgmtFrame(tpAniSirGlobal pMac,
                       tANI_U16 nReason,
                       tSirMacAddr peer,tpPESession psessionEntry)
{
    tDot11fDeAuth    frm;
    tANI_U8         *pFrame;
    tSirRetStatus    nSirStatus;
    tpSirMacMgmtHdr  pMacHdr;
    tANI_U32         nBytes, nPayload, nStatus;
    void            *pPacket;
    eHalStatus       halstatus;
    tANI_U8          txFlag = 0;

    if(NULL == psessionEntry)
    {
        return;
    }

    palZeroMemory( pMac->hHdd, ( tANI_U8* ) &frm, sizeof( frm ) );

    frm.Reason.code = nReason;

    nStatus = dot11fGetPackedDeAuthSize( pMac, &frm, &nPayload );
    if ( DOT11F_FAILED( nStatus ) )
    {
        limLog( pMac, LOGP, FL("Failed to calculate the packed size f"
                               "or a De-Authentication (0x%08x).\n"),
                nStatus );
        // We'll fall back on the worst case scenario:
        nPayload = sizeof( tDot11fDeAuth );
    }
    else if ( DOT11F_WARNED( nStatus ) )
    {
        limLog( pMac, LOGW, FL("There were warnings while calculating"
                               "the packed size for a De-Authentication "
                               "(0x%08x).\n"), nStatus );
    }

    nBytes = nPayload + sizeof( tSirMacMgmtHdr );

    halstatus = palPktAlloc( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT,
                             ( tANI_U16 )nBytes, ( void** ) &pFrame,
                             ( void** ) &pPacket );
    if ( ! HAL_STATUS_SUCCESS ( halstatus ) )
    {
        limLog( pMac, LOGP, FL("Failed to allocate %d bytes for a De-"
                               "Authentication.\n"), nBytes );
        return;
    }

    // Paranoia:
    palZeroMemory( pMac->hHdd, pFrame, nBytes );

    // Next, we fill out the buffer descriptor:
    nSirStatus = limPopulateMacHeader( pMac, pFrame, SIR_MAC_MGMT_FRAME,
                                SIR_MAC_MGMT_DEAUTH, peer,psessionEntry->selfMacAddr);
    if ( eSIR_SUCCESS != nSirStatus )
    {
        limLog( pMac, LOGE, FL("Failed to populate the buffer descrip"
                               "tor for a De-Authentication (%d).\n"),
                nSirStatus );
        palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT,
                    ( void* ) pFrame, ( void* ) pPacket );
        return;                 // just allocated...
    }

    pMacHdr = ( tpSirMacMgmtHdr ) pFrame;

    // Prepare the BSSID
    sirCopyMacAddr(pMacHdr->bssId,psessionEntry->bssId);

    nStatus = dot11fPackDeAuth( pMac, &frm, pFrame +
                                sizeof(tSirMacMgmtHdr),
                                nPayload, &nPayload );
    if ( DOT11F_FAILED( nStatus ) )
    {
        limLog( pMac, LOGE, FL("Failed to pack a DeAuthentication (0x%08x).\n"),
                nStatus );
        palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT,
                    ( void* ) pFrame, ( void* ) pPacket );
        return;
    }
    else if ( DOT11F_WARNED( nStatus ) )
    {
        // Fix: the warning code was missing from the argument list,
        // leaving %08x with no matching vararg (undefined behavior).
        limLog( pMac, LOGW, FL("There were warnings while packing a D"
                               "e-Authentication (0x%08x).\n"), nStatus );
    }

    PELOG1(limLog( pMac, LOG1, FL("*** Sending De-Authentication frame with rea"
                           "son %d to\n"), nReason );
    limPrintMacAddr( pMac, pMacHdr->da, LOG1 );)

    // Frames to a 5 GHz BSS (or over a P2P persona) must go out at the
    // alternate management rate.
    if( ( SIR_BAND_5_GHZ == limGetRFBand(psessionEntry->currentOperChannel))
#ifdef WLAN_FEATURE_P2P
       || ( psessionEntry->pePersona == VOS_P2P_CLIENT_MODE ) ||
         ( psessionEntry->pePersona == VOS_P2P_GO_MODE)
#endif
         )
    {
        txFlag |= HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME;
    }

    // Queue Disassociation frame in high priority WQ
    halstatus = halTxFrame( pMac, pPacket, ( tANI_U16 ) nBytes,
                            HAL_TXRX_FRM_802_11_MGMT,
                            ANI_TXDIR_TODS,
                            7,//SMAC_SWBD_TX_TID_MGMT_HIGH,
                            limTxComplete, pFrame, txFlag );
    if ( ! HAL_STATUS_SUCCESS ( halstatus ) )
    {
        // Fix: log the actual transmit status; nSirStatus is the
        // (successful) header-population result at this point.
        limLog( pMac, LOGE, FL("Failed to send De-Authentication "
                               "(%X)!\n"),
                halstatus );
        //Pkt will be freed up by the callback
        return;
    }
} // End limSendDeauthMgmtFrame.
#ifdef ANI_SUPPORT_11H
/**
 * \brief Send a Measurement Report Action frame
 *
 *
 * \param pMac Pointer to the global MAC structure
 *
 * \param pMeasReqFrame Address of a tSirMacMeasReqActionFrame
 *
 * \param peer MAC address to which the frame should be sent
 *
 * \return eSIR_SUCCESS on success, eSIR_FAILURE else
 *
 *
 */
tSirRetStatus
limSendMeasReportFrame(tpAniSirGlobal             pMac,
                       tpSirMacMeasReqActionFrame pMeasReqFrame,
                       tSirMacAddr                peer)
{
    tDot11fMeasurementReport frm;
    tANI_U8                 *pFrame;
    tSirRetStatus            nSirStatus;
    tpSirMacMgmtHdr          pMacHdr;
    tANI_U32                 nBytes, nPayload, nStatus, nCfg;
    void                    *pPacket;
    eHalStatus               halstatus;

    palZeroMemory( pMac->hHdd, ( tANI_U8* )&frm, sizeof( frm ) );

    frm.Category.category = SIR_MAC_ACTION_SPECTRUM_MGMT;
    frm.Action.action     = SIR_MAC_ACTION_MEASURE_REPORT_ID;
    frm.DialogToken.token = pMeasReqFrame->actionHeader.dialogToken;

    // The report IE contents depend on the requested measurement type.
    switch ( pMeasReqFrame->measReqIE.measType )
    {
    case SIR_MAC_BASIC_MEASUREMENT_TYPE:
        nSirStatus =
            PopulateDot11fMeasurementReport0( pMac, pMeasReqFrame,
                                               &frm.MeasurementReport );
        break;
    case SIR_MAC_CCA_MEASUREMENT_TYPE:
        nSirStatus =
            PopulateDot11fMeasurementReport1( pMac, pMeasReqFrame,
                                               &frm.MeasurementReport );
        break;
    case SIR_MAC_RPI_MEASUREMENT_TYPE:
        nSirStatus =
            PopulateDot11fMeasurementReport2( pMac, pMeasReqFrame,
                                               &frm.MeasurementReport );
        break;
    default:
        limLog( pMac, LOGE, FL("Unknown measurement type %d in limSen"
                               "dMeasReportFrame.\n"),
                pMeasReqFrame->measReqIE.measType );
        return eSIR_FAILURE;
    }

    if ( eSIR_SUCCESS != nSirStatus ) return eSIR_FAILURE;

    nStatus = dot11fGetPackedMeasurementReportSize( pMac, &frm, &nPayload );
    if ( DOT11F_FAILED( nStatus ) )
    {
        limLog( pMac, LOGP, FL("Failed to calculate the packed size f"
                               "or a Measurement Report (0x%08x).\n"),
                nStatus );
        // We'll fall back on the worst case scenario:
        nPayload = sizeof( tDot11fMeasurementReport );
    }
    else if ( DOT11F_WARNED( nStatus ) )
    {
        limLog( pMac, LOGW, FL("There were warnings while calculating"
                               "the packed size for a Measurement Rep"
                               "ort (0x%08x).\n"), nStatus );
    }

    nBytes = nPayload + sizeof( tSirMacMgmtHdr );

    halstatus = palPktAlloc( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT,
                             ( tANI_U16 )nBytes, ( void** ) &pFrame,
                             ( void** ) &pPacket );
    if ( ! HAL_STATUS_SUCCESS ( halstatus ) )
    {
        // Fix: this message previously said "De-Authentication"
        // (copy-paste from limSendDeauthMgmtFrame).
        limLog( pMac, LOGP, FL("Failed to allocate %d bytes for a Mea"
                               "surement Report.\n"), nBytes );
        return eSIR_FAILURE;
    }

    // Paranoia:
    palZeroMemory( pMac->hHdd, pFrame, nBytes );

    // Next, we fill out the buffer descriptor:
    nSirStatus = limPopulateMacHeader( pMac, pFrame, SIR_MAC_MGMT_FRAME,
                                SIR_MAC_MGMT_ACTION, peer);
    if ( eSIR_SUCCESS != nSirStatus )
    {
        limLog( pMac, LOGE, FL("Failed to populate the buffer descrip"
                               "tor for a Measurement Report (%d).\n"),
                nSirStatus );
        palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT,
                    ( void* ) pFrame, ( void* ) pPacket );
        return eSIR_FAILURE;    // just allocated...
    }

    pMacHdr = ( tpSirMacMgmtHdr ) pFrame;

    nCfg = 6;
    nSirStatus = wlan_cfgGetStr( pMac, WNI_CFG_BSSID, pMacHdr->bssId, &nCfg );
    if ( eSIR_SUCCESS != nSirStatus )
    {
        limLog( pMac, LOGE, FL("Failed to retrieve WNI_CFG_BSSID from"
                               " CFG (%d).\n"),
                nSirStatus );
        palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT,
                    ( void* ) pFrame, ( void* ) pPacket );
        return eSIR_FAILURE;    // just allocated...
    }

    nStatus = dot11fPackMeasurementReport( pMac, &frm, pFrame +
                                           sizeof(tSirMacMgmtHdr),
                                           nPayload, &nPayload );
    if ( DOT11F_FAILED( nStatus ) )
    {
        limLog( pMac, LOGE, FL("Failed to pack a Measurement Report (0x%08x).\n"),
                nStatus );
        palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT,
                    ( void* ) pFrame, ( void* ) pPacket );
        return eSIR_FAILURE;    // allocated!
    }
    else if ( DOT11F_WARNED( nStatus ) )
    {
        // Fix: the warning code was missing from the argument list,
        // leaving %08x with no matching vararg (undefined behavior).
        limLog( pMac, LOGW, FL("There were warnings while packing a M"
                               "easurement Report (0x%08x).\n"), nStatus );
    }

    halstatus = halTxFrame( pMac, pPacket, ( tANI_U16 ) nBytes,
                            HAL_TXRX_FRM_802_11_MGMT,
                            ANI_TXDIR_TODS,
                            7,//SMAC_SWBD_TX_TID_MGMT_HIGH,
                            limTxComplete, pFrame, 0 );
    if ( ! HAL_STATUS_SUCCESS ( halstatus ) )
    {
        // Fix: log the actual transmit status; nSirStatus is the
        // (successful) CFG-read result at this point.
        limLog( pMac, LOGE, FL("Failed to send a Measurement Report "
                               "(%X)!\n"),
                halstatus );
        //Pkt will be freed up by the callback
        return eSIR_FAILURE;    // just allocated...
    }

    return eSIR_SUCCESS;

} // End limSendMeasReportFrame.


/**
 * \brief Send a TPC Request Action frame
 *
 *
 * \param pMac Pointer to the global MAC datastructure
 *
 * \param peer MAC address to which the frame should be sent
 *
 *
 */
void
limSendTpcRequestFrame(tpAniSirGlobal pMac,
                       tSirMacAddr    peer)
{
    tDot11fTPCRequest  frm;
    tANI_U8           *pFrame;
    tSirRetStatus      nSirStatus;
    tpSirMacMgmtHdr    pMacHdr;
    tANI_U32           nBytes, nPayload, nStatus, nCfg;
    void              *pPacket;
    eHalStatus         halstatus;

    palZeroMemory( pMac->hHdd, ( tANI_U8* )&frm, sizeof( frm ) );

    frm.Category.category  = SIR_MAC_ACTION_SPECTRUM_MGMT;
    frm.Action.action      = SIR_MAC_ACTION_TPC_REQUEST_ID;
    frm.DialogToken.token  = 1;
    frm.TPCRequest.present = 1;

    nStatus = dot11fGetPackedTPCRequestSize( pMac, &frm, &nPayload );
    if ( DOT11F_FAILED( nStatus ) )
    {
        limLog( pMac, LOGP, FL("Failed to calculate the packed size f"
                               "or a TPC Request (0x%08x).\n"),
                nStatus );
        // We'll fall back on the worst case scenario:
        nPayload = sizeof( tDot11fTPCRequest );
    }
    else if ( DOT11F_WARNED( nStatus ) )
    {
        limLog( pMac, LOGW, FL("There were warnings while calculating"
                               "the packed size for a TPC Request (0x"
                               "%08x).\n"), nStatus );
    }

    nBytes = nPayload + sizeof( tSirMacMgmtHdr );

    halstatus = palPktAlloc( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT,
                             ( tANI_U16 )nBytes, ( void** ) &pFrame,
                             ( void** ) &pPacket );
    if ( ! HAL_STATUS_SUCCESS ( halstatus ) )
    {
        limLog( pMac, LOGP, FL("Failed to allocate %d bytes for a TPC"
                               " Request.\n"), nBytes );
        return;
    }

    // Paranoia:
    palZeroMemory( pMac->hHdd, pFrame, nBytes );

    // Next, we fill out the buffer descriptor:
    nSirStatus = limPopulateMacHeader( pMac, pFrame, SIR_MAC_MGMT_FRAME,
                                SIR_MAC_MGMT_ACTION, peer);
    if ( eSIR_SUCCESS != nSirStatus )
    {
        limLog( pMac, LOGE, FL("Failed to populate the buffer descrip"
                               "tor for a TPC Request (%d).\n"),
                nSirStatus );
        palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT,
                    ( void* ) pFrame, ( void* ) pPacket );
        return;                 // just allocated...
    }

    pMacHdr = ( tpSirMacMgmtHdr ) pFrame;

    nCfg = 6;
    nSirStatus = wlan_cfgGetStr( pMac, WNI_CFG_BSSID, pMacHdr->bssId, &nCfg );
    if ( eSIR_SUCCESS != nSirStatus )
    {
        limLog( pMac, LOGE, FL("Failed to retrieve WNI_CFG_BSSID from"
                               " CFG (%d).\n"),
                nSirStatus );
        palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT,
                    ( void* ) pFrame, ( void* ) pPacket );
        return;                 // just allocated...
    }

    nStatus = dot11fPackTPCRequest( pMac, &frm, pFrame +
                                    sizeof(tSirMacMgmtHdr),
                                    nPayload, &nPayload );
    if ( DOT11F_FAILED( nStatus ) )
    {
        limLog( pMac, LOGE, FL("Failed to pack a TPC Request (0x%08x).\n"),
                nStatus );
        palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT,
                    ( void* ) pFrame, ( void* ) pPacket );
        return;                 // allocated!
    }
    else if ( DOT11F_WARNED( nStatus ) )
    {
        // Fix: the warning code was missing from the argument list,
        // leaving %08x with no matching vararg (undefined behavior).
        limLog( pMac, LOGW, FL("There were warnings while packing a T"
                               "PC Request (0x%08x).\n"), nStatus );
    }

    halstatus = halTxFrame( pMac, pPacket, ( tANI_U16 ) nBytes,
                            HAL_TXRX_FRM_802_11_MGMT,
                            ANI_TXDIR_TODS,
                            7,//SMAC_SWBD_TX_TID_MGMT_HIGH,
                            limTxComplete, pFrame, 0 );
    if ( ! HAL_STATUS_SUCCESS ( halstatus ) )
    {
        // Fix: log the actual transmit status; nSirStatus is the
        // (successful) CFG-read result at this point.
        limLog( pMac, LOGE, FL("Failed to send a TPC Request "
                               "(%X)!\n"),
                halstatus );
        //Pkt will be freed up by the callback
        return;
    }

} // End limSendTpcRequestFrame.
/**
 * \brief Send a TPC Report Action frame
 *
 *
 * \param pMac Pointer to the global MAC datastructure
 *
 * \param pTpcReqFrame Pointer to the received TPC Request
 *
 * \param peer MAC address to which the frame should be sent
 *
 * \return eSIR_SUCCESS on success, eSIR_FAILURE else
 *
 *
 */
tSirRetStatus
limSendTpcReportFrame(tpAniSirGlobal            pMac,
                      tpSirMacTpcReqActionFrame pTpcReqFrame,
                      tSirMacAddr               peer)
{
    tDot11fTPCReport frm;
    tANI_U8         *pFrame;
    tSirRetStatus    nSirStatus;
    tpSirMacMgmtHdr  pMacHdr;
    tANI_U32         nBytes, nPayload, nStatus, nCfg;
    void            *pPacket;
    eHalStatus       halstatus;

    palZeroMemory( pMac->hHdd, ( tANI_U8* )&frm, sizeof( frm ) );

    frm.Category.category = SIR_MAC_ACTION_SPECTRUM_MGMT;
    frm.Action.action     = SIR_MAC_ACTION_TPC_REPORT_ID;
    frm.DialogToken.token = pTpcReqFrame->actionHeader.dialogToken;

    // FramesToDo: On the Gen4_TVM branch, there was a comment:
    // "misplaced this function, need to replace:
    // txPower = halGetRateToPwrValue(pMac, staid,
    //                     pMac->lim.gLimCurrentChannelId, 0);
    frm.TPCReport.tx_power    = 0;
    frm.TPCReport.link_margin = 0;
    frm.TPCReport.present     = 1;

    nStatus = dot11fGetPackedTPCReportSize( pMac, &frm, &nPayload );
    if ( DOT11F_FAILED( nStatus ) )
    {
        limLog( pMac, LOGP, FL("Failed to calculate the packed size f"
                               "or a TPC Report (0x%08x).\n"),
                nStatus );
        // We'll fall back on the worst case scenario:
        nPayload = sizeof( tDot11fTPCReport );
    }
    else if ( DOT11F_WARNED( nStatus ) )
    {
        limLog( pMac, LOGW, FL("There were warnings while calculating"
                               "the packed size for a TPC Report (0x"
                               "%08x).\n"), nStatus );
    }

    nBytes = nPayload + sizeof( tSirMacMgmtHdr );

    halstatus = palPktAlloc( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT,
                             ( tANI_U16 )nBytes, ( void** ) &pFrame,
                             ( void** ) &pPacket );
    if ( ! HAL_STATUS_SUCCESS ( halstatus ) )
    {
        limLog( pMac, LOGP, FL("Failed to allocate %d bytes for a TPC"
                               " Report.\n"), nBytes );
        return eSIR_FAILURE;
    }

    // Paranoia:
    palZeroMemory( pMac->hHdd, pFrame, nBytes );

    // Next, we fill out the buffer descriptor:
    nSirStatus = limPopulateMacHeader( pMac, pFrame, SIR_MAC_MGMT_FRAME,
                                SIR_MAC_MGMT_ACTION, peer);
    if ( eSIR_SUCCESS != nSirStatus )
    {
        limLog( pMac, LOGE, FL("Failed to populate the buffer descrip"
                               "tor for a TPC Report (%d).\n"),
                nSirStatus );
        palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT,
                    ( void* ) pFrame, ( void* ) pPacket );
        return eSIR_FAILURE;    // just allocated...
    }

    pMacHdr = ( tpSirMacMgmtHdr ) pFrame;

    nCfg = 6;
    nSirStatus = wlan_cfgGetStr( pMac, WNI_CFG_BSSID, pMacHdr->bssId, &nCfg );
    if ( eSIR_SUCCESS != nSirStatus )
    {
        limLog( pMac, LOGE, FL("Failed to retrieve WNI_CFG_BSSID from"
                               " CFG (%d).\n"),
                nSirStatus );
        palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT,
                    ( void* ) pFrame, ( void* ) pPacket );
        return eSIR_FAILURE;    // just allocated...
    }

    nStatus = dot11fPackTPCReport( pMac, &frm, pFrame +
                                   sizeof(tSirMacMgmtHdr),
                                   nPayload, &nPayload );
    if ( DOT11F_FAILED( nStatus ) )
    {
        limLog( pMac, LOGE, FL("Failed to pack a TPC Report (0x%08x).\n"),
                nStatus );
        palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT,
                    ( void* ) pFrame, ( void* ) pPacket );
        return eSIR_FAILURE;    // allocated!
    }
    else if ( DOT11F_WARNED( nStatus ) )
    {
        // Fix: the warning code was missing from the argument list,
        // leaving %08x with no matching vararg (undefined behavior).
        limLog( pMac, LOGW, FL("There were warnings while packing a T"
                               "PC Report (0x%08x).\n"), nStatus );
    }

    halstatus = halTxFrame( pMac, pPacket, ( tANI_U16 ) nBytes,
                            HAL_TXRX_FRM_802_11_MGMT,
                            ANI_TXDIR_TODS,
                            7,//SMAC_SWBD_TX_TID_MGMT_HIGH,
                            limTxComplete, pFrame, 0 );
    if ( ! HAL_STATUS_SUCCESS ( halstatus ) )
    {
        // Fix: log the actual transmit status; nSirStatus is the
        // (successful) CFG-read result at this point.
        limLog( pMac, LOGE, FL("Failed to send a TPC Report "
                               "(%X)!\n"),
                halstatus );
        //Pkt will be freed up by the callback
        return eSIR_FAILURE;    // just allocated...
    }

    return eSIR_SUCCESS;

} // End limSendTpcReportFrame.
#endif //ANI_SUPPORT_11H #ifdef ANI_PRODUCT_TYPE_AP /** * \brief Send a Channel Switch Announcement * * * \param pMac Pointer to the global MAC datastructure * * \param peer MAC address to which this frame will be sent * * \param nMode * * \param nNewChannel * * \param nCount * * \return eSIR_SUCCESS on success, eSIR_FAILURE else * * */ tSirRetStatus limSendChannelSwitchMgmtFrame(tpAniSirGlobal pMac, tSirMacAddr peer, tANI_U8 nMode, tANI_U8 nNewChannel, tANI_U8 nCount) { tDot11fChannelSwitch frm; tANI_U8 *pFrame; tSirRetStatus nSirStatus; tpSirMacMgmtHdr pMacHdr; tANI_U32 nBytes, nPayload, nStatus, nCfg; void *pPacket; eHalStatus halstatus; palZeroMemory( pMac->hHdd, ( tANI_U8* )&frm, sizeof( frm ) ); frm.Category.category = SIR_MAC_ACTION_SPECTRUM_MGMT; frm.Action.action = SIR_MAC_ACTION_CHANNEL_SWITCH_ID; frm.ChanSwitchAnn.switchMode = nMode; frm.ChanSwitchAnn.newChannel = nNewChannel; frm.ChanSwitchAnn.switchCount = nCount; frm.ChanSwitchAnn.present = 1; nStatus = dot11fGetPackedChannelSwitchSize( pMac, &frm, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGP, FL("Failed to calculate the packed size f" "or a Channel Switch (0x%08x).\n"), nStatus ); // We'll fall back on the worst case scenario: nPayload = sizeof( tDot11fChannelSwitch ); } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while calculating" "the packed size for a Channel Switch (0x" "%08x).\n"), nStatus ); } nBytes = nPayload + sizeof( tSirMacMgmtHdr ); halstatus = palPktAlloc( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( tANI_U16 )nBytes, ( void** ) &pFrame, ( void** ) &pPacket ); if ( ! 
HAL_STATUS_SUCCESS ( halstatus ) ) { limLog( pMac, LOGP, FL("Failed to allocate %d bytes for a TPC" " Report.\n"), nBytes ); return eSIR_FAILURE; } // Paranoia: palZeroMemory( pMac->hHdd, pFrame, nBytes ); // Next, we fill out the buffer descriptor: nSirStatus = limPopulateMacHeader( pMac, pFrame, SIR_MAC_MGMT_FRAME, SIR_MAC_MGMT_ACTION, peer); if ( eSIR_SUCCESS != nSirStatus ) { limLog( pMac, LOGE, FL("Failed to populate the buffer descrip" "tor for a Channel Switch (%d).\n"), nSirStatus ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return eSIR_FAILURE; // just allocated... } pMacHdr = ( tpSirMacMgmtHdr ) pFrame; nCfg = 6; nSirStatus = wlan_cfgGetStr( pMac, WNI_CFG_BSSID, pMacHdr->bssId, &nCfg ); if ( eSIR_SUCCESS != nSirStatus ) { limLog( pMac, LOGE, FL("Failed to retrieve WNI_CFG_BSSID from" " CFG (%d).\n"), nSirStatus ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return eSIR_FAILURE; // just allocated... } nStatus = dot11fPackChannelSwitch( pMac, &frm, pFrame + sizeof(tSirMacMgmtHdr), nPayload, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGE, FL("Failed to pack a Channel Switch (0x%08x).\n"), nStatus ); palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return eSIR_FAILURE; // allocated! } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while packing a C" "hannel Switch (0x%08x).\n") ); } halstatus = halTxFrame( pMac, pPacket, ( tANI_U16 ) nBytes, HAL_TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS, 7,//SMAC_SWBD_TX_TID_MGMT_HIGH, limTxComplete, pFrame, 0 ); if ( ! HAL_STATUS_SUCCESS ( halstatus ) ) { limLog( pMac, LOGE, FL("Failed to send a Channel Switch " "(%X)!\n"), nSirStatus ); //Pkt will be freed up by the callback return eSIR_FAILURE; } return eSIR_SUCCESS; } // End limSendChannelSwitchMgmtFrame. 
#endif // (ANI_PRODUCT_TYPE_AP) /** * \brief Send an ADDBA Req Action Frame to peer * * \sa limSendAddBAReq * * \param pMac The global tpAniSirGlobal object * * \param pMlmAddBAReq A pointer to tLimMlmAddBAReq. This contains * the necessary parameters reqd by PE send the ADDBA Req Action * Frame to the peer * * \return eSIR_SUCCESS if setup completes successfully * eSIR_FAILURE is some problem is encountered */ tSirRetStatus limSendAddBAReq( tpAniSirGlobal pMac, tpLimMlmAddBAReq pMlmAddBAReq ,tpPESession psessionEntry) { tDot11fAddBAReq frmAddBAReq; tANI_U8 *pAddBAReqBuffer = NULL; tpSirMacMgmtHdr pMacHdr; tANI_U32 frameLen = 0, nStatus, nPayload; tSirRetStatus statusCode; eHalStatus halStatus; void *pPacket; tANI_U8 txFlag = 0; if(NULL == psessionEntry) { return eSIR_FAILURE; } palZeroMemory( pMac->hHdd, (void *) &frmAddBAReq, sizeof( frmAddBAReq )); // Category - 3 (BA) frmAddBAReq.Category.category = SIR_MAC_ACTION_BLKACK; // Action - 0 (ADDBA Req) frmAddBAReq.Action.action = SIR_MAC_BLKACK_ADD_REQ; // FIXME - Dialog Token, generalize this... 
frmAddBAReq.DialogToken.token = pMlmAddBAReq->baDialogToken; // Fill the ADDBA Parameter Set frmAddBAReq.AddBAParameterSet.tid = pMlmAddBAReq->baTID; frmAddBAReq.AddBAParameterSet.policy = pMlmAddBAReq->baPolicy; frmAddBAReq.AddBAParameterSet.bufferSize = pMlmAddBAReq->baBufferSize; // BA timeout // 0 - indicates no BA timeout frmAddBAReq.BATimeout.timeout = pMlmAddBAReq->baTimeout; // BA Starting Sequence Number // Fragment number will always be zero if (pMlmAddBAReq->baSSN < LIM_TX_FRAMES_THRESHOLD_ON_CHIP) { pMlmAddBAReq->baSSN = LIM_TX_FRAMES_THRESHOLD_ON_CHIP; } frmAddBAReq.BAStartingSequenceControl.ssn = pMlmAddBAReq->baSSN - LIM_TX_FRAMES_THRESHOLD_ON_CHIP; nStatus = dot11fGetPackedAddBAReqSize( pMac, &frmAddBAReq, &nPayload ); if( DOT11F_FAILED( nStatus )) { limLog( pMac, LOGW, FL( "Failed to calculate the packed size for " "an ADDBA Request (0x%08x).\n"), nStatus ); // We'll fall back on the worst case scenario: nPayload = sizeof( tDot11fAddBAReq ); } else if( DOT11F_WARNED( nStatus )) { limLog( pMac, LOGW, FL( "There were warnings while calculating" "the packed size for an ADDBA Req (0x%08x).\n"), nStatus ); } // Add the MGMT header to frame length frameLen = nPayload + sizeof( tSirMacMgmtHdr ); // Need to allocate a buffer for ADDBA AF if( eHAL_STATUS_SUCCESS != (halStatus = palPktAlloc( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, (tANI_U16) frameLen, (void **) &pAddBAReqBuffer, (void **) &pPacket ))) { // Log error limLog( pMac, LOGP, FL("palPktAlloc FAILED! 
Length [%d], Status [%d]\n"), frameLen, halStatus ); statusCode = eSIR_MEM_ALLOC_FAILED; goto returnAfterError; } palZeroMemory( pMac->hHdd, (void *) pAddBAReqBuffer, frameLen ); // Copy necessary info to BD if( eSIR_SUCCESS != (statusCode = limPopulateMacHeader( pMac, pAddBAReqBuffer, SIR_MAC_MGMT_FRAME, SIR_MAC_MGMT_ACTION, pMlmAddBAReq->peerMacAddr,psessionEntry->selfMacAddr))) goto returnAfterError; // Update A3 with the BSSID pMacHdr = ( tpSirMacMgmtHdr ) pAddBAReqBuffer; #if 0 cfgLen = SIR_MAC_ADDR_LENGTH; if( eSIR_SUCCESS != cfgGetStr( pMac, WNI_CFG_BSSID, (tANI_U8 *) pMacHdr->bssId, &cfgLen )) { limLog( pMac, LOGP, FL( "Failed to retrieve WNI_CFG_BSSID while" "sending an ACTION Frame\n" )); // FIXME - Need to convert to tSirRetStatus statusCode = eSIR_FAILURE; goto returnAfterError; } #endif//TO SUPPORT BT-AMP sirCopyMacAddr(pMacHdr->bssId,psessionEntry->bssId); // Now, we're ready to "pack" the frames nStatus = dot11fPackAddBAReq( pMac, &frmAddBAReq, pAddBAReqBuffer + sizeof( tSirMacMgmtHdr ), nPayload, &nPayload ); if( DOT11F_FAILED( nStatus )) { limLog( pMac, LOGE, FL( "Failed to pack an ADDBA Req (0x%08x).\n" ), nStatus ); // FIXME - Need to convert to tSirRetStatus statusCode = eSIR_FAILURE; goto returnAfterError; } else if( DOT11F_WARNED( nStatus )) { limLog( pMac, LOGW, FL( "There were warnings while packing an ADDBA Req (0x%08x).\n" )); } limLog( pMac, LOGW, FL( "Sending an ADDBA REQ to \n" )); limPrintMacAddr( pMac, pMlmAddBAReq->peerMacAddr, LOGW ); if( ( SIR_BAND_5_GHZ == limGetRFBand(psessionEntry->currentOperChannel)) #ifdef WLAN_FEATURE_P2P || ( psessionEntry->pePersona == VOS_P2P_CLIENT_MODE ) || ( psessionEntry->pePersona == VOS_P2P_GO_MODE) #endif ) { txFlag |= HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME; } if( eHAL_STATUS_SUCCESS != (halStatus = halTxFrame( pMac, pPacket, (tANI_U16) frameLen, HAL_TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS, 7,//SMAC_SWBD_TX_TID_MGMT_HIGH, limTxComplete, pAddBAReqBuffer, txFlag ))) { limLog( pMac, LOGE, FL( "halTxFrame 
FAILED! Status [%d]\n"), halStatus ); // FIXME - Need to convert eHalStatus to tSirRetStatus statusCode = eSIR_FAILURE; //Pkt will be freed up by the callback return statusCode; } else return eSIR_SUCCESS; returnAfterError: // Release buffer, if allocated if( NULL != pAddBAReqBuffer ) palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, (void *) pAddBAReqBuffer, (void *) pPacket ); return statusCode; } /** * \brief Send an ADDBA Rsp Action Frame to peer * * \sa limSendAddBARsp * * \param pMac The global tpAniSirGlobal object * * \param pMlmAddBARsp A pointer to tLimMlmAddBARsp. This contains * the necessary parameters reqd by PE send the ADDBA Rsp Action * Frame to the peer * * \return eSIR_SUCCESS if setup completes successfully * eSIR_FAILURE is some problem is encountered */ tSirRetStatus limSendAddBARsp( tpAniSirGlobal pMac, tpLimMlmAddBARsp pMlmAddBARsp, tpPESession psessionEntry) { tDot11fAddBARsp frmAddBARsp; tANI_U8 *pAddBARspBuffer = NULL; tpSirMacMgmtHdr pMacHdr; tANI_U32 frameLen = 0, nStatus, nPayload; tSirRetStatus statusCode; eHalStatus halStatus; void *pPacket; tANI_U8 txFlag = 0; if(NULL == psessionEntry) { PELOGE(limLog(pMac, LOGE, FL("Session entry is NULL!!!\n"));) return eSIR_FAILURE; } palZeroMemory( pMac->hHdd, (void *) &frmAddBARsp, sizeof( frmAddBARsp )); // Category - 3 (BA) frmAddBARsp.Category.category = SIR_MAC_ACTION_BLKACK; // Action - 1 (ADDBA Rsp) frmAddBARsp.Action.action = SIR_MAC_BLKACK_ADD_RSP; // Should be same as the one we received in the ADDBA Req frmAddBARsp.DialogToken.token = pMlmAddBARsp->baDialogToken; // ADDBA Req status frmAddBARsp.Status.status = pMlmAddBARsp->addBAResultCode; // Fill the ADDBA Parameter Set as provided by caller frmAddBARsp.AddBAParameterSet.tid = pMlmAddBARsp->baTID; frmAddBARsp.AddBAParameterSet.policy = pMlmAddBARsp->baPolicy; frmAddBARsp.AddBAParameterSet.bufferSize = pMlmAddBARsp->baBufferSize; // BA timeout // 0 - indicates no BA timeout frmAddBARsp.BATimeout.timeout = pMlmAddBARsp->baTimeout; 
nStatus = dot11fGetPackedAddBARspSize( pMac, &frmAddBARsp, &nPayload ); if( DOT11F_FAILED( nStatus )) { limLog( pMac, LOGW, FL( "Failed to calculate the packed size for " "an ADDBA Response (0x%08x).\n"), nStatus ); // We'll fall back on the worst case scenario: nPayload = sizeof( tDot11fAddBARsp ); } else if( DOT11F_WARNED( nStatus )) { limLog( pMac, LOGW, FL( "There were warnings while calculating" "the packed size for an ADDBA Rsp (0x%08x).\n"), nStatus ); } // Need to allocate a buffer for ADDBA AF frameLen = nPayload + sizeof( tSirMacMgmtHdr ); // Allocate shared memory if( eHAL_STATUS_SUCCESS != (halStatus = palPktAlloc( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, (tANI_U16) frameLen, (void **) &pAddBARspBuffer, (void **) &pPacket ))) { // Log error limLog( pMac, LOGP, FL("palPktAlloc FAILED! Length [%d], Status [%d]\n"), frameLen, halStatus ); statusCode = eSIR_MEM_ALLOC_FAILED; goto returnAfterError; } palZeroMemory( pMac->hHdd, (void *) pAddBARspBuffer, frameLen ); // Copy necessary info to BD if( eSIR_SUCCESS != (statusCode = limPopulateMacHeader( pMac, pAddBARspBuffer, SIR_MAC_MGMT_FRAME, SIR_MAC_MGMT_ACTION, pMlmAddBARsp->peerMacAddr,psessionEntry->selfMacAddr))) goto returnAfterError; // Update A3 with the BSSID pMacHdr = ( tpSirMacMgmtHdr ) pAddBARspBuffer; #if 0 cfgLen = SIR_MAC_ADDR_LENGTH; if( eSIR_SUCCESS != wlan_cfgGetStr( pMac, WNI_CFG_BSSID, (tANI_U8 *) pMacHdr->bssId, &cfgLen )) { limLog( pMac, LOGP, FL( "Failed to retrieve WNI_CFG_BSSID while" "sending an ACTION Frame\n" )); // FIXME - Need to convert to tSirRetStatus statusCode = eSIR_FAILURE; goto returnAfterError; } #endif // TO SUPPORT BT-AMP sirCopyMacAddr(pMacHdr->bssId,psessionEntry->bssId); // Now, we're ready to "pack" the frames nStatus = dot11fPackAddBARsp( pMac, &frmAddBARsp, pAddBARspBuffer + sizeof( tSirMacMgmtHdr ), nPayload, &nPayload ); if( DOT11F_FAILED( nStatus )) { limLog( pMac, LOGE, FL( "Failed to pack an ADDBA Rsp (0x%08x).\n" ), nStatus ); // FIXME - Need to convert to 
tSirRetStatus statusCode = eSIR_FAILURE; goto returnAfterError; } else if( DOT11F_WARNED( nStatus )) { limLog( pMac, LOGW, FL( "There were warnings while packing an ADDBA Rsp (0x%08x).\n" )); } limLog( pMac, LOGW, FL( "Sending an ADDBA RSP to \n" )); limPrintMacAddr( pMac, pMlmAddBARsp->peerMacAddr, LOGW ); if( ( SIR_BAND_5_GHZ == limGetRFBand(psessionEntry->currentOperChannel)) #ifdef WLAN_FEATURE_P2P || ( psessionEntry->pePersona == VOS_P2P_CLIENT_MODE ) || ( psessionEntry->pePersona == VOS_P2P_GO_MODE) #endif ) { txFlag |= HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME; } if( eHAL_STATUS_SUCCESS != (halStatus = halTxFrame( pMac, pPacket, (tANI_U16) frameLen, HAL_TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS, 7,//SMAC_SWBD_TX_TID_MGMT_HIGH, limTxComplete, pAddBARspBuffer, txFlag ))) { limLog( pMac, LOGE, FL( "halTxFrame FAILED! Status [%d]\n" ), halStatus ); // FIXME - HAL error codes are different from PE error // codes!! And, this routine is returning tSirRetStatus statusCode = eSIR_FAILURE; //Pkt will be freed up by the callback return statusCode; } else return eSIR_SUCCESS; returnAfterError: // Release buffer, if allocated if( NULL != pAddBARspBuffer ) palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, (void *) pAddBARspBuffer, (void *) pPacket ); return statusCode; } /** * \brief Send a DELBA Indication Action Frame to peer * * \sa limSendDelBAInd * * \param pMac The global tpAniSirGlobal object * * \param peerMacAddr MAC Address of peer * * \param reasonCode Reason for the DELBA notification * * \param pBAParameterSet The DELBA Parameter Set. * This identifies the TID for which the BA session is * being deleted. 
* * \return eSIR_SUCCESS if setup completes successfully * eSIR_FAILURE is some problem is encountered */ tSirRetStatus limSendDelBAInd( tpAniSirGlobal pMac, tpLimMlmDelBAReq pMlmDelBAReq,tpPESession psessionEntry) { tDot11fDelBAInd frmDelBAInd; tANI_U8 *pDelBAIndBuffer = NULL; //tANI_U32 val; tpSirMacMgmtHdr pMacHdr; tANI_U32 frameLen = 0, nStatus, nPayload; tSirRetStatus statusCode; eHalStatus halStatus; void *pPacket; tANI_U8 txFlag = 0; if(NULL == psessionEntry) { return eSIR_FAILURE; } palZeroMemory( pMac->hHdd, (void *) &frmDelBAInd, sizeof( frmDelBAInd )); // Category - 3 (BA) frmDelBAInd.Category.category = SIR_MAC_ACTION_BLKACK; // Action - 2 (DELBA) frmDelBAInd.Action.action = SIR_MAC_BLKACK_DEL; // Fill the DELBA Parameter Set as provided by caller frmDelBAInd.DelBAParameterSet.tid = pMlmDelBAReq->baTID; frmDelBAInd.DelBAParameterSet.initiator = pMlmDelBAReq->baDirection; // BA Starting Sequence Number // Fragment number will always be zero frmDelBAInd.Reason.code = pMlmDelBAReq->delBAReasonCode; nStatus = dot11fGetPackedDelBAIndSize( pMac, &frmDelBAInd, &nPayload ); if( DOT11F_FAILED( nStatus )) { limLog( pMac, LOGW, FL( "Failed to calculate the packed size for " "an DELBA Indication (0x%08x).\n"), nStatus ); // We'll fall back on the worst case scenario: nPayload = sizeof( tDot11fDelBAInd ); } else if( DOT11F_WARNED( nStatus )) { limLog( pMac, LOGW, FL( "There were warnings while calculating" "the packed size for an DELBA Ind (0x%08x).\n"), nStatus ); } // Add the MGMT header to frame length frameLen = nPayload + sizeof( tSirMacMgmtHdr ); // Allocate shared memory if( eHAL_STATUS_SUCCESS != (halStatus = palPktAlloc( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, (tANI_U16) frameLen, (void **) &pDelBAIndBuffer, (void **) &pPacket ))) { // Log error limLog( pMac, LOGP, FL("palPktAlloc FAILED! 
Length [%d], Status [%d]\n"), frameLen, halStatus ); statusCode = eSIR_MEM_ALLOC_FAILED; goto returnAfterError; } palZeroMemory( pMac->hHdd, (void *) pDelBAIndBuffer, frameLen ); // Copy necessary info to BD if( eSIR_SUCCESS != (statusCode = limPopulateMacHeader( pMac, pDelBAIndBuffer, SIR_MAC_MGMT_FRAME, SIR_MAC_MGMT_ACTION, pMlmDelBAReq->peerMacAddr,psessionEntry->selfMacAddr))) goto returnAfterError; // Update A3 with the BSSID pMacHdr = ( tpSirMacMgmtHdr ) pDelBAIndBuffer; #if 0 cfgLen = SIR_MAC_ADDR_LENGTH; if( eSIR_SUCCESS != cfgGetStr( pMac, WNI_CFG_BSSID, (tANI_U8 *) pMacHdr->bssId, &cfgLen )) { limLog( pMac, LOGP, FL( "Failed to retrieve WNI_CFG_BSSID while" "sending an ACTION Frame\n" )); // FIXME - Need to convert to tSirRetStatus statusCode = eSIR_FAILURE; goto returnAfterError; } #endif //TO SUPPORT BT-AMP sirCopyMacAddr(pMacHdr->bssId,psessionEntry->bssId); // Now, we're ready to "pack" the frames nStatus = dot11fPackDelBAInd( pMac, &frmDelBAInd, pDelBAIndBuffer + sizeof( tSirMacMgmtHdr ), nPayload, &nPayload ); if( DOT11F_FAILED( nStatus )) { limLog( pMac, LOGE, FL( "Failed to pack an DELBA Ind (0x%08x).\n" ), nStatus ); // FIXME - Need to convert to tSirRetStatus statusCode = eSIR_FAILURE; goto returnAfterError; } else if( DOT11F_WARNED( nStatus )) { limLog( pMac, LOGW, FL( "There were warnings while packing an DELBA Ind (0x%08x).\n" )); } limLog( pMac, LOGW, FL( "Sending a DELBA IND to \n" )); limPrintMacAddr( pMac, pMlmDelBAReq->peerMacAddr, LOGW ); if( ( SIR_BAND_5_GHZ == limGetRFBand(psessionEntry->currentOperChannel)) #ifdef WLAN_FEATURE_P2P || ( psessionEntry->pePersona == VOS_P2P_CLIENT_MODE ) || ( psessionEntry->pePersona == VOS_P2P_GO_MODE) #endif ) { txFlag |= HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME; } if( eHAL_STATUS_SUCCESS != (halStatus = halTxFrame( pMac, pPacket, (tANI_U16) frameLen, HAL_TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS, 7,//SMAC_SWBD_TX_TID_MGMT_HIGH, limTxComplete, pDelBAIndBuffer, txFlag ))) { PELOGE(limLog( pMac, LOGE, FL( 
"halTxFrame FAILED! Status [%d]\n" ), halStatus );) statusCode = eSIR_FAILURE; //Pkt will be freed up by the callback return statusCode; } else return eSIR_SUCCESS; returnAfterError: // Release buffer, if allocated if( NULL != pDelBAIndBuffer ) palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, (void *) pDelBAIndBuffer, (void *) pPacket ); return statusCode; } #if defined WLAN_FEATURE_VOWIFI /** * \brief Send a Neighbor Report Request Action frame * * * \param pMac Pointer to the global MAC structure * * \param pNeighborReq Address of a tSirMacNeighborReportReq * * \param peer mac address of peer station. * * \param psessionEntry address of session entry. * * \return eSIR_SUCCESS on success, eSIR_FAILURE else * * */ tSirRetStatus limSendNeighborReportRequestFrame(tpAniSirGlobal pMac, tpSirMacNeighborReportReq pNeighborReq, tSirMacAddr peer, tpPESession psessionEntry ) { tSirRetStatus statusCode = eSIR_SUCCESS; tDot11fNeighborReportRequest frm; tANI_U8 *pFrame; tpSirMacMgmtHdr pMacHdr; tANI_U32 nBytes, nPayload, nStatus; void *pPacket; eHalStatus halstatus; tANI_U8 txFlag = 0; if ( psessionEntry == NULL ) { limLog( pMac, LOGE, FL("(psession == NULL) in Request to send Neighbor Report request action frame\n") ); return eSIR_FAILURE; } palZeroMemory( pMac->hHdd, ( tANI_U8* )&frm, sizeof( frm ) ); frm.Category.category = SIR_MAC_ACTION_RRM; frm.Action.action = SIR_MAC_RRM_NEIGHBOR_REQ; frm.DialogToken.token = pNeighborReq->dialogToken; if( pNeighborReq->ssid_present ) { PopulateDot11fSSID( pMac, &pNeighborReq->ssid, &frm.SSID ); } nStatus = dot11fGetPackedNeighborReportRequestSize( pMac, &frm, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGP, FL("Failed to calculate the packed size f" "or a Neighbor Report Request(0x%08x).\n"), nStatus ); // We'll fall back on the worst case scenario: nPayload = sizeof( tDot11fNeighborReportRequest ); } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while calculating" "the packed size 
for a Neighbor Rep" "ort Request(0x%08x).\n"), nStatus ); } nBytes = nPayload + sizeof( tSirMacMgmtHdr ); halstatus = palPktAlloc( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( tANI_U16 )nBytes, ( void** ) &pFrame, ( void** ) &pPacket ); if ( ! HAL_STATUS_SUCCESS ( halstatus ) ) { limLog( pMac, LOGP, FL("Failed to allocate %d bytes for a Neighbor " "Report Request.\n"), nBytes ); return eSIR_FAILURE; } // Paranoia: palZeroMemory( pMac->hHdd, pFrame, nBytes ); // Copy necessary info to BD if( eSIR_SUCCESS != (statusCode = limPopulateMacHeader( pMac, pFrame, SIR_MAC_MGMT_FRAME, SIR_MAC_MGMT_ACTION, peer, psessionEntry->selfMacAddr))) goto returnAfterError; // Update A3 with the BSSID pMacHdr = ( tpSirMacMgmtHdr ) pFrame; sirCopyMacAddr( pMacHdr->bssId, psessionEntry->bssId ); // Now, we're ready to "pack" the frames nStatus = dot11fPackNeighborReportRequest( pMac, &frm, pFrame + sizeof( tSirMacMgmtHdr ), nPayload, &nPayload ); if( DOT11F_FAILED( nStatus )) { limLog( pMac, LOGE, FL( "Failed to pack an Neighbor Report Request (0x%08x).\n" ), nStatus ); // FIXME - Need to convert to tSirRetStatus statusCode = eSIR_FAILURE; goto returnAfterError; } else if( DOT11F_WARNED( nStatus )) { limLog( pMac, LOGW, FL( "There were warnings while packing Neighbor Report Request (0x%08x).\n" )); } limLog( pMac, LOGW, FL( "Sending a Neighbor Report Request to \n" )); limPrintMacAddr( pMac, peer, LOGW ); if( ( SIR_BAND_5_GHZ == limGetRFBand(psessionEntry->currentOperChannel)) #ifdef WLAN_FEATURE_P2P || ( psessionEntry->pePersona == VOS_P2P_CLIENT_MODE ) || ( psessionEntry->pePersona == VOS_P2P_GO_MODE) #endif ) { txFlag |= HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME; } if( eHAL_STATUS_SUCCESS != (halstatus = halTxFrame( pMac, pPacket, (tANI_U16) nBytes, HAL_TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS, 7,//SMAC_SWBD_TX_TID_MGMT_HIGH, limTxComplete, pFrame, txFlag ))) { PELOGE(limLog( pMac, LOGE, FL( "halTxFrame FAILED! 
Status [%d]\n" ), halstatus );) statusCode = eSIR_FAILURE; //Pkt will be freed up by the callback return statusCode; } else return eSIR_SUCCESS; returnAfterError: palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return statusCode; } // End limSendNeighborReportRequestFrame. /** * \brief Send a Link Report Action frame * * * \param pMac Pointer to the global MAC structure * * \param pLinkReport Address of a tSirMacLinkReport * * \param peer mac address of peer station. * * \param psessionEntry address of session entry. * * \return eSIR_SUCCESS on success, eSIR_FAILURE else * * */ tSirRetStatus limSendLinkReportActionFrame(tpAniSirGlobal pMac, tpSirMacLinkReport pLinkReport, tSirMacAddr peer, tpPESession psessionEntry ) { tSirRetStatus statusCode = eSIR_SUCCESS; tDot11fLinkMeasurementReport frm; tANI_U8 *pFrame; tpSirMacMgmtHdr pMacHdr; tANI_U32 nBytes, nPayload, nStatus; void *pPacket; eHalStatus halstatus; tANI_U8 txFlag = 0; if ( psessionEntry == NULL ) { limLog( pMac, LOGE, FL("(psession == NULL) in Request to send Link Report action frame\n") ); return eSIR_FAILURE; } palZeroMemory( pMac->hHdd, ( tANI_U8* )&frm, sizeof( frm ) ); frm.Category.category = SIR_MAC_ACTION_RRM; frm.Action.action = SIR_MAC_RRM_LINK_MEASUREMENT_RPT; frm.DialogToken.token = pLinkReport->dialogToken; //IEEE Std. 802.11 7.3.2.18. for the report element. //Even though TPC report an IE, it is represented using fixed fields since it is positioned //in the middle of other fixed fields in the link report frame(IEEE Std. 802.11k section7.4.6.4 //and frame parser always expects IEs to come after all fixed fields. It is easier to handle //such case this way than changing the frame parser. 
frm.TPCEleID.TPCId = SIR_MAC_TPC_RPT_EID; frm.TPCEleLen.TPCLen = 2; frm.TxPower.txPower = pLinkReport->txPower; frm.LinkMargin.linkMargin = 0; frm.RxAntennaId.antennaId = pLinkReport->rxAntenna; frm.TxAntennaId.antennaId = pLinkReport->txAntenna; frm.RCPI.rcpi = pLinkReport->rcpi; frm.RSNI.rsni = pLinkReport->rsni; nStatus = dot11fGetPackedLinkMeasurementReportSize( pMac, &frm, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGP, FL("Failed to calculate the packed size f" "or a Link Report (0x%08x).\n"), nStatus ); // We'll fall back on the worst case scenario: nPayload = sizeof( tDot11fLinkMeasurementReport ); } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while calculating" "the packed size for a Link Rep" "ort (0x%08x).\n"), nStatus ); } nBytes = nPayload + sizeof( tSirMacMgmtHdr ); halstatus = palPktAlloc( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( tANI_U16 )nBytes, ( void** ) &pFrame, ( void** ) &pPacket ); if ( ! HAL_STATUS_SUCCESS ( halstatus ) ) { limLog( pMac, LOGP, FL("Failed to allocate %d bytes for a Link " "Report.\n"), nBytes ); return eSIR_FAILURE; } // Paranoia: palZeroMemory( pMac->hHdd, pFrame, nBytes ); // Copy necessary info to BD if( eSIR_SUCCESS != (statusCode = limPopulateMacHeader( pMac, pFrame, SIR_MAC_MGMT_FRAME, SIR_MAC_MGMT_ACTION, peer, psessionEntry->selfMacAddr))) goto returnAfterError; // Update A3 with the BSSID pMacHdr = ( tpSirMacMgmtHdr ) pFrame; sirCopyMacAddr( pMacHdr->bssId, psessionEntry->bssId ); // Now, we're ready to "pack" the frames nStatus = dot11fPackLinkMeasurementReport( pMac, &frm, pFrame + sizeof( tSirMacMgmtHdr ), nPayload, &nPayload ); if( DOT11F_FAILED( nStatus )) { limLog( pMac, LOGE, FL( "Failed to pack an Link Report (0x%08x).\n" ), nStatus ); // FIXME - Need to convert to tSirRetStatus statusCode = eSIR_FAILURE; goto returnAfterError; } else if( DOT11F_WARNED( nStatus )) { limLog( pMac, LOGW, FL( "There were warnings while packing Link Report (0x%08x).\n" )); 
} limLog( pMac, LOGW, FL( "Sending a Link Report to \n" )); limPrintMacAddr( pMac, peer, LOGW ); if( ( SIR_BAND_5_GHZ == limGetRFBand(psessionEntry->currentOperChannel)) #ifdef WLAN_FEATURE_P2P || ( psessionEntry->pePersona == VOS_P2P_CLIENT_MODE ) || ( psessionEntry->pePersona == VOS_P2P_GO_MODE) #endif ) { txFlag |= HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME; } if( eHAL_STATUS_SUCCESS != (halstatus = halTxFrame( pMac, pPacket, (tANI_U16) nBytes, HAL_TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS, 7,//SMAC_SWBD_TX_TID_MGMT_HIGH, limTxComplete, pFrame, txFlag ))) { PELOGE(limLog( pMac, LOGE, FL( "halTxFrame FAILED! Status [%d]\n" ), halstatus );) statusCode = eSIR_FAILURE; //Pkt will be freed up by the callback return statusCode; } else return eSIR_SUCCESS; returnAfterError: palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return statusCode; } // End limSendLinkReportActionFrame. /** * \brief Send a Beacon Report Action frame * * * \param pMac Pointer to the global MAC structure * * \param dialog_token dialog token to be used in the action frame. * * \param num_report number of reports in pRRMReport. * * \param pRRMReport Address of a tSirMacRadioMeasureReport. * * \param peer mac address of peer station. * * \param psessionEntry address of session entry. 
* * \return eSIR_SUCCESS on success, eSIR_FAILURE else * * */ tSirRetStatus limSendRadioMeasureReportActionFrame(tpAniSirGlobal pMac, tANI_U8 dialog_token, tANI_U8 num_report, tpSirMacRadioMeasureReport pRRMReport, tSirMacAddr peer, tpPESession psessionEntry ) { tSirRetStatus statusCode = eSIR_SUCCESS; tDot11fRadioMeasurementReport frm; tANI_U8 *pFrame; tpSirMacMgmtHdr pMacHdr; tANI_U32 nBytes, nPayload, nStatus; void *pPacket; eHalStatus halstatus; tANI_U8 i; tANI_U8 txFlag = 0; if ( psessionEntry == NULL ) { limLog( pMac, LOGE, FL("(psession == NULL) in Request to send Beacon Report action frame\n") ); return eSIR_FAILURE; } palZeroMemory( pMac->hHdd, ( tANI_U8* )&frm, sizeof( frm ) ); frm.Category.category = SIR_MAC_ACTION_RRM; frm.Action.action = SIR_MAC_RRM_RADIO_MEASURE_RPT; frm.DialogToken.token = dialog_token; frm.num_MeasurementReport = (num_report > RADIO_REPORTS_MAX_IN_A_FRAME ) ? RADIO_REPORTS_MAX_IN_A_FRAME : num_report; for( i = 0 ; i < frm.num_MeasurementReport ; i++ ) { frm.MeasurementReport[i].type = pRRMReport[i].type; frm.MeasurementReport[i].token = pRRMReport[i].token; frm.MeasurementReport[i].late = 0; //IEEE 802.11k section 7.3.22. 
(always zero in rrm) switch( pRRMReport[i].type ) { case SIR_MAC_RRM_BEACON_TYPE: PopulateDot11fBeaconReport( pMac, &frm.MeasurementReport[i], &pRRMReport[i].report.beaconReport ); frm.MeasurementReport[i].incapable = pRRMReport[i].incapable; frm.MeasurementReport[i].refused = pRRMReport[i].refused; frm.MeasurementReport[i].present = 1; break; default: frm.MeasurementReport[i].present = 1; break; } } nStatus = dot11fGetPackedRadioMeasurementReportSize( pMac, &frm, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { limLog( pMac, LOGP, FL("Failed to calculate the packed size f" "or a Radio Measure Report (0x%08x).\n"), nStatus ); // We'll fall back on the worst case scenario: nPayload = sizeof( tDot11fLinkMeasurementReport ); return eSIR_FAILURE; } else if ( DOT11F_WARNED( nStatus ) ) { limLog( pMac, LOGW, FL("There were warnings while calculating" "the packed size for a Radio Measure Rep" "ort (0x%08x).\n"), nStatus ); } nBytes = nPayload + sizeof( tSirMacMgmtHdr ); halstatus = palPktAlloc( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( tANI_U16 )nBytes, ( void** ) &pFrame, ( void** ) &pPacket ); if ( ! 
HAL_STATUS_SUCCESS ( halstatus ) ) { limLog( pMac, LOGP, FL("Failed to allocate %d bytes for a Radio Measure " "Report.\n"), nBytes ); return eSIR_FAILURE; } // Paranoia: palZeroMemory( pMac->hHdd, pFrame, nBytes ); // Copy necessary info to BD if( eSIR_SUCCESS != (statusCode = limPopulateMacHeader( pMac, pFrame, SIR_MAC_MGMT_FRAME, SIR_MAC_MGMT_ACTION, peer, psessionEntry->selfMacAddr))) goto returnAfterError; // Update A3 with the BSSID pMacHdr = ( tpSirMacMgmtHdr ) pFrame; sirCopyMacAddr( pMacHdr->bssId, psessionEntry->bssId ); // Now, we're ready to "pack" the frames nStatus = dot11fPackRadioMeasurementReport( pMac, &frm, pFrame + sizeof( tSirMacMgmtHdr ), nPayload, &nPayload ); if( DOT11F_FAILED( nStatus )) { limLog( pMac, LOGE, FL( "Failed to pack an Radio Measure Report (0x%08x).\n" ), nStatus ); // FIXME - Need to convert to tSirRetStatus statusCode = eSIR_FAILURE; goto returnAfterError; } else if( DOT11F_WARNED( nStatus )) { limLog( pMac, LOGW, FL( "There were warnings while packing Radio Measure Report (0x%08x).\n" )); } limLog( pMac, LOGW, FL( "Sending a Radio Measure Report to \n" )); limPrintMacAddr( pMac, peer, LOGW ); if( ( SIR_BAND_5_GHZ == limGetRFBand(psessionEntry->currentOperChannel)) #ifdef WLAN_FEATURE_P2P || ( psessionEntry->pePersona == VOS_P2P_CLIENT_MODE ) || ( psessionEntry->pePersona == VOS_P2P_GO_MODE) #endif ) { txFlag |= HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME; } if( eHAL_STATUS_SUCCESS != (halstatus = halTxFrame( pMac, pPacket, (tANI_U16) nBytes, HAL_TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS, 7,//SMAC_SWBD_TX_TID_MGMT_HIGH, limTxComplete, pFrame, txFlag ))) { PELOGE(limLog( pMac, LOGE, FL( "halTxFrame FAILED! Status [%d]\n" ), halstatus );) statusCode = eSIR_FAILURE; //Pkt will be freed up by the callback return statusCode; } else return eSIR_SUCCESS; returnAfterError: palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT, ( void* ) pFrame, ( void* ) pPacket ); return statusCode; } // End limSendBeaconReportActionFrame. 
#endif

#ifdef WLAN_FEATURE_11W
/**
 * \brief Send an 802.11w SA Query response action frame to a peer
 *
 * \sa limSendSaQueryResponseFrame
 *
 * \param pMac          The global tpAniSirGlobal object
 * \param transId       Transaction identifier received in the SA Query
 *                      request action frame (echoed back unchanged)
 * \param peer          MAC address of the AP this action frame is addressed to
 * \param psessionEntry PE session on which the frame is sent
 *
 * \return eSIR_SUCCESS if the frame was handed to HAL for transmission,
 *         eSIR_FAILURE otherwise
 */
tSirRetStatus limSendSaQueryResponseFrame( tpAniSirGlobal pMac, tANI_U16 transId,
                                           tSirMacAddr peer, tpPESession psessionEntry)
{
    tDot11wSaQueryRsp frm;       // SA Query response action frame (fixed-size)
    tANI_U8           *pFrame;
    tSirRetStatus     nSirStatus;
    tANI_U32          nBytes, nPayload;
    void              *pPacket;
    eHalStatus        halstatus;
    tANI_U8           txFlag = 0;

    palZeroMemory( pMac->hHdd, ( tANI_U8* )&frm, sizeof( frm ) );
    frm.category = SIR_MAC_ACTION_SA_QUERY;
    /* 11w action field is:
       action: 0 --> SA Query request action frame
       action: 1 --> SA Query response action frame */
    frm.action = 1;
    /* 11w Draft 9.0: SA Query response transId is same as the request's */
    frm.transId = transId;

    nPayload = sizeof(tDot11wSaQueryRsp);
    nBytes = nPayload + sizeof( tSirMacMgmtHdr );
    halstatus = palPktAlloc( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT,
                             nBytes, ( void** ) &pFrame, ( void** ) &pPacket );
    if ( ! HAL_STATUS_SUCCESS ( halstatus ) )
    {
        limLog( pMac, LOGP, FL("Failed to allocate %d bytes for a SA query response"
                               " action frame\n"), nBytes );
        return eSIR_FAILURE;
    }

    // Paranoia:
    palZeroMemory( pMac->hHdd, pFrame, nBytes );

    // Next, we fill out the buffer descriptor:
    nSirStatus = limPopulateMacHeader( pMac, pFrame, SIR_MAC_MGMT_FRAME,
                                       SIR_MAC_MGMT_ACTION, peer,
                                       psessionEntry->selfMacAddr );
    if ( eSIR_SUCCESS != nSirStatus )
    {
        // BUGFIX: the message used to say "TPC Report" (copy/paste error).
        limLog( pMac, LOGE, FL("Failed to populate the buffer descrip"
                               "tor for a SA Query response (%d).\n"),
                nSirStatus );
        palPktFree( pMac->hHdd, HAL_TXRX_FRM_802_11_MGMT,
                    ( void* ) pFrame, ( void* ) pPacket );
        return eSIR_FAILURE;                // just allocated...
    }

    // The payload is a fixed-layout struct, so a plain copy suffices in
    // place of a dot11f pack routine.  (Removed write-only pMacHdr/pDump/
    // dumpCount locals that served no purpose.)
    DOT11F_MEMCPY( pMac, (tANI_U8 *)(pFrame + sizeof(tSirMacMgmtHdr)),
                   (tANI_U8 *)&frm, nPayload );

    halstatus = halTxFrame( pMac, pPacket, ( tANI_U16 ) nBytes,
                            HAL_TXRX_FRM_802_11_MGMT,
                            ANI_TXDIR_TODS,
                            7,//SMAC_SWBD_TX_TID_MGMT_HIGH,
                            limTxComplete, pFrame, txFlag );
    if ( ! HAL_STATUS_SUCCESS ( halstatus ) )
    {
        limLog( pMac, LOGE, FL("Failed to send a SA Query resp frame "
                               "(%X)!\n"), halstatus );
        //Pkt will be freed up by the callback
        return eSIR_FAILURE;                // just allocated...
    }

    return eSIR_SUCCESS;
}
#endif
gpl-2.0
KylinUI/android_kernel_htc_m7
lib/klist.c
333
6952
/* * klist.c - Routines for manipulating klists. * * Copyright (C) 2005 Patrick Mochel * * This file is released under the GPL v2. * * This klist interface provides a couple of structures that wrap around * struct list_head to provide explicit list "head" (struct klist) and list * "node" (struct klist_node) objects. For struct klist, a spinlock is * included that protects access to the actual list itself. struct * klist_node provides a pointer to the klist that owns it and a kref * reference count that indicates the number of current users of that node * in the list. * * The entire point is to provide an interface for iterating over a list * that is safe and allows for modification of the list during the * iteration (e.g. insertion and removal), including modification of the * current node on the list. * * It works using a 3rd object type - struct klist_iter - that is declared * and initialized before an iteration. klist_next() is used to acquire the * next element in the list. It returns NULL if there are no more items. * Internally, that routine takes the klist's lock, decrements the * reference count of the previous klist_node and increments the count of * the next klist_node. It then drops the lock and returns. * * There are primitives for adding and removing nodes to/from a klist. * When deleting, klist_del() will simply decrement the reference count. * Only when the count goes to 0 is the node removed from the list. * klist_remove() will try to delete the node from the list and block until * it is actually removed. This is useful for objects (like devices) that * have been removed from the system and must be freed (but must wait until * all accessors have finished). 
 */

#include <linux/klist.h>
#include <linux/export.h>
#include <linux/sched.h>

/*
 * Use the lowest bit of ->n_klist to mark deleted nodes so that
 * iterators can skip them.  The remaining bits are the owning klist.
 */
#define KNODE_DEAD		1LU
#define KNODE_KLIST_MASK	~KNODE_DEAD

/* Recover the owning klist pointer, masking off the DEAD flag bit. */
static struct klist *knode_klist(struct klist_node *knode)
{
	return (struct klist *)
		((unsigned long)knode->n_klist & KNODE_KLIST_MASK);
}

/* Has this node been klist_del()'d and is now awaiting its final put? */
static bool knode_dead(struct klist_node *knode)
{
	return (unsigned long)knode->n_klist & KNODE_DEAD;
}

/* Bind the node to its owning list; a fresh binding must not be dead. */
static void knode_set_klist(struct klist_node *knode, struct klist *klist)
{
	knode->n_klist = klist;
	/* no knode deserves to start its life dead */
	WARN_ON(knode_dead(knode));
}

/* Flag the node dead so iterators skip it; dying twice is a bug. */
static void knode_kill(struct klist_node *knode)
{
	/* and no knode should die twice ever either, see we're very humane */
	WARN_ON(knode_dead(knode));
	*(unsigned long *)&knode->n_klist |= KNODE_DEAD;
}

/**
 * klist_init - Initialize a klist structure.
 * @k: The klist we're initializing.
 * @get: The get function for the embedding object (NULL if none)
 * @put: The put function for the embedding object (NULL if none)
 *
 * Initialises the klist structure.  If the klist_node structures are
 * going to be embedded in refcounted objects (necessary for safe
 * deletion) then the get/put arguments are used to initialise
 * functions that take and release references on the embedding
 * objects.
 */
void klist_init(struct klist *k, void (*get)(struct klist_node *),
		void (*put)(struct klist_node *))
{
	INIT_LIST_HEAD(&k->k_list);
	spin_lock_init(&k->k_lock);
	k->get = get;
	k->put = put;
}
EXPORT_SYMBOL_GPL(klist_init);

/* Insert at the front of the list, under the klist's spinlock. */
static void add_head(struct klist *k, struct klist_node *n)
{
	spin_lock(&k->k_lock);
	list_add(&n->n_node, &k->k_list);
	spin_unlock(&k->k_lock);
}

/* Insert at the back of the list, under the klist's spinlock. */
static void add_tail(struct klist *k, struct klist_node *n)
{
	spin_lock(&k->k_lock);
	list_add_tail(&n->n_node, &k->k_list);
	spin_unlock(&k->k_lock);
}

/*
 * Common node setup: one initial reference, owning list recorded, and
 * the embedding object pinned via the klist's get() hook if provided.
 */
static void klist_node_init(struct klist *k, struct klist_node *n)
{
	INIT_LIST_HEAD(&n->n_node);
	kref_init(&n->n_ref);
	knode_set_klist(n, k);
	if (k->get)
		k->get(n);
}

/**
 * klist_add_head - Initialize a klist_node and add it to front.
 * @n: node we're adding.
 * @k: klist it's going on.
 */
void klist_add_head(struct klist_node *n, struct klist *k)
{
	klist_node_init(k, n);
	add_head(k, n);
}
EXPORT_SYMBOL_GPL(klist_add_head);

/**
 * klist_add_tail - Initialize a klist_node and add it to back.
 * @n: node we're adding.
 * @k: klist it's going on.
 */
void klist_add_tail(struct klist_node *n, struct klist *k)
{
	klist_node_init(k, n);
	add_tail(k, n);
}
EXPORT_SYMBOL_GPL(klist_add_tail);

/**
 * klist_add_after - Init a klist_node and add it after an existing node
 * @n: node we're adding.
 * @pos: node to put @n after
 */
void klist_add_after(struct klist_node *n, struct klist_node *pos)
{
	struct klist *k = knode_klist(pos);

	klist_node_init(k, n);
	spin_lock(&k->k_lock);
	list_add(&n->n_node, &pos->n_node);
	spin_unlock(&k->k_lock);
}
EXPORT_SYMBOL_GPL(klist_add_after);

/**
 * klist_add_before - Init a klist_node and add it before an existing node
 * @n: node we're adding.
 * @pos: node to put @n before
 */
void klist_add_before(struct klist_node *n, struct klist_node *pos)
{
	struct klist *k = knode_klist(pos);

	klist_node_init(k, n);
	spin_lock(&k->k_lock);
	list_add_tail(&n->n_node, &pos->n_node);
	spin_unlock(&k->k_lock);
}
EXPORT_SYMBOL_GPL(klist_add_before);

/* One blocked klist_remove() caller, keyed by the node it waits on. */
struct klist_waiter {
	struct list_head list;
	struct klist_node *node;
	struct task_struct *process;
	int woken;
};

static DEFINE_SPINLOCK(klist_remove_lock);
static LIST_HEAD(klist_remove_waiters);

/*
 * Final-reference callback (invoked by kref_put): unlink the node from
 * its list, wake every klist_remove() waiter parked on it, and clear
 * its owner pointer.
 */
static void klist_release(struct kref *kref)
{
	struct klist_waiter *waiter, *tmp;
	struct klist_node *n = container_of(kref, struct klist_node, n_ref);

	WARN_ON(!knode_dead(n));
	list_del(&n->n_node);
	spin_lock(&klist_remove_lock);
	list_for_each_entry_safe(waiter, tmp, &klist_remove_waiters, list) {
		if (waiter->node != n)
			continue;
		waiter->woken = 1;
		mb();	/* make ->woken visible before the wakeup */
		wake_up_process(waiter->process);
		list_del(&waiter->list);
	}
	spin_unlock(&klist_remove_lock);
	knode_set_klist(n, NULL);
}

/* Drop one reference; non-zero return means the node was released. */
static int klist_dec_and_del(struct klist_node *n)
{
	return kref_put(&n->n_ref, klist_release);
}

/*
 * Drop a reference under the list lock, optionally marking the node
 * dead first.  The embedding object's put() hook runs only if this was
 * the last reference, and deliberately outside the spinlock.
 */
static void klist_put(struct klist_node *n, bool kill)
{
	struct klist *k = knode_klist(n);
	void (*put)(struct klist_node *) = k->put;

	spin_lock(&k->k_lock);
	if (kill)
		knode_kill(n);
	if (!klist_dec_and_del(n))
		put = NULL;
	spin_unlock(&k->k_lock);
	if (put)
		put(n);
}

/**
 * klist_del - Decrement the reference count of node and try to remove.
 * @n: node we're deleting.
 */
void klist_del(struct klist_node *n)
{
	klist_put(n, true);
}
EXPORT_SYMBOL_GPL(klist_del);

/**
 * klist_remove - Decrement the refcount of node and wait for it to go away.
 * @n: node we're removing.
 *
 * Registers as a waiter, drops our reference, then sleeps until
 * klist_release() (the last-put callback) wakes us.
 */
void klist_remove(struct klist_node *n)
{
	struct klist_waiter waiter;

	waiter.node = n;
	waiter.process = current;
	waiter.woken = 0;
	spin_lock(&klist_remove_lock);
	list_add(&waiter.list, &klist_remove_waiters);
	spin_unlock(&klist_remove_lock);

	klist_del(n);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (waiter.woken)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL_GPL(klist_remove);

/**
 * klist_node_attached - Say whether a node is bound to a list or not.
 * @n: Node that we're testing.
 */
int klist_node_attached(struct klist_node *n)
{
	return (n->n_klist != NULL);
}
EXPORT_SYMBOL_GPL(klist_node_attached);

/**
 * klist_iter_init_node - Initialize a klist_iter structure.
 * @k: klist we're iterating.
 * @i: iterator structure.
 * @n: node to start with (may be NULL to start at the head).
 *
 * Takes a reference on @n, if given, so it stays valid until the
 * iterator moves past it.
 */
void klist_iter_init_node(struct klist *k, struct klist_iter *i,
			  struct klist_node *n)
{
	i->i_klist = k;
	i->i_cur = n;
	if (n)
		kref_get(&n->n_ref);
}
EXPORT_SYMBOL_GPL(klist_iter_init_node);

/**
 * klist_iter_init - Initialize a klist_iter structure.
 * @k: klist we're iterating.
 * @i: iterator structure.
 *
 * Starts the iteration off at the list head.
 */
void klist_iter_init(struct klist *k, struct klist_iter *i)
{
	klist_iter_init_node(k, i, NULL);
}
EXPORT_SYMBOL_GPL(klist_iter_init);

/**
 * klist_iter_exit - Finish a list iteration.
 * @i: Iterator structure.
 *
 * Must be called when done iterating, as it drops the reference held
 * on the current node.  Necessary in case iteration stopped before the
 * end of the list was reached, and harmless otherwise.
 */
void klist_iter_exit(struct klist_iter *i)
{
	if (i->i_cur) {
		klist_put(i->i_cur, false);
		i->i_cur = NULL;
	}
}
EXPORT_SYMBOL_GPL(klist_iter_exit);

/* Map a list_head back to its containing klist_node. */
static struct klist_node *to_klist_node(struct list_head *n)
{
	return container_of(n, struct klist_node, n_node);
}

/**
 * klist_next - Ante up next node in list.
 * @i: Iterator structure.
 *
 * Under the list lock: drop the reference on the previous node (if
 * any), walk forward past any dead nodes, and take a reference on the
 * next live node.  The previous node's put() hook, if armed, runs
 * outside the lock.  Returns NULL at the end of the list.
 */
struct klist_node *klist_next(struct klist_iter *i)
{
	void (*put)(struct klist_node *) = i->i_klist->put;
	struct klist_node *last = i->i_cur;
	struct klist_node *next;

	spin_lock(&i->i_klist->k_lock);

	if (last) {
		next = to_klist_node(last->n_node.next);
		if (!klist_dec_and_del(last))
			put = NULL;
	} else
		next = to_klist_node(i->i_klist->k_list.next);

	i->i_cur = NULL;
	while (next != to_klist_node(&i->i_klist->k_list)) {
		if (likely(!knode_dead(next))) {
			kref_get(&next->n_ref);
			i->i_cur = next;
			break;
		}
		next = to_klist_node(next->n_node.next);
	}

	spin_unlock(&i->i_klist->k_lock);

	if (put && last)
		put(last);
	return i->i_cur;
}
EXPORT_SYMBOL_GPL(klist_next);
gpl-2.0
CyanogenMod/android_kernel_sony_msm8x27
fs/ntfs/file.c
589
67784
/* * file.c - NTFS kernel file operations. Part of the Linux-NTFS project. * * Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc. * * This program/include file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program/include file is distributed in the hope that it will be * useful, but WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program (in the main directory of the Linux-NTFS * distribution in the file COPYING); if not, write to the Free Software * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/buffer_head.h> #include <linux/gfp.h> #include <linux/pagemap.h> #include <linux/pagevec.h> #include <linux/sched.h> #include <linux/swap.h> #include <linux/uio.h> #include <linux/writeback.h> #include <asm/page.h> #include <asm/uaccess.h> #include "attrib.h" #include "bitmap.h" #include "inode.h" #include "debug.h" #include "lcnalloc.h" #include "malloc.h" #include "mft.h" #include "ntfs.h" /** * ntfs_file_open - called when an inode is about to be opened * @vi: inode to be opened * @filp: file structure describing the inode * * Limit file size to the page cache limit on architectures where unsigned long * is 32-bits. This is the most we can do for now without overflowing the page * cache page index. Doing it this way means we don't run into problems because * of existing too large files. It would be better to allow the user to read * the beginning of the file but I doubt very much anyone is going to hit this * check on a 32-bit architecture, so there is no point in adding the extra * complexity required to support this. 
 *
 * On 64-bit architectures, the check is hopefully optimized away by the
 * compiler.
 *
 * After the check passes, just call generic_file_open() to do its work.
 */
static int ntfs_file_open(struct inode *vi, struct file *filp)
{
	/* On 32-bit: refuse files too large to index in the page cache. */
	if (sizeof(unsigned long) < 8) {
		if (i_size_read(vi) > MAX_LFS_FILESIZE)
			return -EOVERFLOW;
	}
	return generic_file_open(vi, filp);
}

#ifdef NTFS_RW

/**
 * ntfs_attr_extend_initialized - extend the initialized size of an attribute
 * @ni:			ntfs inode of the attribute to extend
 * @new_init_size:	requested new initialized size in bytes
 *
 * Extend the initialized size of an attribute described by the ntfs inode @ni
 * to @new_init_size bytes.  This involves zeroing any non-sparse space between
 * the old initialized size and @new_init_size both in the page cache and on
 * disk (if relevant complete pages are already uptodate in the page cache then
 * these are simply marked dirty).
 *
 * As a side-effect, the file size (vfs inode->i_size) may be incremented as,
 * in the resident attribute case, it is tied to the initialized size and, in
 * the non-resident attribute case, it may not fall below the initialized size.
 *
 * Note that if the attribute is resident, we do not need to touch the page
 * cache at all.  This is because if the page cache page is not uptodate we
 * bring it uptodate later, when doing the write to the mft record since we
 * then already have the page mapped.  And if the page is uptodate, the
 * non-initialized region will already have been zeroed when the page was
 * brought uptodate and the region may in fact already have been overwritten
 * with new data via mmap() based writes, so we cannot just zero it.  And since
 * POSIX specifies that the behaviour of resizing a file whilst it is mmap()ped
 * is unspecified, we choose not to do zeroing and thus we do not need to touch
 * the page at all.
 * For a more detailed explanation see ntfs_truncate() in
 * fs/ntfs/inode.c.
 *
 * Return 0 on success and -errno on error.  In the case that an error is
 * encountered it is possible that the initialized size will already have been
 * incremented some way towards @new_init_size but it is guaranteed that if
 * this is the case, the necessary zeroing will also have happened and that all
 * metadata is self-consistent.
 *
 * Locking: i_mutex on the vfs inode corresponding to the ntfs inode @ni must be
 * held by the caller.
 */
static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size)
{
	s64 old_init_size;
	loff_t old_i_size;
	pgoff_t index, end_index;
	unsigned long flags;
	struct inode *vi = VFS_I(ni);
	ntfs_inode *base_ni;
	MFT_RECORD *m = NULL;
	ATTR_RECORD *a;
	ntfs_attr_search_ctx *ctx = NULL;
	struct address_space *mapping;
	struct page *page = NULL;
	u8 *kattr;
	int err;
	u32 attr_len;

	/* Snapshot the old sizes under the size lock. */
	read_lock_irqsave(&ni->size_lock, flags);
	old_init_size = ni->initialized_size;
	old_i_size = i_size_read(vi);
	BUG_ON(new_init_size > ni->allocated_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
			"old_initialized_size 0x%llx, "
			"new_initialized_size 0x%llx, i_size 0x%llx.",
			vi->i_ino, (unsigned)le32_to_cpu(ni->type),
			(unsigned long long)old_init_size,
			(unsigned long long)new_init_size, old_i_size);
	/* Extent attributes hang off a base mft record; map that one. */
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Use goto to reduce indentation and we need the label below anyway.
	 */
	if (NInoNonResident(ni))
		goto do_non_resident_extend;
	/*
	 * Resident case: the value lives inside the mft record, so the
	 * zeroing happens there directly and no page cache work is needed.
	 */
	BUG_ON(old_init_size != old_i_size);
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		goto err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	m = ctx->mrec;
	a = ctx->attr;
	BUG_ON(a->non_resident);
	/* The total length of the attribute value. */
	attr_len = le32_to_cpu(a->data.resident.value_length);
	BUG_ON(old_i_size != (loff_t)attr_len);
	/*
	 * Do the zeroing in the mft record and update the attribute size in
	 * the mft record.
	 */
	kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
	memset(kattr + attr_len, 0, new_init_size - attr_len);
	a->data.resident.value_length = cpu_to_le32((u32)new_init_size);
	/* Finally, update the sizes in the vfs and ntfs inodes. */
	write_lock_irqsave(&ni->size_lock, flags);
	i_size_write(vi, new_init_size);
	ni->initialized_size = new_init_size;
	write_unlock_irqrestore(&ni->size_lock, flags);
	goto done;
do_non_resident_extend:
	/*
	 * If the new initialized size @new_init_size exceeds the current file
	 * size (vfs inode->i_size), we need to extend the file size to the
	 * new initialized size.
	 */
	if (new_init_size > old_i_size) {
		m = map_mft_record(base_ni);
		if (IS_ERR(m)) {
			err = PTR_ERR(m);
			m = NULL;
			goto err_out;
		}
		ctx = ntfs_attr_get_search_ctx(base_ni, m);
		if (unlikely(!ctx)) {
			err = -ENOMEM;
			goto err_out;
		}
		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, 0, NULL, 0, ctx);
		if (unlikely(err)) {
			if (err == -ENOENT)
				err = -EIO;
			goto err_out;
		}
		m = ctx->mrec;
		a = ctx->attr;
		BUG_ON(!a->non_resident);
		BUG_ON(old_i_size != (loff_t)
				sle64_to_cpu(a->data.non_resident.data_size));
		a->data.non_resident.data_size = cpu_to_sle64(new_init_size);
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);
		/* Update the file size in the vfs inode. */
		i_size_write(vi, new_init_size);
		/* Release the mft record before the page cache loop below. */
		ntfs_attr_put_search_ctx(ctx);
		ctx = NULL;
		unmap_mft_record(base_ni);
		m = NULL;
	}
	mapping = vi->i_mapping;
	index = old_init_size >> PAGE_CACHE_SHIFT;
	end_index = (new_init_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	do {
		/*
		 * Read the page.  If the page is not present, this will zero
		 * the uninitialized regions for us.
		 */
		page = read_mapping_page(mapping, index, NULL);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto init_err_out;
		}
		if (unlikely(PageError(page))) {
			page_cache_release(page);
			err = -EIO;
			goto init_err_out;
		}
		/*
		 * Update the initialized size in the ntfs inode.  This is
		 * enough to make ntfs_writepage() work.
		 */
		write_lock_irqsave(&ni->size_lock, flags);
		ni->initialized_size = (s64)(index + 1) << PAGE_CACHE_SHIFT;
		if (ni->initialized_size > new_init_size)
			ni->initialized_size = new_init_size;
		write_unlock_irqrestore(&ni->size_lock, flags);
		/* Set the page dirty so it gets written out. */
		set_page_dirty(page);
		page_cache_release(page);
		/*
		 * Play nice with the vm and the rest of the system.  This is
		 * very much needed as we can potentially be modifying the
		 * initialised size from a very small value to a really huge
		 * value, e.g.
		 *	f = open(somefile, O_TRUNC);
		 *	truncate(f, 10GiB);
		 *	seek(f, 10GiB);
		 *	write(f, 1);
		 * And this would mean we would be marking dirty hundreds of
		 * thousands of pages or as in the above example more than
		 * two and a half million pages!
		 *
		 * TODO: For sparse pages could optimize this workload by using
		 * the FsMisc / MiscFs page bit as a "PageIsSparse" bit.  This
		 * would be set in readpage for sparse pages and here we would
		 * not need to mark dirty any pages which have this bit set.
		 * The only caveat is that we have to clear the bit everywhere
		 * where we allocate any clusters that lie in the page or that
		 * contain the page.
		 *
		 * TODO: An even greater optimization would be for us to only
		 * call readpage() on pages which are not in sparse regions as
		 * determined from the runlist.  This would greatly reduce the
		 * number of pages we read and make dirty in the case of sparse
		 * files.
		 */
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	} while (++index < end_index);
	read_lock_irqsave(&ni->size_lock, flags);
	BUG_ON(ni->initialized_size != new_init_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	/* Now bring in sync the initialized_size in the mft record.
	 */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		goto init_err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto init_err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto init_err_out;
	}
	m = ctx->mrec;
	a = ctx->attr;
	BUG_ON(!a->non_resident);
	a->data.non_resident.initialized_size = cpu_to_sle64(new_init_size);
done:
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_debug("Done, initialized_size 0x%llx, i_size 0x%llx.",
			(unsigned long long)new_init_size, i_size_read(vi));
	return 0;
init_err_out:
	/* Roll the in-memory initialized size back to where we started. */
	write_lock_irqsave(&ni->size_lock, flags);
	ni->initialized_size = old_init_size;
	write_unlock_irqrestore(&ni->size_lock, flags);
err_out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_debug("Failed. Returning error code %i.", err);
	return err;
}

/**
 * ntfs_fault_in_pages_readable -
 *
 * Fault a number of userspace pages into pagetables.
 *
 * Unlike include/linux/pagemap.h::fault_in_pages_readable(), this one copes
 * with more than two userspace pages as well as handling the single page case
 * elegantly.
 *
 * If you find this difficult to understand, then think of the while loop being
 * the following code, except that we do without the integer variable ret:
 *
 *	do {
 *		ret = __get_user(c, uaddr);
 *		uaddr += PAGE_SIZE;
 *	} while (!ret && uaddr < end);
 *
 * Note, the final __get_user() may well run out-of-bounds of the user buffer,
 * but _not_ out-of-bounds of the page the user buffer belongs to, and since
 * this is only a read and not a write, and since it is still in the same page,
 * it should not matter and this makes the code much simpler.
 */
static inline void ntfs_fault_in_pages_readable(const char __user *uaddr,
		int bytes)
{
	const char __user *end;
	volatile char c;

	/* Set @end to the first byte outside the last page we care about. */
	end = (const char __user*)PAGE_ALIGN((unsigned long)uaddr + bytes);

	/* One dummy read per page is enough to fault it in. */
	while (!__get_user(c, uaddr) && (uaddr += PAGE_SIZE, uaddr < end))
		;
}

/**
 * ntfs_fault_in_pages_readable_iovec -
 *
 * Same as ntfs_fault_in_pages_readable() but operates on an array of iovecs.
 */
static inline void ntfs_fault_in_pages_readable_iovec(const struct iovec *iov,
		size_t iov_ofs, int bytes)
{
	/* Walk the iovec array, faulting in each segment's pages in turn. */
	do {
		const char __user *buf;
		unsigned len;

		buf = iov->iov_base + iov_ofs;
		len = iov->iov_len - iov_ofs;
		if (len > bytes)
			len = bytes;
		ntfs_fault_in_pages_readable(buf, len);
		bytes -= len;
		iov++;
		/* Only the first segment starts at a non-zero offset. */
		iov_ofs = 0;
	} while (bytes);
}

/**
 * __ntfs_grab_cache_pages - obtain a number of locked pages
 * @mapping:	address space mapping from which to obtain page cache pages
 * @index:	starting index in @mapping at which to begin obtaining pages
 * @nr_pages:	number of page cache pages to obtain
 * @pages:	array of pages in which to return the obtained page cache pages
 * @cached_page: allocated but as yet unused page
 *
 * Obtain @nr_pages locked page cache pages from the mapping @mapping and
 * starting at index @index.
 *
 * If a page is newly created, add it to lru list
 *
 * Note, the page locks are obtained in ascending page index order.
 */
static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
		pgoff_t index, const unsigned nr_pages, struct page **pages,
		struct page **cached_page)
{
	int err, nr;

	BUG_ON(!nr_pages);
	err = nr = 0;
	do {
		pages[nr] = find_lock_page(mapping, index);
		if (!pages[nr]) {
			/* Not cached yet: allocate (or reuse) a spare page. */
			if (!*cached_page) {
				*cached_page = page_cache_alloc(mapping);
				if (unlikely(!*cached_page)) {
					err = -ENOMEM;
					goto err_out;
				}
			}
			err = add_to_page_cache_lru(*cached_page, mapping, index,
					GFP_KERNEL);
			if (unlikely(err)) {
				/* Raced with another insertion: retry lookup. */
				if (err == -EEXIST)
					continue;
				goto err_out;
			}
			pages[nr] = *cached_page;
			*cached_page = NULL;
		}
		index++;
		nr++;
	} while (nr < nr_pages);
out:
	return err;
err_out:
	/* Unwind: unlock and release every page obtained so far. */
	while (nr > 0) {
		unlock_page(pages[--nr]);
		page_cache_release(pages[nr]);
	}
	goto out;
}

/* Queue an async read of @bh; completion sets it uptodate and unlocks. */
static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
{
	lock_buffer(bh);
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	return submit_bh(READ, bh);
}

/**
 * ntfs_prepare_pages_for_non_resident_write - prepare pages for receiving data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * This is called for non-resident attributes from ntfs_file_buffered_write()
 * with i_mutex held on the inode (@pages[0]->mapping->host).  There are
 * @nr_pages pages in @pages which are locked but not kmap()ped.  The source
 * data has not yet been copied into the @pages.
 *
 * Need to fill any holes with actual clusters, allocate buffers if necessary,
 * ensure all the buffers are mapped, and bring uptodate any buffers that are
 * only partially being written to.
 *
 * If @nr_pages is greater than one, we are guaranteed that the cluster size is
 * greater than PAGE_CACHE_SIZE, that all pages in @pages are entirely inside
 * the same cluster and that they are the entirety of that cluster, and that
 * the cluster is sparse, i.e. we need to allocate a cluster to fill the hole.
 *
 * i_size is not to be modified yet.
* * Return 0 on success or -errno on error. */ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, unsigned nr_pages, s64 pos, size_t bytes) { VCN vcn, highest_vcn = 0, cpos, cend, bh_cpos, bh_cend; LCN lcn; s64 bh_pos, vcn_len, end, initialized_size; sector_t lcn_block; struct page *page; struct inode *vi; ntfs_inode *ni, *base_ni = NULL; ntfs_volume *vol; runlist_element *rl, *rl2; struct buffer_head *bh, *head, *wait[2], **wait_bh = wait; ntfs_attr_search_ctx *ctx = NULL; MFT_RECORD *m = NULL; ATTR_RECORD *a = NULL; unsigned long flags; u32 attr_rec_len = 0; unsigned blocksize, u; int err, mp_size; bool rl_write_locked, was_hole, is_retry; unsigned char blocksize_bits; struct { u8 runlist_merged:1; u8 mft_attr_mapped:1; u8 mp_rebuilt:1; u8 attr_switched:1; } status = { 0, 0, 0, 0 }; BUG_ON(!nr_pages); BUG_ON(!pages); BUG_ON(!*pages); vi = pages[0]->mapping->host; ni = NTFS_I(vi); vol = ni->vol; ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page " "index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.", vi->i_ino, ni->type, pages[0]->index, nr_pages, (long long)pos, bytes); blocksize = vol->sb->s_blocksize; blocksize_bits = vol->sb->s_blocksize_bits; u = 0; do { page = pages[u]; BUG_ON(!page); /* * create_empty_buffers() will create uptodate/dirty buffers if * the page is uptodate/dirty. */ if (!page_has_buffers(page)) { create_empty_buffers(page, blocksize, 0); if (unlikely(!page_has_buffers(page))) return -ENOMEM; } } while (++u < nr_pages); rl_write_locked = false; rl = NULL; err = 0; vcn = lcn = -1; vcn_len = 0; lcn_block = -1; was_hole = false; cpos = pos >> vol->cluster_size_bits; end = pos + bytes; cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits; /* * Loop over each page and for each page over each buffer. Use goto to * reduce indentation. 
*/ u = 0; do_next_page: page = pages[u]; bh_pos = (s64)page->index << PAGE_CACHE_SHIFT; bh = head = page_buffers(page); do { VCN cdelta; s64 bh_end; unsigned bh_cofs; /* Clear buffer_new on all buffers to reinitialise state. */ if (buffer_new(bh)) clear_buffer_new(bh); bh_end = bh_pos + blocksize; bh_cpos = bh_pos >> vol->cluster_size_bits; bh_cofs = bh_pos & vol->cluster_size_mask; if (buffer_mapped(bh)) { /* * The buffer is already mapped. If it is uptodate, * ignore it. */ if (buffer_uptodate(bh)) continue; /* * The buffer is not uptodate. If the page is uptodate * set the buffer uptodate and otherwise ignore it. */ if (PageUptodate(page)) { set_buffer_uptodate(bh); continue; } /* * Neither the page nor the buffer are uptodate. If * the buffer is only partially being written to, we * need to read it in before the write, i.e. now. */ if ((bh_pos < pos && bh_end > pos) || (bh_pos < end && bh_end > end)) { /* * If the buffer is fully or partially within * the initialized size, do an actual read. * Otherwise, simply zero the buffer. */ read_lock_irqsave(&ni->size_lock, flags); initialized_size = ni->initialized_size; read_unlock_irqrestore(&ni->size_lock, flags); if (bh_pos < initialized_size) { ntfs_submit_bh_for_read(bh); *wait_bh++ = bh; } else { zero_user(page, bh_offset(bh), blocksize); set_buffer_uptodate(bh); } } continue; } /* Unmapped buffer. Need to map it. */ bh->b_bdev = vol->sb->s_bdev; /* * If the current buffer is in the same clusters as the map * cache, there is no need to check the runlist again. The * map cache is made up of @vcn, which is the first cached file * cluster, @vcn_len which is the number of cached file * clusters, @lcn is the device cluster corresponding to @vcn, * and @lcn_block is the block number corresponding to @lcn. 
*/ cdelta = bh_cpos - vcn; if (likely(!cdelta || (cdelta > 0 && cdelta < vcn_len))) { map_buffer_cached: BUG_ON(lcn < 0); bh->b_blocknr = lcn_block + (cdelta << (vol->cluster_size_bits - blocksize_bits)) + (bh_cofs >> blocksize_bits); set_buffer_mapped(bh); /* * If the page is uptodate so is the buffer. If the * buffer is fully outside the write, we ignore it if * it was already allocated and we mark it dirty so it * gets written out if we allocated it. On the other * hand, if we allocated the buffer but we are not * marking it dirty we set buffer_new so we can do * error recovery. */ if (PageUptodate(page)) { if (!buffer_uptodate(bh)) set_buffer_uptodate(bh); if (unlikely(was_hole)) { /* We allocated the buffer. */ unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr); if (bh_end <= pos || bh_pos >= end) mark_buffer_dirty(bh); else set_buffer_new(bh); } continue; } /* Page is _not_ uptodate. */ if (likely(!was_hole)) { /* * Buffer was already allocated. If it is not * uptodate and is only partially being written * to, we need to read it in before the write, * i.e. now. */ if (!buffer_uptodate(bh) && bh_pos < end && bh_end > pos && (bh_pos < pos || bh_end > end)) { /* * If the buffer is fully or partially * within the initialized size, do an * actual read. Otherwise, simply zero * the buffer. */ read_lock_irqsave(&ni->size_lock, flags); initialized_size = ni->initialized_size; read_unlock_irqrestore(&ni->size_lock, flags); if (bh_pos < initialized_size) { ntfs_submit_bh_for_read(bh); *wait_bh++ = bh; } else { zero_user(page, bh_offset(bh), blocksize); set_buffer_uptodate(bh); } } continue; } /* We allocated the buffer. */ unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr); /* * If the buffer is fully outside the write, zero it, * set it uptodate, and mark it dirty so it gets * written out. If it is partially being written to, * zero region surrounding the write but leave it to * commit write to do anything else. 
Finally, if the * buffer is fully being overwritten, do nothing. */ if (bh_end <= pos || bh_pos >= end) { if (!buffer_uptodate(bh)) { zero_user(page, bh_offset(bh), blocksize); set_buffer_uptodate(bh); } mark_buffer_dirty(bh); continue; } set_buffer_new(bh); if (!buffer_uptodate(bh) && (bh_pos < pos || bh_end > end)) { u8 *kaddr; unsigned pofs; kaddr = kmap_atomic(page); if (bh_pos < pos) { pofs = bh_pos & ~PAGE_CACHE_MASK; memset(kaddr + pofs, 0, pos - bh_pos); } if (bh_end > end) { pofs = end & ~PAGE_CACHE_MASK; memset(kaddr + pofs, 0, bh_end - end); } kunmap_atomic(kaddr); flush_dcache_page(page); } continue; } /* * Slow path: this is the first buffer in the cluster. If it * is outside allocated size and is not uptodate, zero it and * set it uptodate. */ read_lock_irqsave(&ni->size_lock, flags); initialized_size = ni->allocated_size; read_unlock_irqrestore(&ni->size_lock, flags); if (bh_pos > initialized_size) { if (PageUptodate(page)) { if (!buffer_uptodate(bh)) set_buffer_uptodate(bh); } else if (!buffer_uptodate(bh)) { zero_user(page, bh_offset(bh), blocksize); set_buffer_uptodate(bh); } continue; } is_retry = false; if (!rl) { down_read(&ni->runlist.lock); retry_remap: rl = ni->runlist.rl; } if (likely(rl != NULL)) { /* Seek to element containing target cluster. */ while (rl->length && rl[1].vcn <= bh_cpos) rl++; lcn = ntfs_rl_vcn_to_lcn(rl, bh_cpos); if (likely(lcn >= 0)) { /* * Successful remap, setup the map cache and * use that to deal with the buffer. */ was_hole = false; vcn = bh_cpos; vcn_len = rl[1].vcn - vcn; lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits); cdelta = 0; /* * If the number of remaining clusters touched * by the write is smaller or equal to the * number of cached clusters, unlock the * runlist as the map cache will be used from * now on. 
*/ if (likely(vcn + vcn_len >= cend)) { if (rl_write_locked) { up_write(&ni->runlist.lock); rl_write_locked = false; } else up_read(&ni->runlist.lock); rl = NULL; } goto map_buffer_cached; } } else lcn = LCN_RL_NOT_MAPPED; /* * If it is not a hole and not out of bounds, the runlist is * probably unmapped so try to map it now. */ if (unlikely(lcn != LCN_HOLE && lcn != LCN_ENOENT)) { if (likely(!is_retry && lcn == LCN_RL_NOT_MAPPED)) { /* Attempt to map runlist. */ if (!rl_write_locked) { /* * We need the runlist locked for * writing, so if it is locked for * reading relock it now and retry in * case it changed whilst we dropped * the lock. */ up_read(&ni->runlist.lock); down_write(&ni->runlist.lock); rl_write_locked = true; goto retry_remap; } err = ntfs_map_runlist_nolock(ni, bh_cpos, NULL); if (likely(!err)) { is_retry = true; goto retry_remap; } /* * If @vcn is out of bounds, pretend @lcn is * LCN_ENOENT. As long as the buffer is out * of bounds this will work fine. */ if (err == -ENOENT) { lcn = LCN_ENOENT; err = 0; goto rl_not_mapped_enoent; } } else err = -EIO; /* Failed to map the buffer, even after retrying. */ bh->b_blocknr = -1; ntfs_error(vol->sb, "Failed to write to inode 0x%lx, " "attribute type 0x%x, vcn 0x%llx, " "vcn offset 0x%x, because its " "location on disk could not be " "determined%s (error code %i).", ni->mft_no, ni->type, (unsigned long long)bh_cpos, (unsigned)bh_pos & vol->cluster_size_mask, is_retry ? " even after retrying" : "", err); break; } rl_not_mapped_enoent: /* * The buffer is in a hole or out of bounds. We need to fill * the hole, unless the buffer is in a cluster which is not * touched by the write, in which case we just leave the buffer * unmapped. This can only happen when the cluster size is * less than the page cache size. 
*/ if (unlikely(vol->cluster_size < PAGE_CACHE_SIZE)) { bh_cend = (bh_end + vol->cluster_size - 1) >> vol->cluster_size_bits; if ((bh_cend <= cpos || bh_cpos >= cend)) { bh->b_blocknr = -1; /* * If the buffer is uptodate we skip it. If it * is not but the page is uptodate, we can set * the buffer uptodate. If the page is not * uptodate, we can clear the buffer and set it * uptodate. Whether this is worthwhile is * debatable and this could be removed. */ if (PageUptodate(page)) { if (!buffer_uptodate(bh)) set_buffer_uptodate(bh); } else if (!buffer_uptodate(bh)) { zero_user(page, bh_offset(bh), blocksize); set_buffer_uptodate(bh); } continue; } } /* * Out of bounds buffer is invalid if it was not really out of * bounds. */ BUG_ON(lcn != LCN_HOLE); /* * We need the runlist locked for writing, so if it is locked * for reading relock it now and retry in case it changed * whilst we dropped the lock. */ BUG_ON(!rl); if (!rl_write_locked) { up_read(&ni->runlist.lock); down_write(&ni->runlist.lock); rl_write_locked = true; goto retry_remap; } /* Find the previous last allocated cluster. */ BUG_ON(rl->lcn != LCN_HOLE); lcn = -1; rl2 = rl; while (--rl2 >= ni->runlist.rl) { if (rl2->lcn >= 0) { lcn = rl2->lcn + rl2->length; break; } } rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE, false); if (IS_ERR(rl2)) { err = PTR_ERR(rl2); ntfs_debug("Failed to allocate cluster, error code %i.", err); break; } lcn = rl2->lcn; rl = ntfs_runlists_merge(ni->runlist.rl, rl2); if (IS_ERR(rl)) { err = PTR_ERR(rl); if (err != -ENOMEM) err = -EIO; if (ntfs_cluster_free_from_rl(vol, rl2)) { ntfs_error(vol->sb, "Failed to release " "allocated cluster in error " "code path. Run chkdsk to " "recover the lost cluster."); NVolSetErrors(vol); } ntfs_free(rl2); break; } ni->runlist.rl = rl; status.runlist_merged = 1; ntfs_debug("Allocated cluster, lcn 0x%llx.", (unsigned long long)lcn); /* Map and lock the mft record and get the attribute record. 
*/ if (!NInoAttr(ni)) base_ni = ni; else base_ni = ni->ext.base_ntfs_ino; m = map_mft_record(base_ni); if (IS_ERR(m)) { err = PTR_ERR(m); break; } ctx = ntfs_attr_get_search_ctx(base_ni, m); if (unlikely(!ctx)) { err = -ENOMEM; unmap_mft_record(base_ni); break; } status.mft_attr_mapped = 1; err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len, CASE_SENSITIVE, bh_cpos, NULL, 0, ctx); if (unlikely(err)) { if (err == -ENOENT) err = -EIO; break; } m = ctx->mrec; a = ctx->attr; /* * Find the runlist element with which the attribute extent * starts. Note, we cannot use the _attr_ version because we * have mapped the mft record. That is ok because we know the * runlist fragment must be mapped already to have ever gotten * here, so we can just use the _rl_ version. */ vcn = sle64_to_cpu(a->data.non_resident.lowest_vcn); rl2 = ntfs_rl_find_vcn_nolock(rl, vcn); BUG_ON(!rl2); BUG_ON(!rl2->length); BUG_ON(rl2->lcn < LCN_HOLE); highest_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn); /* * If @highest_vcn is zero, calculate the real highest_vcn * (which can really be zero). */ if (!highest_vcn) highest_vcn = (sle64_to_cpu( a->data.non_resident.allocated_size) >> vol->cluster_size_bits) - 1; /* * Determine the size of the mapping pairs array for the new * extent, i.e. the old extent with the hole filled. */ mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, vcn, highest_vcn); if (unlikely(mp_size <= 0)) { if (!(err = mp_size)) err = -EIO; ntfs_debug("Failed to get size for mapping pairs " "array, error code %i.", err); break; } /* * Resize the attribute record to fit the new mapping pairs * array. */ attr_rec_len = le32_to_cpu(a->length); err = ntfs_attr_record_resize(m, a, mp_size + le16_to_cpu( a->data.non_resident.mapping_pairs_offset)); if (unlikely(err)) { BUG_ON(err != -ENOSPC); // TODO: Deal with this by using the current attribute // and fill it with as much of the mapping pairs // array as possible. 
Then loop over each attribute // extent rewriting the mapping pairs arrays as we go // along and if when we reach the end we have not // enough space, try to resize the last attribute // extent and if even that fails, add a new attribute // extent. // We could also try to resize at each step in the hope // that we will not need to rewrite every single extent. // Note, we may need to decompress some extents to fill // the runlist as we are walking the extents... ntfs_error(vol->sb, "Not enough space in the mft " "record for the extended attribute " "record. This case is not " "implemented yet."); err = -EOPNOTSUPP; break ; } status.mp_rebuilt = 1; /* * Generate the mapping pairs array directly into the attribute * record. */ err = ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu( a->data.non_resident.mapping_pairs_offset), mp_size, rl2, vcn, highest_vcn, NULL); if (unlikely(err)) { ntfs_error(vol->sb, "Cannot fill hole in inode 0x%lx, " "attribute type 0x%x, because building " "the mapping pairs failed with error " "code %i.", vi->i_ino, (unsigned)le32_to_cpu(ni->type), err); err = -EIO; break; } /* Update the highest_vcn but only if it was not set. */ if (unlikely(!a->data.non_resident.highest_vcn)) a->data.non_resident.highest_vcn = cpu_to_sle64(highest_vcn); /* * If the attribute is sparse/compressed, update the compressed * size in the ntfs_inode structure and the attribute record. */ if (likely(NInoSparse(ni) || NInoCompressed(ni))) { /* * If we are not in the first attribute extent, switch * to it, but first ensure the changes will make it to * disk later. */ if (a->data.non_resident.lowest_vcn) { flush_dcache_mft_record_page(ctx->ntfs_ino); mark_mft_record_dirty(ctx->ntfs_ino); ntfs_attr_reinit_search_ctx(ctx); err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len, CASE_SENSITIVE, 0, NULL, 0, ctx); if (unlikely(err)) { status.attr_switched = 1; break; } /* @m is not used any more so do not set it. 
*/ a = ctx->attr; } write_lock_irqsave(&ni->size_lock, flags); ni->itype.compressed.size += vol->cluster_size; a->data.non_resident.compressed_size = cpu_to_sle64(ni->itype.compressed.size); write_unlock_irqrestore(&ni->size_lock, flags); } /* Ensure the changes make it to disk. */ flush_dcache_mft_record_page(ctx->ntfs_ino); mark_mft_record_dirty(ctx->ntfs_ino); ntfs_attr_put_search_ctx(ctx); unmap_mft_record(base_ni); /* Successfully filled the hole. */ status.runlist_merged = 0; status.mft_attr_mapped = 0; status.mp_rebuilt = 0; /* Setup the map cache and use that to deal with the buffer. */ was_hole = true; vcn = bh_cpos; vcn_len = 1; lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits); cdelta = 0; /* * If the number of remaining clusters in the @pages is smaller * or equal to the number of cached clusters, unlock the * runlist as the map cache will be used from now on. */ if (likely(vcn + vcn_len >= cend)) { up_write(&ni->runlist.lock); rl_write_locked = false; rl = NULL; } goto map_buffer_cached; } while (bh_pos += blocksize, (bh = bh->b_this_page) != head); /* If there are no errors, do the next page. */ if (likely(!err && ++u < nr_pages)) goto do_next_page; /* If there are no errors, release the runlist lock if we took it. */ if (likely(!err)) { if (unlikely(rl_write_locked)) { up_write(&ni->runlist.lock); rl_write_locked = false; } else if (unlikely(rl)) up_read(&ni->runlist.lock); rl = NULL; } /* If we issued read requests, let them complete. */ read_lock_irqsave(&ni->size_lock, flags); initialized_size = ni->initialized_size; read_unlock_irqrestore(&ni->size_lock, flags); while (wait_bh > wait) { bh = *--wait_bh; wait_on_buffer(bh); if (likely(buffer_uptodate(bh))) { page = bh->b_page; bh_pos = ((s64)page->index << PAGE_CACHE_SHIFT) + bh_offset(bh); /* * If the buffer overflows the initialized size, need * to zero the overflowing region. 
*/ if (unlikely(bh_pos + blocksize > initialized_size)) { int ofs = 0; if (likely(bh_pos < initialized_size)) ofs = initialized_size - bh_pos; zero_user_segment(page, bh_offset(bh) + ofs, blocksize); } } else /* if (unlikely(!buffer_uptodate(bh))) */ err = -EIO; } if (likely(!err)) { /* Clear buffer_new on all buffers. */ u = 0; do { bh = head = page_buffers(pages[u]); do { if (buffer_new(bh)) clear_buffer_new(bh); } while ((bh = bh->b_this_page) != head); } while (++u < nr_pages); ntfs_debug("Done."); return err; } if (status.attr_switched) { /* Get back to the attribute extent we modified. */ ntfs_attr_reinit_search_ctx(ctx); if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len, CASE_SENSITIVE, bh_cpos, NULL, 0, ctx)) { ntfs_error(vol->sb, "Failed to find required " "attribute extent of attribute in " "error code path. Run chkdsk to " "recover."); write_lock_irqsave(&ni->size_lock, flags); ni->itype.compressed.size += vol->cluster_size; write_unlock_irqrestore(&ni->size_lock, flags); flush_dcache_mft_record_page(ctx->ntfs_ino); mark_mft_record_dirty(ctx->ntfs_ino); /* * The only thing that is now wrong is the compressed * size of the base attribute extent which chkdsk * should be able to fix. */ NVolSetErrors(vol); } else { m = ctx->mrec; a = ctx->attr; status.attr_switched = 0; } } /* * If the runlist has been modified, need to restore it by punching a * hole into it and we then need to deallocate the on-disk cluster as * well. Note, we only modify the runlist if we are able to generate a * new mapping pairs array, i.e. only when the mapped attribute extent * is not switched. */ if (status.runlist_merged && !status.attr_switched) { BUG_ON(!rl_write_locked); /* Make the file cluster we allocated sparse in the runlist. */ if (ntfs_rl_punch_nolock(vol, &ni->runlist, bh_cpos, 1)) { ntfs_error(vol->sb, "Failed to punch hole into " "attribute runlist in error code " "path. 
Run chkdsk to recover the " "lost cluster."); NVolSetErrors(vol); } else /* if (success) */ { status.runlist_merged = 0; /* * Deallocate the on-disk cluster we allocated but only * if we succeeded in punching its vcn out of the * runlist. */ down_write(&vol->lcnbmp_lock); if (ntfs_bitmap_clear_bit(vol->lcnbmp_ino, lcn)) { ntfs_error(vol->sb, "Failed to release " "allocated cluster in error " "code path. Run chkdsk to " "recover the lost cluster."); NVolSetErrors(vol); } up_write(&vol->lcnbmp_lock); } } /* * Resize the attribute record to its old size and rebuild the mapping * pairs array. Note, we only can do this if the runlist has been * restored to its old state which also implies that the mapped * attribute extent is not switched. */ if (status.mp_rebuilt && !status.runlist_merged) { if (ntfs_attr_record_resize(m, a, attr_rec_len)) { ntfs_error(vol->sb, "Failed to restore attribute " "record in error code path. Run " "chkdsk to recover."); NVolSetErrors(vol); } else /* if (success) */ { if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(a->data.non_resident. mapping_pairs_offset), attr_rec_len - le16_to_cpu(a->data.non_resident. mapping_pairs_offset), ni->runlist.rl, vcn, highest_vcn, NULL)) { ntfs_error(vol->sb, "Failed to restore " "mapping pairs array in error " "code path. Run chkdsk to " "recover."); NVolSetErrors(vol); } flush_dcache_mft_record_page(ctx->ntfs_ino); mark_mft_record_dirty(ctx->ntfs_ino); } } /* Release the mft record and the attribute. */ if (status.mft_attr_mapped) { ntfs_attr_put_search_ctx(ctx); unmap_mft_record(base_ni); } /* Release the runlist lock. */ if (rl_write_locked) up_write(&ni->runlist.lock); else if (rl) up_read(&ni->runlist.lock); /* * Zero out any newly allocated blocks to avoid exposing stale data. * If BH_New is set, we know that the block was newly allocated above * and that it has not been fully zeroed and marked dirty yet. 
*/ nr_pages = u; u = 0; end = bh_cpos << vol->cluster_size_bits; do { page = pages[u]; bh = head = page_buffers(page); do { if (u == nr_pages && ((s64)page->index << PAGE_CACHE_SHIFT) + bh_offset(bh) >= end) break; if (!buffer_new(bh)) continue; clear_buffer_new(bh); if (!buffer_uptodate(bh)) { if (PageUptodate(page)) set_buffer_uptodate(bh); else { zero_user(page, bh_offset(bh), blocksize); set_buffer_uptodate(bh); } } mark_buffer_dirty(bh); } while ((bh = bh->b_this_page) != head); } while (++u <= nr_pages); ntfs_error(vol->sb, "Failed. Returning error code %i.", err); return err; } /* * Copy as much as we can into the pages and return the number of bytes which * were successfully copied. If a fault is encountered then clear the pages * out to (ofs + bytes) and return the number of bytes which were copied. */ static inline size_t ntfs_copy_from_user(struct page **pages, unsigned nr_pages, unsigned ofs, const char __user *buf, size_t bytes) { struct page **last_page = pages + nr_pages; char *addr; size_t total = 0; unsigned len; int left; do { len = PAGE_CACHE_SIZE - ofs; if (len > bytes) len = bytes; addr = kmap_atomic(*pages); left = __copy_from_user_inatomic(addr + ofs, buf, len); kunmap_atomic(addr); if (unlikely(left)) { /* Do it the slow way. */ addr = kmap(*pages); left = __copy_from_user(addr + ofs, buf, len); kunmap(*pages); if (unlikely(left)) goto err_out; } total += len; bytes -= len; if (!bytes) break; buf += len; ofs = 0; } while (++pages < last_page); out: return total; err_out: total += len - left; /* Zero the rest of the target like __copy_from_user(). 
*/ while (++pages < last_page) { bytes -= len; if (!bytes) break; len = PAGE_CACHE_SIZE; if (len > bytes) len = bytes; zero_user(*pages, 0, len); } goto out; } static size_t __ntfs_copy_from_user_iovec_inatomic(char *vaddr, const struct iovec *iov, size_t iov_ofs, size_t bytes) { size_t total = 0; while (1) { const char __user *buf = iov->iov_base + iov_ofs; unsigned len; size_t left; len = iov->iov_len - iov_ofs; if (len > bytes) len = bytes; left = __copy_from_user_inatomic(vaddr, buf, len); total += len; bytes -= len; vaddr += len; if (unlikely(left)) { total -= left; break; } if (!bytes) break; iov++; iov_ofs = 0; } return total; } static inline void ntfs_set_next_iovec(const struct iovec **iovp, size_t *iov_ofsp, size_t bytes) { const struct iovec *iov = *iovp; size_t iov_ofs = *iov_ofsp; while (bytes) { unsigned len; len = iov->iov_len - iov_ofs; if (len > bytes) len = bytes; bytes -= len; iov_ofs += len; if (iov->iov_len == iov_ofs) { iov++; iov_ofs = 0; } } *iovp = iov; *iov_ofsp = iov_ofs; } /* * This has the same side-effects and return value as ntfs_copy_from_user(). * The difference is that on a fault we need to memset the remainder of the * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s * single-segment behaviour. * * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both when * atomic and when not atomic. This is ok because it calls * __copy_from_user_inatomic() and it is ok to call this when non-atomic. In * fact, the only difference between __copy_from_user_inatomic() and * __copy_from_user() is that the latter calls might_sleep() and the former * should not zero the tail of the buffer on error. And on many architectures * __copy_from_user_inatomic() is just defined to __copy_from_user() so it * makes no difference at all on those architectures. 
*/ static inline size_t ntfs_copy_from_user_iovec(struct page **pages, unsigned nr_pages, unsigned ofs, const struct iovec **iov, size_t *iov_ofs, size_t bytes) { struct page **last_page = pages + nr_pages; char *addr; size_t copied, len, total = 0; do { len = PAGE_CACHE_SIZE - ofs; if (len > bytes) len = bytes; addr = kmap_atomic(*pages); copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs, *iov, *iov_ofs, len); kunmap_atomic(addr); if (unlikely(copied != len)) { /* Do it the slow way. */ addr = kmap(*pages); copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs, *iov, *iov_ofs, len); if (unlikely(copied != len)) goto err_out; kunmap(*pages); } total += len; ntfs_set_next_iovec(iov, iov_ofs, len); bytes -= len; if (!bytes) break; ofs = 0; } while (++pages < last_page); out: return total; err_out: BUG_ON(copied > len); /* Zero the rest of the target like __copy_from_user(). */ memset(addr + ofs + copied, 0, len - copied); kunmap(*pages); total += copied; ntfs_set_next_iovec(iov, iov_ofs, copied); while (++pages < last_page) { bytes -= len; if (!bytes) break; len = PAGE_CACHE_SIZE; if (len > bytes) len = bytes; zero_user(*pages, 0, len); } goto out; } static inline void ntfs_flush_dcache_pages(struct page **pages, unsigned nr_pages) { BUG_ON(!nr_pages); /* * Warning: Do not do the decrement at the same time as the call to * flush_dcache_page() because it is a NULL macro on i386 and hence the * decrement never happens so the loop never terminates. */ do { --nr_pages; flush_dcache_page(pages[nr_pages]); } while (nr_pages > 0); } /** * ntfs_commit_pages_after_non_resident_write - commit the received data * @pages: array of destination pages * @nr_pages: number of pages in @pages * @pos: byte position in file at which the write begins * @bytes: number of bytes to be written * * See description of ntfs_commit_pages_after_write(), below. 
*/ static inline int ntfs_commit_pages_after_non_resident_write( struct page **pages, const unsigned nr_pages, s64 pos, size_t bytes) { s64 end, initialized_size; struct inode *vi; ntfs_inode *ni, *base_ni; struct buffer_head *bh, *head; ntfs_attr_search_ctx *ctx; MFT_RECORD *m; ATTR_RECORD *a; unsigned long flags; unsigned blocksize, u; int err; vi = pages[0]->mapping->host; ni = NTFS_I(vi); blocksize = vi->i_sb->s_blocksize; end = pos + bytes; u = 0; do { s64 bh_pos; struct page *page; bool partial; page = pages[u]; bh_pos = (s64)page->index << PAGE_CACHE_SHIFT; bh = head = page_buffers(page); partial = false; do { s64 bh_end; bh_end = bh_pos + blocksize; if (bh_end <= pos || bh_pos >= end) { if (!buffer_uptodate(bh)) partial = true; } else { set_buffer_uptodate(bh); mark_buffer_dirty(bh); } } while (bh_pos += blocksize, (bh = bh->b_this_page) != head); /* * If all buffers are now uptodate but the page is not, set the * page uptodate. */ if (!partial && !PageUptodate(page)) SetPageUptodate(page); } while (++u < nr_pages); /* * Finally, if we do not need to update initialized_size or i_size we * are finished. */ read_lock_irqsave(&ni->size_lock, flags); initialized_size = ni->initialized_size; read_unlock_irqrestore(&ni->size_lock, flags); if (end <= initialized_size) { ntfs_debug("Done."); return 0; } /* * Update initialized_size/i_size as appropriate, both in the inode and * the mft record. */ if (!NInoAttr(ni)) base_ni = ni; else base_ni = ni->ext.base_ntfs_ino; /* Map, pin, and lock the mft record. 
*/ m = map_mft_record(base_ni); if (IS_ERR(m)) { err = PTR_ERR(m); m = NULL; ctx = NULL; goto err_out; } BUG_ON(!NInoNonResident(ni)); ctx = ntfs_attr_get_search_ctx(base_ni, m); if (unlikely(!ctx)) { err = -ENOMEM; goto err_out; } err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len, CASE_SENSITIVE, 0, NULL, 0, ctx); if (unlikely(err)) { if (err == -ENOENT) err = -EIO; goto err_out; } a = ctx->attr; BUG_ON(!a->non_resident); write_lock_irqsave(&ni->size_lock, flags); BUG_ON(end > ni->allocated_size); ni->initialized_size = end; a->data.non_resident.initialized_size = cpu_to_sle64(end); if (end > i_size_read(vi)) { i_size_write(vi, end); a->data.non_resident.data_size = a->data.non_resident.initialized_size; } write_unlock_irqrestore(&ni->size_lock, flags); /* Mark the mft record dirty, so it gets written back. */ flush_dcache_mft_record_page(ctx->ntfs_ino); mark_mft_record_dirty(ctx->ntfs_ino); ntfs_attr_put_search_ctx(ctx); unmap_mft_record(base_ni); ntfs_debug("Done."); return 0; err_out: if (ctx) ntfs_attr_put_search_ctx(ctx); if (m) unmap_mft_record(base_ni); ntfs_error(vi->i_sb, "Failed to update initialized_size/i_size (error " "code %i).", err); if (err != -ENOMEM) NVolSetErrors(ni->vol); return err; } /** * ntfs_commit_pages_after_write - commit the received data * @pages: array of destination pages * @nr_pages: number of pages in @pages * @pos: byte position in file at which the write begins * @bytes: number of bytes to be written * * This is called from ntfs_file_buffered_write() with i_mutex held on the inode * (@pages[0]->mapping->host). There are @nr_pages pages in @pages which are * locked but not kmap()ped. The source data has already been copied into the * @page. ntfs_prepare_pages_for_non_resident_write() has been called before * the data was copied (for non-resident attributes only) and it returned * success. * * Need to set uptodate and mark dirty all buffers within the boundary of the * write. 
If all buffers in a page are uptodate we set the page uptodate, too. * * Setting the buffers dirty ensures that they get written out later when * ntfs_writepage() is invoked by the VM. * * Finally, we need to update i_size and initialized_size as appropriate both * in the inode and the mft record. * * This is modelled after fs/buffer.c::generic_commit_write(), which marks * buffers uptodate and dirty, sets the page uptodate if all buffers in the * page are uptodate, and updates i_size if the end of io is beyond i_size. In * that case, it also marks the inode dirty. * * If things have gone as outlined in * ntfs_prepare_pages_for_non_resident_write(), we do not need to do any page * content modifications here for non-resident attributes. For resident * attributes we need to do the uptodate bringing here which we combine with * the copying into the mft record which means we save one atomic kmap. * * Return 0 on success or -errno on error. */ static int ntfs_commit_pages_after_write(struct page **pages, const unsigned nr_pages, s64 pos, size_t bytes) { s64 end, initialized_size; loff_t i_size; struct inode *vi; ntfs_inode *ni, *base_ni; struct page *page; ntfs_attr_search_ctx *ctx; MFT_RECORD *m; ATTR_RECORD *a; char *kattr, *kaddr; unsigned long flags; u32 attr_len; int err; BUG_ON(!nr_pages); BUG_ON(!pages); page = pages[0]; BUG_ON(!page); vi = page->mapping->host; ni = NTFS_I(vi); ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page " "index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.", vi->i_ino, ni->type, page->index, nr_pages, (long long)pos, bytes); if (NInoNonResident(ni)) return ntfs_commit_pages_after_non_resident_write(pages, nr_pages, pos, bytes); BUG_ON(nr_pages > 1); /* * Attribute is resident, implying it is not compressed, encrypted, or * sparse. */ if (!NInoAttr(ni)) base_ni = ni; else base_ni = ni->ext.base_ntfs_ino; BUG_ON(NInoNonResident(ni)); /* Map, pin, and lock the mft record. 
*/ m = map_mft_record(base_ni); if (IS_ERR(m)) { err = PTR_ERR(m); m = NULL; ctx = NULL; goto err_out; } ctx = ntfs_attr_get_search_ctx(base_ni, m); if (unlikely(!ctx)) { err = -ENOMEM; goto err_out; } err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len, CASE_SENSITIVE, 0, NULL, 0, ctx); if (unlikely(err)) { if (err == -ENOENT) err = -EIO; goto err_out; } a = ctx->attr; BUG_ON(a->non_resident); /* The total length of the attribute value. */ attr_len = le32_to_cpu(a->data.resident.value_length); i_size = i_size_read(vi); BUG_ON(attr_len != i_size); BUG_ON(pos > attr_len); end = pos + bytes; BUG_ON(end > le32_to_cpu(a->length) - le16_to_cpu(a->data.resident.value_offset)); kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset); kaddr = kmap_atomic(page); /* Copy the received data from the page to the mft record. */ memcpy(kattr + pos, kaddr + pos, bytes); /* Update the attribute length if necessary. */ if (end > attr_len) { attr_len = end; a->data.resident.value_length = cpu_to_le32(attr_len); } /* * If the page is not uptodate, bring the out of bounds area(s) * uptodate by copying data from the mft record to the page. */ if (!PageUptodate(page)) { if (pos > 0) memcpy(kaddr, kattr, pos); if (end < attr_len) memcpy(kaddr + end, kattr + end, attr_len - end); /* Zero the region outside the end of the attribute value. */ memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); flush_dcache_page(page); SetPageUptodate(page); } kunmap_atomic(kaddr); /* Update initialized_size/i_size if necessary. */ read_lock_irqsave(&ni->size_lock, flags); initialized_size = ni->initialized_size; BUG_ON(end > ni->allocated_size); read_unlock_irqrestore(&ni->size_lock, flags); BUG_ON(initialized_size != i_size); if (end > initialized_size) { write_lock_irqsave(&ni->size_lock, flags); ni->initialized_size = end; i_size_write(vi, end); write_unlock_irqrestore(&ni->size_lock, flags); } /* Mark the mft record dirty, so it gets written back. 
*/ flush_dcache_mft_record_page(ctx->ntfs_ino); mark_mft_record_dirty(ctx->ntfs_ino); ntfs_attr_put_search_ctx(ctx); unmap_mft_record(base_ni); ntfs_debug("Done."); return 0; err_out: if (err == -ENOMEM) { ntfs_warning(vi->i_sb, "Error allocating memory required to " "commit the write."); if (PageUptodate(page)) { ntfs_warning(vi->i_sb, "Page is uptodate, setting " "dirty so the write will be retried " "later on by the VM."); /* * Put the page on mapping->dirty_pages, but leave its * buffers' dirty state as-is. */ __set_page_dirty_nobuffers(page); err = 0; } else ntfs_error(vi->i_sb, "Page is not uptodate. Written " "data has been lost."); } else { ntfs_error(vi->i_sb, "Resident attribute commit write failed " "with error %i.", err); NVolSetErrors(ni->vol); } if (ctx) ntfs_attr_put_search_ctx(ctx); if (m) unmap_mft_record(base_ni); return err; } /** * ntfs_file_buffered_write - * * Locking: The vfs is holding ->i_mutex on the inode. */ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos, loff_t *ppos, size_t count) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; struct inode *vi = mapping->host; ntfs_inode *ni = NTFS_I(vi); ntfs_volume *vol = ni->vol; struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER]; struct page *cached_page = NULL; char __user *buf = NULL; s64 end, ll; VCN last_vcn; LCN lcn; unsigned long flags; size_t bytes, iov_ofs = 0; /* Offset in the current iovec. */ ssize_t status, written; unsigned nr_pages; int err; ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, " "pos 0x%llx, count 0x%lx.", vi->i_ino, (unsigned)le32_to_cpu(ni->type), (unsigned long long)pos, (unsigned long)count); if (unlikely(!count)) return 0; BUG_ON(NInoMstProtected(ni)); /* * If the attribute is not an index root and it is encrypted or * compressed, we cannot write to it yet. 
Note we need to check for * AT_INDEX_ALLOCATION since this is the type of both directory and * index inodes. */ if (ni->type != AT_INDEX_ALLOCATION) { /* If file is encrypted, deny access, just like NT4. */ if (NInoEncrypted(ni)) { /* * Reminder for later: Encrypted files are _always_ * non-resident so that the content can always be * encrypted. */ ntfs_debug("Denying write access to encrypted file."); return -EACCES; } if (NInoCompressed(ni)) { /* Only unnamed $DATA attribute can be compressed. */ BUG_ON(ni->type != AT_DATA); BUG_ON(ni->name_len); /* * Reminder for later: If resident, the data is not * actually compressed. Only on the switch to non- * resident does compression kick in. This is in * contrast to encrypted files (see above). */ ntfs_error(vi->i_sb, "Writing to compressed files is " "not implemented yet. Sorry."); return -EOPNOTSUPP; } } /* * If a previous ntfs_truncate() failed, repeat it and abort if it * fails again. */ if (unlikely(NInoTruncateFailed(ni))) { inode_dio_wait(vi); err = ntfs_truncate(vi); if (err || NInoTruncateFailed(ni)) { if (!err) err = -EIO; ntfs_error(vol->sb, "Cannot perform write to inode " "0x%lx, attribute type 0x%x, because " "ntfs_truncate() failed (error code " "%i).", vi->i_ino, (unsigned)le32_to_cpu(ni->type), err); return err; } } /* The first byte after the write. */ end = pos + count; /* * If the write goes beyond the allocated size, extend the allocation * to cover the whole of the write, rounded up to the nearest cluster. */ read_lock_irqsave(&ni->size_lock, flags); ll = ni->allocated_size; read_unlock_irqrestore(&ni->size_lock, flags); if (end > ll) { /* Extend the allocation without changing the data size. */ ll = ntfs_attr_extend_allocation(ni, end, -1, pos); if (likely(ll >= 0)) { BUG_ON(pos >= ll); /* If the extension was partial truncate the write. 
*/ if (end > ll) { ntfs_debug("Truncating write to inode 0x%lx, " "attribute type 0x%x, because " "the allocation was only " "partially extended.", vi->i_ino, (unsigned) le32_to_cpu(ni->type)); end = ll; count = ll - pos; } } else { err = ll; read_lock_irqsave(&ni->size_lock, flags); ll = ni->allocated_size; read_unlock_irqrestore(&ni->size_lock, flags); /* Perform a partial write if possible or fail. */ if (pos < ll) { ntfs_debug("Truncating write to inode 0x%lx, " "attribute type 0x%x, because " "extending the allocation " "failed (error code %i).", vi->i_ino, (unsigned) le32_to_cpu(ni->type), err); end = ll; count = ll - pos; } else { ntfs_error(vol->sb, "Cannot perform write to " "inode 0x%lx, attribute type " "0x%x, because extending the " "allocation failed (error " "code %i).", vi->i_ino, (unsigned) le32_to_cpu(ni->type), err); return err; } } } written = 0; /* * If the write starts beyond the initialized size, extend it up to the * beginning of the write and initialize all non-sparse space between * the old initialized size and the new one. This automatically also * increments the vfs inode->i_size to keep it above or equal to the * initialized_size. */ read_lock_irqsave(&ni->size_lock, flags); ll = ni->initialized_size; read_unlock_irqrestore(&ni->size_lock, flags); if (pos > ll) { err = ntfs_attr_extend_initialized(ni, pos); if (err < 0) { ntfs_error(vol->sb, "Cannot perform write to inode " "0x%lx, attribute type 0x%x, because " "extending the initialized size " "failed (error code %i).", vi->i_ino, (unsigned)le32_to_cpu(ni->type), err); status = err; goto err_out; } } /* * Determine the number of pages per cluster for non-resident * attributes. */ nr_pages = 1; if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni)) nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT; /* Finally, perform the actual write. 
*/ last_vcn = -1; if (likely(nr_segs == 1)) buf = iov->iov_base; do { VCN vcn; pgoff_t idx, start_idx; unsigned ofs, do_pages, u; size_t copied; start_idx = idx = pos >> PAGE_CACHE_SHIFT; ofs = pos & ~PAGE_CACHE_MASK; bytes = PAGE_CACHE_SIZE - ofs; do_pages = 1; if (nr_pages > 1) { vcn = pos >> vol->cluster_size_bits; if (vcn != last_vcn) { last_vcn = vcn; /* * Get the lcn of the vcn the write is in. If * it is a hole, need to lock down all pages in * the cluster. */ down_read(&ni->runlist.lock); lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >> vol->cluster_size_bits, false); up_read(&ni->runlist.lock); if (unlikely(lcn < LCN_HOLE)) { status = -EIO; if (lcn == LCN_ENOMEM) status = -ENOMEM; else ntfs_error(vol->sb, "Cannot " "perform write to " "inode 0x%lx, " "attribute type 0x%x, " "because the attribute " "is corrupt.", vi->i_ino, (unsigned) le32_to_cpu(ni->type)); break; } if (lcn == LCN_HOLE) { start_idx = (pos & ~(s64) vol->cluster_size_mask) >> PAGE_CACHE_SHIFT; bytes = vol->cluster_size - (pos & vol->cluster_size_mask); do_pages = nr_pages; } } } if (bytes > count) bytes = count; /* * Bring in the user page(s) that we will copy from _first_. * Otherwise there is a nasty deadlock on copying from the same * page(s) as we are writing to, without it/them being marked * up-to-date. Note, at present there is nothing to stop the * pages being swapped out between us bringing them into memory * and doing the actual copying. */ if (likely(nr_segs == 1)) ntfs_fault_in_pages_readable(buf, bytes); else ntfs_fault_in_pages_readable_iovec(iov, iov_ofs, bytes); /* Get and lock @do_pages starting at index @start_idx. */ status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages, pages, &cached_page); if (unlikely(status)) break; /* * For non-resident attributes, we need to fill any holes with * actual clusters and ensure all bufferes are mapped. We also * need to bring uptodate any buffers that are only partially * being written to. 
*/ if (NInoNonResident(ni)) { status = ntfs_prepare_pages_for_non_resident_write( pages, do_pages, pos, bytes); if (unlikely(status)) { loff_t i_size; do { unlock_page(pages[--do_pages]); page_cache_release(pages[do_pages]); } while (do_pages); /* * The write preparation may have instantiated * allocated space outside i_size. Trim this * off again. We can ignore any errors in this * case as we will just be waisting a bit of * allocated space, which is not a disaster. */ i_size = i_size_read(vi); if (pos + bytes > i_size) vmtruncate(vi, i_size); break; } } u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index; if (likely(nr_segs == 1)) { copied = ntfs_copy_from_user(pages + u, do_pages - u, ofs, buf, bytes); buf += copied; } else copied = ntfs_copy_from_user_iovec(pages + u, do_pages - u, ofs, &iov, &iov_ofs, bytes); ntfs_flush_dcache_pages(pages + u, do_pages - u); status = ntfs_commit_pages_after_write(pages, do_pages, pos, bytes); if (likely(!status)) { written += copied; count -= copied; pos += copied; if (unlikely(copied != bytes)) status = -EFAULT; } do { unlock_page(pages[--do_pages]); mark_page_accessed(pages[do_pages]); page_cache_release(pages[do_pages]); } while (do_pages); if (unlikely(status)) break; balance_dirty_pages_ratelimited(mapping); cond_resched(); } while (count); err_out: *ppos = pos; if (cached_page) page_cache_release(cached_page); ntfs_debug("Done. Returning %s (written 0x%lx, status %li).", written ? "written" : "status", (unsigned long)written, (long)status); return written ? 
written : status; } /** * ntfs_file_aio_write_nolock - */ static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t *ppos) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; loff_t pos; size_t count; /* after file limit checks */ ssize_t written, err; count = 0; err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ); if (err) return err; pos = *ppos; vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); /* We can write back this queue in page reclaim. */ current->backing_dev_info = mapping->backing_dev_info; written = 0; err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); if (err) goto out; if (!count) goto out; err = file_remove_suid(file); if (err) goto out; err = file_update_time(file); if (err) goto out; written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos, count); out: current->backing_dev_info = NULL; return written ? written : err; } /** * ntfs_file_aio_write - */ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; ssize_t ret; BUG_ON(iocb->ki_pos != pos); mutex_lock(&inode->i_mutex); ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos); mutex_unlock(&inode->i_mutex); if (ret > 0) { int err = generic_write_sync(file, pos, ret); if (err < 0) ret = err; } return ret; } /** * ntfs_file_fsync - sync a file to disk * @filp: file to be synced * @datasync: if non-zero only flush user data and not metadata * * Data integrity sync of a file to disk. Used for fsync, fdatasync, and msync * system calls. This function is inspired by fs/buffer.c::file_fsync(). * * If @datasync is false, write the mft record and all associated extent mft * records as well as the $DATA attribute and then sync the block device. 
* * If @datasync is true and the attribute is non-resident, we skip the writing * of the mft record and all associated extent mft records (this might still * happen due to the write_inode_now() call). * * Also, if @datasync is true, we do not wait on the inode to be written out * but we always wait on the page cache pages to be written out. * * Locking: Caller must hold i_mutex on the inode. * * TODO: We should probably also write all attribute/index inodes associated * with this inode but since we have no simple way of getting to them we ignore * this problem for now. */ static int ntfs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync) { struct inode *vi = filp->f_mapping->host; int err, ret = 0; ntfs_debug("Entering for inode 0x%lx.", vi->i_ino); err = filemap_write_and_wait_range(vi->i_mapping, start, end); if (err) return err; mutex_lock(&vi->i_mutex); BUG_ON(S_ISDIR(vi->i_mode)); if (!datasync || !NInoNonResident(NTFS_I(vi))) ret = __ntfs_write_inode(vi, 1); write_inode_now(vi, !datasync); /* * NOTE: If we were to use mapping->private_list (see ext2 and * fs/buffer.c) for dirty blocks then we could optimize the below to be * sync_mapping_buffers(vi->i_mapping). */ err = sync_blockdev(vi->i_sb->s_bdev); if (unlikely(err && !ret)) ret = err; if (likely(!ret)) ntfs_debug("Done."); else ntfs_warning(vi->i_sb, "Failed to f%ssync inode 0x%lx. Error " "%u.", datasync ? "data" : "", vi->i_ino, -ret); mutex_unlock(&vi->i_mutex); return ret; } #endif /* NTFS_RW */ const struct file_operations ntfs_file_ops = { .llseek = generic_file_llseek, /* Seek inside file. */ .read = do_sync_read, /* Read from file. */ .aio_read = generic_file_aio_read, /* Async read from file. */ #ifdef NTFS_RW .write = do_sync_write, /* Write to file. */ .aio_write = ntfs_file_aio_write, /* Async write to file. */ /*.release = ,*/ /* Last file is closed. See fs/ext2/file.c:: ext2_release_file() for how to use this to discard preallocated space for write opened files. 
*/ .fsync = ntfs_file_fsync, /* Sync a file to disk. */ /*.aio_fsync = ,*/ /* Sync all outstanding async i/o operations on a kiocb. */ #endif /* NTFS_RW */ /*.ioctl = ,*/ /* Perform function on the mounted filesystem. */ .mmap = generic_file_mmap, /* Mmap file. */ .open = ntfs_file_open, /* Open file. */ .splice_read = generic_file_splice_read /* Zero-copy data send with the data source being on the ntfs partition. We do not need to care about the data destination. */ /*.sendpage = ,*/ /* Zero-copy data send with the data destination being on the ntfs partition. We do not need to care about the data source. */ }; const struct inode_operations ntfs_file_inode_ops = { #ifdef NTFS_RW .truncate = ntfs_truncate_vfs, .setattr = ntfs_setattr, #endif /* NTFS_RW */ }; const struct file_operations ntfs_empty_file_ops = {}; const struct inode_operations ntfs_empty_inode_ops = {};
gpl-2.0
HyochanPyo/kernel_3.18.9
drivers/gpu/drm/nouveau/nvc0_fbcon.c
845
7082
/* * Copyright 2010 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Ben Skeggs */ #include "nouveau_drm.h" #include "nouveau_dma.h" #include "nouveau_fbcon.h" int nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct nouveau_fbdev *nfbdev = info->par; struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); struct nouveau_channel *chan = drm->channel; int ret; ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 
7 : 11); if (ret) return ret; if (rect->rop != ROP_COPY) { BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1); OUT_RING (chan, 1); } BEGIN_NVC0(chan, NvSub2D, 0x0588, 1); if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) OUT_RING (chan, ((uint32_t *)info->pseudo_palette)[rect->color]); else OUT_RING (chan, rect->color); BEGIN_NVC0(chan, NvSub2D, 0x0600, 4); OUT_RING (chan, rect->dx); OUT_RING (chan, rect->dy); OUT_RING (chan, rect->dx + rect->width); OUT_RING (chan, rect->dy + rect->height); if (rect->rop != ROP_COPY) { BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1); OUT_RING (chan, 3); } FIRE_RING(chan); return 0; } int nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) { struct nouveau_fbdev *nfbdev = info->par; struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); struct nouveau_channel *chan = drm->channel; int ret; ret = RING_SPACE(chan, 12); if (ret) return ret; BEGIN_NVC0(chan, NvSub2D, 0x0110, 1); OUT_RING (chan, 0); BEGIN_NVC0(chan, NvSub2D, 0x08b0, 4); OUT_RING (chan, region->dx); OUT_RING (chan, region->dy); OUT_RING (chan, region->width); OUT_RING (chan, region->height); BEGIN_NVC0(chan, NvSub2D, 0x08d0, 4); OUT_RING (chan, 0); OUT_RING (chan, region->sx); OUT_RING (chan, 0); OUT_RING (chan, region->sy); FIRE_RING(chan); return 0; } int nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) { struct nouveau_fbdev *nfbdev = info->par; struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); struct nouveau_channel *chan = drm->channel; uint32_t width, dwords, *data = (uint32_t *)image->data; uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); uint32_t *palette = info->pseudo_palette; int ret; if (image->depth != 1) return -ENODEV; ret = RING_SPACE(chan, 11); if (ret) return ret; width = ALIGN(image->width, 32); dwords = (width * image->height) >> 5; BEGIN_NVC0(chan, NvSub2D, 0x0814, 2); if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) { OUT_RING (chan, 
palette[image->bg_color] | mask); OUT_RING (chan, palette[image->fg_color] | mask); } else { OUT_RING (chan, image->bg_color); OUT_RING (chan, image->fg_color); } BEGIN_NVC0(chan, NvSub2D, 0x0838, 2); OUT_RING (chan, image->width); OUT_RING (chan, image->height); BEGIN_NVC0(chan, NvSub2D, 0x0850, 4); OUT_RING (chan, 0); OUT_RING (chan, image->dx); OUT_RING (chan, 0); OUT_RING (chan, image->dy); while (dwords) { int push = dwords > 2047 ? 2047 : dwords; ret = RING_SPACE(chan, push + 1); if (ret) return ret; dwords -= push; BEGIN_NIC0(chan, NvSub2D, 0x0860, push); OUT_RINGp(chan, data, push); data += push; } FIRE_RING(chan); return 0; } int nvc0_fbcon_accel_init(struct fb_info *info) { struct nouveau_fbdev *nfbdev = info->par; struct drm_device *dev = nfbdev->dev; struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb; struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_channel *chan = drm->channel; int ret, format; ret = nvif_object_init(chan->object, NULL, 0x902d, 0x902d, NULL, 0, &nfbdev->twod); if (ret) return ret; switch (info->var.bits_per_pixel) { case 8: format = 0xf3; break; case 15: format = 0xf8; break; case 16: format = 0xe8; break; case 32: switch (info->var.transp.length) { case 0: /* depth 24 */ case 8: /* depth 32, just use 24.. 
*/ format = 0xe6; break; case 2: /* depth 30 */ format = 0xd1; break; default: return -EINVAL; } break; default: return -EINVAL; } ret = RING_SPACE(chan, 60); if (ret) { WARN_ON(1); nouveau_fbcon_gpu_lockup(info); return ret; } BEGIN_NVC0(chan, NvSub2D, 0x0000, 1); OUT_RING (chan, nfbdev->twod.handle); BEGIN_NVC0(chan, NvSub2D, 0x0290, 1); OUT_RING (chan, 0); BEGIN_NVC0(chan, NvSub2D, 0x0888, 1); OUT_RING (chan, 1); BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1); OUT_RING (chan, 3); BEGIN_NVC0(chan, NvSub2D, 0x02a0, 1); OUT_RING (chan, 0x55); BEGIN_NVC0(chan, NvSub2D, 0x08c0, 4); OUT_RING (chan, 0); OUT_RING (chan, 1); OUT_RING (chan, 0); OUT_RING (chan, 1); BEGIN_NVC0(chan, NvSub2D, 0x0580, 2); OUT_RING (chan, 4); OUT_RING (chan, format); BEGIN_NVC0(chan, NvSub2D, 0x02e8, 2); OUT_RING (chan, 2); OUT_RING (chan, 1); BEGIN_NVC0(chan, NvSub2D, 0x0804, 1); OUT_RING (chan, format); BEGIN_NVC0(chan, NvSub2D, 0x0800, 1); OUT_RING (chan, 1); BEGIN_NVC0(chan, NvSub2D, 0x0808, 3); OUT_RING (chan, 0); OUT_RING (chan, 0); OUT_RING (chan, 1); BEGIN_NVC0(chan, NvSub2D, 0x081c, 1); OUT_RING (chan, 1); BEGIN_NVC0(chan, NvSub2D, 0x0840, 4); OUT_RING (chan, 0); OUT_RING (chan, 1); OUT_RING (chan, 0); OUT_RING (chan, 1); BEGIN_NVC0(chan, NvSub2D, 0x0200, 10); OUT_RING (chan, format); OUT_RING (chan, 1); OUT_RING (chan, 0); OUT_RING (chan, 1); OUT_RING (chan, 0); OUT_RING (chan, info->fix.line_length); OUT_RING (chan, info->var.xres_virtual); OUT_RING (chan, info->var.yres_virtual); OUT_RING (chan, upper_32_bits(fb->vma.offset)); OUT_RING (chan, lower_32_bits(fb->vma.offset)); BEGIN_NVC0(chan, NvSub2D, 0x0230, 10); OUT_RING (chan, format); OUT_RING (chan, 1); OUT_RING (chan, 0); OUT_RING (chan, 1); OUT_RING (chan, 0); OUT_RING (chan, info->fix.line_length); OUT_RING (chan, info->var.xres_virtual); OUT_RING (chan, info->var.yres_virtual); OUT_RING (chan, upper_32_bits(fb->vma.offset)); OUT_RING (chan, lower_32_bits(fb->vma.offset)); FIRE_RING (chan); return 0; }
gpl-2.0
Perferom/android_kernel_zte_msm7x27
Documentation/vm/page-types.c
845
21876
/* * page-types: Tool for querying page flags * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; version 2. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should find a copy of v2 of the GNU General Public License somewhere on * your Linux system; if not, write to the Free Software Foundation, Inc., 59 * Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * Copyright (C) 2009 Intel corporation * * Authors: Wu Fengguang <fengguang.wu@intel.com> */ #define _LARGEFILE64_SOURCE #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <stdint.h> #include <stdarg.h> #include <string.h> #include <getopt.h> #include <limits.h> #include <assert.h> #include <sys/types.h> #include <sys/errno.h> #include <sys/fcntl.h> /* * pagemap kernel ABI bits */ #define PM_ENTRY_BYTES sizeof(uint64_t) #define PM_STATUS_BITS 3 #define PM_STATUS_OFFSET (64 - PM_STATUS_BITS) #define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET) #define PM_STATUS(nr) (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK) #define PM_PSHIFT_BITS 6 #define PM_PSHIFT_OFFSET (PM_STATUS_OFFSET - PM_PSHIFT_BITS) #define PM_PSHIFT_MASK (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET) #define PM_PSHIFT(x) (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK) #define PM_PFRAME_MASK ((1LL << PM_PSHIFT_OFFSET) - 1) #define PM_PFRAME(x) ((x) & PM_PFRAME_MASK) #define PM_PRESENT PM_STATUS(4LL) #define PM_SWAP PM_STATUS(2LL) /* * kernel page flags */ #define KPF_BYTES 8 #define PROC_KPAGEFLAGS "/proc/kpageflags" /* copied from kpageflags_read() */ #define KPF_LOCKED 0 #define KPF_ERROR 1 #define KPF_REFERENCED 2 #define KPF_UPTODATE 3 #define KPF_DIRTY 4 #define KPF_LRU 5 
#define KPF_ACTIVE 6 #define KPF_SLAB 7 #define KPF_WRITEBACK 8 #define KPF_RECLAIM 9 #define KPF_BUDDY 10 /* [11-20] new additions in 2.6.31 */ #define KPF_MMAP 11 #define KPF_ANON 12 #define KPF_SWAPCACHE 13 #define KPF_SWAPBACKED 14 #define KPF_COMPOUND_HEAD 15 #define KPF_COMPOUND_TAIL 16 #define KPF_HUGE 17 #define KPF_UNEVICTABLE 18 #define KPF_HWPOISON 19 #define KPF_NOPAGE 20 #define KPF_KSM 21 /* [32-] kernel hacking assistances */ #define KPF_RESERVED 32 #define KPF_MLOCKED 33 #define KPF_MAPPEDTODISK 34 #define KPF_PRIVATE 35 #define KPF_PRIVATE_2 36 #define KPF_OWNER_PRIVATE 37 #define KPF_ARCH 38 #define KPF_UNCACHED 39 /* [48-] take some arbitrary free slots for expanding overloaded flags * not part of kernel API */ #define KPF_READAHEAD 48 #define KPF_SLOB_FREE 49 #define KPF_SLUB_FROZEN 50 #define KPF_SLUB_DEBUG 51 #define KPF_ALL_BITS ((uint64_t)~0ULL) #define KPF_HACKERS_BITS (0xffffULL << 32) #define KPF_OVERLOADED_BITS (0xffffULL << 48) #define BIT(name) (1ULL << KPF_##name) #define BITS_COMPOUND (BIT(COMPOUND_HEAD) | BIT(COMPOUND_TAIL)) static const char *page_flag_names[] = { [KPF_LOCKED] = "L:locked", [KPF_ERROR] = "E:error", [KPF_REFERENCED] = "R:referenced", [KPF_UPTODATE] = "U:uptodate", [KPF_DIRTY] = "D:dirty", [KPF_LRU] = "l:lru", [KPF_ACTIVE] = "A:active", [KPF_SLAB] = "S:slab", [KPF_WRITEBACK] = "W:writeback", [KPF_RECLAIM] = "I:reclaim", [KPF_BUDDY] = "B:buddy", [KPF_MMAP] = "M:mmap", [KPF_ANON] = "a:anonymous", [KPF_SWAPCACHE] = "s:swapcache", [KPF_SWAPBACKED] = "b:swapbacked", [KPF_COMPOUND_HEAD] = "H:compound_head", [KPF_COMPOUND_TAIL] = "T:compound_tail", [KPF_HUGE] = "G:huge", [KPF_UNEVICTABLE] = "u:unevictable", [KPF_HWPOISON] = "X:hwpoison", [KPF_NOPAGE] = "n:nopage", [KPF_KSM] = "x:ksm", [KPF_RESERVED] = "r:reserved", [KPF_MLOCKED] = "m:mlocked", [KPF_MAPPEDTODISK] = "d:mappedtodisk", [KPF_PRIVATE] = "P:private", [KPF_PRIVATE_2] = "p:private_2", [KPF_OWNER_PRIVATE] = "O:owner_private", [KPF_ARCH] = "h:arch", [KPF_UNCACHED] = 
"c:uncached", [KPF_READAHEAD] = "I:readahead", [KPF_SLOB_FREE] = "P:slob_free", [KPF_SLUB_FROZEN] = "A:slub_frozen", [KPF_SLUB_DEBUG] = "E:slub_debug", }; /* * data structures */ static int opt_raw; /* for kernel developers */ static int opt_list; /* list pages (in ranges) */ static int opt_no_summary; /* don't show summary */ static pid_t opt_pid; /* process to walk */ #define MAX_ADDR_RANGES 1024 static int nr_addr_ranges; static unsigned long opt_offset[MAX_ADDR_RANGES]; static unsigned long opt_size[MAX_ADDR_RANGES]; #define MAX_VMAS 10240 static int nr_vmas; static unsigned long pg_start[MAX_VMAS]; static unsigned long pg_end[MAX_VMAS]; #define MAX_BIT_FILTERS 64 static int nr_bit_filters; static uint64_t opt_mask[MAX_BIT_FILTERS]; static uint64_t opt_bits[MAX_BIT_FILTERS]; static int page_size; static int pagemap_fd; static int kpageflags_fd; static int opt_hwpoison; static int opt_unpoison; static const char hwpoison_debug_fs[] = "/debug/hwpoison"; static int hwpoison_inject_fd; static int hwpoison_forget_fd; #define HASH_SHIFT 13 #define HASH_SIZE (1 << HASH_SHIFT) #define HASH_MASK (HASH_SIZE - 1) #define HASH_KEY(flags) (flags & HASH_MASK) static unsigned long total_pages; static unsigned long nr_pages[HASH_SIZE]; static uint64_t page_flags[HASH_SIZE]; /* * helper functions */ #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #define min_t(type, x, y) ({ \ type __min1 = (x); \ type __min2 = (y); \ __min1 < __min2 ? __min1 : __min2; }) #define max_t(type, x, y) ({ \ type __max1 = (x); \ type __max2 = (y); \ __max1 > __max2 ? __max1 : __max2; }) static unsigned long pages2mb(unsigned long pages) { return (pages * page_size) >> 20; } static void fatal(const char *x, ...) 
{ va_list ap; va_start(ap, x); vfprintf(stderr, x, ap); va_end(ap); exit(EXIT_FAILURE); } static int checked_open(const char *pathname, int flags) { int fd = open(pathname, flags); if (fd < 0) { perror(pathname); exit(EXIT_FAILURE); } return fd; } /* * pagemap/kpageflags routines */ static unsigned long do_u64_read(int fd, char *name, uint64_t *buf, unsigned long index, unsigned long count) { long bytes; if (index > ULONG_MAX / 8) fatal("index overflow: %lu\n", index); if (lseek(fd, index * 8, SEEK_SET) < 0) { perror(name); exit(EXIT_FAILURE); } bytes = read(fd, buf, count * 8); if (bytes < 0) { perror(name); exit(EXIT_FAILURE); } if (bytes % 8) fatal("partial read: %lu bytes\n", bytes); return bytes / 8; } static unsigned long kpageflags_read(uint64_t *buf, unsigned long index, unsigned long pages) { return do_u64_read(kpageflags_fd, PROC_KPAGEFLAGS, buf, index, pages); } static unsigned long pagemap_read(uint64_t *buf, unsigned long index, unsigned long pages) { return do_u64_read(pagemap_fd, "/proc/pid/pagemap", buf, index, pages); } static unsigned long pagemap_pfn(uint64_t val) { unsigned long pfn; if (val & PM_PRESENT) pfn = PM_PFRAME(val); else pfn = 0; return pfn; } /* * page flag names */ static char *page_flag_name(uint64_t flags) { static char buf[65]; int present; int i, j; for (i = 0, j = 0; i < ARRAY_SIZE(page_flag_names); i++) { present = (flags >> i) & 1; if (!page_flag_names[i]) { if (present) fatal("unknown flag bit %d\n", i); continue; } buf[j++] = present ? 
page_flag_names[i][0] : '_'; } return buf; } static char *page_flag_longname(uint64_t flags) { static char buf[1024]; int i, n; for (i = 0, n = 0; i < ARRAY_SIZE(page_flag_names); i++) { if (!page_flag_names[i]) continue; if ((flags >> i) & 1) n += snprintf(buf + n, sizeof(buf) - n, "%s,", page_flag_names[i] + 2); } if (n) n--; buf[n] = '\0'; return buf; } /* * page list and summary */ static void show_page_range(unsigned long voffset, unsigned long offset, uint64_t flags) { static uint64_t flags0; static unsigned long voff; static unsigned long index; static unsigned long count; if (flags == flags0 && offset == index + count && (!opt_pid || voffset == voff + count)) { count++; return; } if (count) { if (opt_pid) printf("%lx\t", voff); printf("%lx\t%lx\t%s\n", index, count, page_flag_name(flags0)); } flags0 = flags; index = offset; voff = voffset; count = 1; } static void show_page(unsigned long voffset, unsigned long offset, uint64_t flags) { if (opt_pid) printf("%lx\t", voffset); printf("%lx\t%s\n", offset, page_flag_name(flags)); } static void show_summary(void) { int i; printf(" flags\tpage-count MB" " symbolic-flags\t\t\tlong-symbolic-flags\n"); for (i = 0; i < ARRAY_SIZE(nr_pages); i++) { if (nr_pages[i]) printf("0x%016llx\t%10lu %8lu %s\t%s\n", (unsigned long long)page_flags[i], nr_pages[i], pages2mb(nr_pages[i]), page_flag_name(page_flags[i]), page_flag_longname(page_flags[i])); } printf(" total\t%10lu %8lu\n", total_pages, pages2mb(total_pages)); } /* * page flag filters */ static int bit_mask_ok(uint64_t flags) { int i; for (i = 0; i < nr_bit_filters; i++) { if (opt_bits[i] == KPF_ALL_BITS) { if ((flags & opt_mask[i]) == 0) return 0; } else { if ((flags & opt_mask[i]) != opt_bits[i]) return 0; } } return 1; } static uint64_t expand_overloaded_flags(uint64_t flags) { /* SLOB/SLUB overload several page flags */ if (flags & BIT(SLAB)) { if (flags & BIT(PRIVATE)) flags ^= BIT(PRIVATE) | BIT(SLOB_FREE); if (flags & BIT(ACTIVE)) flags ^= BIT(ACTIVE) | 
BIT(SLUB_FROZEN); if (flags & BIT(ERROR)) flags ^= BIT(ERROR) | BIT(SLUB_DEBUG); } /* PG_reclaim is overloaded as PG_readahead in the read path */ if ((flags & (BIT(RECLAIM) | BIT(WRITEBACK))) == BIT(RECLAIM)) flags ^= BIT(RECLAIM) | BIT(READAHEAD); return flags; } static uint64_t well_known_flags(uint64_t flags) { /* hide flags intended only for kernel hacker */ flags &= ~KPF_HACKERS_BITS; /* hide non-hugeTLB compound pages */ if ((flags & BITS_COMPOUND) && !(flags & BIT(HUGE))) flags &= ~BITS_COMPOUND; return flags; } static uint64_t kpageflags_flags(uint64_t flags) { flags = expand_overloaded_flags(flags); if (!opt_raw) flags = well_known_flags(flags); return flags; } /* * page actions */ static void prepare_hwpoison_fd(void) { char buf[100]; if (opt_hwpoison && !hwpoison_inject_fd) { sprintf(buf, "%s/corrupt-pfn", hwpoison_debug_fs); hwpoison_inject_fd = checked_open(buf, O_WRONLY); } if (opt_unpoison && !hwpoison_forget_fd) { sprintf(buf, "%s/renew-pfn", hwpoison_debug_fs); hwpoison_forget_fd = checked_open(buf, O_WRONLY); } } static int hwpoison_page(unsigned long offset) { char buf[100]; int len; len = sprintf(buf, "0x%lx\n", offset); len = write(hwpoison_inject_fd, buf, len); if (len < 0) { perror("hwpoison inject"); return len; } return 0; } static int unpoison_page(unsigned long offset) { char buf[100]; int len; len = sprintf(buf, "0x%lx\n", offset); len = write(hwpoison_forget_fd, buf, len); if (len < 0) { perror("hwpoison forget"); return len; } return 0; } /* * page frame walker */ static int hash_slot(uint64_t flags) { int k = HASH_KEY(flags); int i; /* Explicitly reserve slot 0 for flags 0: the following logic * cannot distinguish an unoccupied slot from slot (flags==0). 
*/ if (flags == 0) return 0; /* search through the remaining (HASH_SIZE-1) slots */ for (i = 1; i < ARRAY_SIZE(page_flags); i++, k++) { if (!k || k >= ARRAY_SIZE(page_flags)) k = 1; if (page_flags[k] == 0) { page_flags[k] = flags; return k; } if (page_flags[k] == flags) return k; } fatal("hash table full: bump up HASH_SHIFT?\n"); exit(EXIT_FAILURE); } static void add_page(unsigned long voffset, unsigned long offset, uint64_t flags) { flags = kpageflags_flags(flags); if (!bit_mask_ok(flags)) return; if (opt_hwpoison) hwpoison_page(offset); if (opt_unpoison) unpoison_page(offset); if (opt_list == 1) show_page_range(voffset, offset, flags); else if (opt_list == 2) show_page(voffset, offset, flags); nr_pages[hash_slot(flags)]++; total_pages++; } #define KPAGEFLAGS_BATCH (64 << 10) /* 64k pages */ static void walk_pfn(unsigned long voffset, unsigned long index, unsigned long count) { uint64_t buf[KPAGEFLAGS_BATCH]; unsigned long batch; long pages; unsigned long i; while (count) { batch = min_t(unsigned long, count, KPAGEFLAGS_BATCH); pages = kpageflags_read(buf, index, batch); if (pages == 0) break; for (i = 0; i < pages; i++) add_page(voffset + i, index + i, buf[i]); index += pages; count -= pages; } } #define PAGEMAP_BATCH (64 << 10) static void walk_vma(unsigned long index, unsigned long count) { uint64_t buf[PAGEMAP_BATCH]; unsigned long batch; unsigned long pages; unsigned long pfn; unsigned long i; while (count) { batch = min_t(unsigned long, count, PAGEMAP_BATCH); pages = pagemap_read(buf, index, batch); if (pages == 0) break; for (i = 0; i < pages; i++) { pfn = pagemap_pfn(buf[i]); if (pfn) walk_pfn(index + i, pfn, 1); } index += pages; count -= pages; } } static void walk_task(unsigned long index, unsigned long count) { const unsigned long end = index + count; unsigned long start; int i = 0; while (index < end) { while (pg_end[i] <= index) if (++i >= nr_vmas) return; if (pg_start[i] >= end) return; start = max_t(unsigned long, pg_start[i], index); index = 
min_t(unsigned long, pg_end[i], end); assert(start < index); walk_vma(start, index - start); } } static void add_addr_range(unsigned long offset, unsigned long size) { if (nr_addr_ranges >= MAX_ADDR_RANGES) fatal("too many addr ranges\n"); opt_offset[nr_addr_ranges] = offset; opt_size[nr_addr_ranges] = min_t(unsigned long, size, ULONG_MAX-offset); nr_addr_ranges++; } static void walk_addr_ranges(void) { int i; kpageflags_fd = checked_open(PROC_KPAGEFLAGS, O_RDONLY); if (!nr_addr_ranges) add_addr_range(0, ULONG_MAX); for (i = 0; i < nr_addr_ranges; i++) if (!opt_pid) walk_pfn(0, opt_offset[i], opt_size[i]); else walk_task(opt_offset[i], opt_size[i]); close(kpageflags_fd); } /* * user interface */ static const char *page_flag_type(uint64_t flag) { if (flag & KPF_HACKERS_BITS) return "(r)"; if (flag & KPF_OVERLOADED_BITS) return "(o)"; return " "; } static void usage(void) { int i, j; printf( "page-types [options]\n" " -r|--raw Raw mode, for kernel developers\n" " -d|--describe flags Describe flags\n" " -a|--addr addr-spec Walk a range of pages\n" " -b|--bits bits-spec Walk pages with specified bits\n" " -p|--pid pid Walk process address space\n" #if 0 /* planned features */ " -f|--file filename Walk file address space\n" #endif " -l|--list Show page details in ranges\n" " -L|--list-each Show page details one by one\n" " -N|--no-summary Don't show summay info\n" " -X|--hwpoison hwpoison pages\n" " -x|--unpoison unpoison pages\n" " -h|--help Show this usage message\n" "flags:\n" " 0x10 bitfield format, e.g.\n" " anon bit-name, e.g.\n" " 0x10,anon comma-separated list, e.g.\n" "addr-spec:\n" " N one page at offset N (unit: pages)\n" " N+M pages range from N to N+M-1\n" " N,M pages range from N to M-1\n" " N, pages range from N to end\n" " ,M pages range from 0 to M-1\n" "bits-spec:\n" " bit1,bit2 (flags & (bit1|bit2)) != 0\n" " bit1,bit2=bit1 (flags & (bit1|bit2)) == bit1\n" " bit1,~bit2 (flags & (bit1|bit2)) == bit1\n" " =bit1,bit2 flags == (bit1|bit2)\n" 
"bit-names:\n" ); for (i = 0, j = 0; i < ARRAY_SIZE(page_flag_names); i++) { if (!page_flag_names[i]) continue; printf("%16s%s", page_flag_names[i] + 2, page_flag_type(1ULL << i)); if (++j > 3) { j = 0; putchar('\n'); } } printf("\n " "(r) raw mode bits (o) overloaded bits\n"); } static unsigned long long parse_number(const char *str) { unsigned long long n; n = strtoll(str, NULL, 0); if (n == 0 && str[0] != '0') fatal("invalid name or number: %s\n", str); return n; } static void parse_pid(const char *str) { FILE *file; char buf[5000]; opt_pid = parse_number(str); sprintf(buf, "/proc/%d/pagemap", opt_pid); pagemap_fd = checked_open(buf, O_RDONLY); sprintf(buf, "/proc/%d/maps", opt_pid); file = fopen(buf, "r"); if (!file) { perror(buf); exit(EXIT_FAILURE); } while (fgets(buf, sizeof(buf), file) != NULL) { unsigned long vm_start; unsigned long vm_end; unsigned long long pgoff; int major, minor; char r, w, x, s; unsigned long ino; int n; n = sscanf(buf, "%lx-%lx %c%c%c%c %llx %x:%x %lu", &vm_start, &vm_end, &r, &w, &x, &s, &pgoff, &major, &minor, &ino); if (n < 10) { fprintf(stderr, "unexpected line: %s\n", buf); continue; } pg_start[nr_vmas] = vm_start / page_size; pg_end[nr_vmas] = vm_end / page_size; if (++nr_vmas >= MAX_VMAS) { fprintf(stderr, "too many VMAs\n"); break; } } fclose(file); } static void parse_file(const char *name) { } static void parse_addr_range(const char *optarg) { unsigned long offset; unsigned long size; char *p; p = strchr(optarg, ','); if (!p) p = strchr(optarg, '+'); if (p == optarg) { offset = 0; size = parse_number(p + 1); } else if (p) { offset = parse_number(optarg); if (p[1] == '\0') size = ULONG_MAX; else { size = parse_number(p + 1); if (*p == ',') { if (size < offset) fatal("invalid range: %lu,%lu\n", offset, size); size -= offset; } } } else { offset = parse_number(optarg); size = 1; } add_addr_range(offset, size); } static void add_bits_filter(uint64_t mask, uint64_t bits) { if (nr_bit_filters >= MAX_BIT_FILTERS) fatal("too much 
bit filters\n"); opt_mask[nr_bit_filters] = mask; opt_bits[nr_bit_filters] = bits; nr_bit_filters++; } static uint64_t parse_flag_name(const char *str, int len) { int i; if (!*str || !len) return 0; if (len <= 8 && !strncmp(str, "compound", len)) return BITS_COMPOUND; for (i = 0; i < ARRAY_SIZE(page_flag_names); i++) { if (!page_flag_names[i]) continue; if (!strncmp(str, page_flag_names[i] + 2, len)) return 1ULL << i; } return parse_number(str); } static uint64_t parse_flag_names(const char *str, int all) { const char *p = str; uint64_t flags = 0; while (1) { if (*p == ',' || *p == '=' || *p == '\0') { if ((*str != '~') || (*str == '~' && all && *++str)) flags |= parse_flag_name(str, p - str); if (*p != ',') break; str = p + 1; } p++; } return flags; } static void parse_bits_mask(const char *optarg) { uint64_t mask; uint64_t bits; const char *p; p = strchr(optarg, '='); if (p == optarg) { mask = KPF_ALL_BITS; bits = parse_flag_names(p + 1, 0); } else if (p) { mask = parse_flag_names(optarg, 0); bits = parse_flag_names(p + 1, 0); } else if (strchr(optarg, '~')) { mask = parse_flag_names(optarg, 1); bits = parse_flag_names(optarg, 0); } else { mask = parse_flag_names(optarg, 0); bits = KPF_ALL_BITS; } add_bits_filter(mask, bits); } static void describe_flags(const char *optarg) { uint64_t flags = parse_flag_names(optarg, 0); printf("0x%016llx\t%s\t%s\n", (unsigned long long)flags, page_flag_name(flags), page_flag_longname(flags)); } static const struct option opts[] = { { "raw" , 0, NULL, 'r' }, { "pid" , 1, NULL, 'p' }, { "file" , 1, NULL, 'f' }, { "addr" , 1, NULL, 'a' }, { "bits" , 1, NULL, 'b' }, { "describe" , 1, NULL, 'd' }, { "list" , 0, NULL, 'l' }, { "list-each" , 0, NULL, 'L' }, { "no-summary", 0, NULL, 'N' }, { "hwpoison" , 0, NULL, 'X' }, { "unpoison" , 0, NULL, 'x' }, { "help" , 0, NULL, 'h' }, { NULL , 0, NULL, 0 } }; int main(int argc, char *argv[]) { int c; page_size = getpagesize(); while ((c = getopt_long(argc, argv, "rp:f:a:b:d:lLNXxh", opts, 
NULL)) != -1) { switch (c) { case 'r': opt_raw = 1; break; case 'p': parse_pid(optarg); break; case 'f': parse_file(optarg); break; case 'a': parse_addr_range(optarg); break; case 'b': parse_bits_mask(optarg); break; case 'd': describe_flags(optarg); exit(0); case 'l': opt_list = 1; break; case 'L': opt_list = 2; break; case 'N': opt_no_summary = 1; break; case 'X': opt_hwpoison = 1; prepare_hwpoison_fd(); break; case 'x': opt_unpoison = 1; prepare_hwpoison_fd(); break; case 'h': usage(); exit(0); default: usage(); exit(1); } } if (opt_list && opt_pid) printf("voffset\t"); if (opt_list == 1) printf("offset\tlen\tflags\n"); if (opt_list == 2) printf("offset\tflags\n"); walk_addr_ranges(); if (opt_list == 1) show_page_range(0, 0, 0); /* drain the buffer */ if (opt_no_summary) return 0; if (opt_list) printf("\n\n"); show_summary(); return 0; }
gpl-2.0
zhaiyu/linux-2.6
drivers/video/fbdev/tcx.c
845
12289
/* tcx.c: TCX frame buffer driver * * Copyright (C) 2003, 2006 David S. Miller (davem@davemloft.net) * Copyright (C) 1996,1998 Jakub Jelinek (jj@ultra.linux.cz) * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) * * Driver layout based loosely on tgafb.c, see that file for credits. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/fb.h> #include <linux/mm.h> #include <linux/of_device.h> #include <asm/io.h> #include <asm/fbio.h> #include "sbuslib.h" /* * Local functions. */ static int tcx_setcolreg(unsigned, unsigned, unsigned, unsigned, unsigned, struct fb_info *); static int tcx_blank(int, struct fb_info *); static int tcx_mmap(struct fb_info *, struct vm_area_struct *); static int tcx_ioctl(struct fb_info *, unsigned int, unsigned long); static int tcx_pan_display(struct fb_var_screeninfo *, struct fb_info *); /* * Frame buffer operations */ static struct fb_ops tcx_ops = { .owner = THIS_MODULE, .fb_setcolreg = tcx_setcolreg, .fb_blank = tcx_blank, .fb_pan_display = tcx_pan_display, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_mmap = tcx_mmap, .fb_ioctl = tcx_ioctl, #ifdef CONFIG_COMPAT .fb_compat_ioctl = sbusfb_compat_ioctl, #endif }; /* THC definitions */ #define TCX_THC_MISC_REV_SHIFT 16 #define TCX_THC_MISC_REV_MASK 15 #define TCX_THC_MISC_VSYNC_DIS (1 << 25) #define TCX_THC_MISC_HSYNC_DIS (1 << 24) #define TCX_THC_MISC_RESET (1 << 12) #define TCX_THC_MISC_VIDEO (1 << 10) #define TCX_THC_MISC_SYNC (1 << 9) #define TCX_THC_MISC_VSYNC (1 << 8) #define TCX_THC_MISC_SYNC_ENAB (1 << 7) #define TCX_THC_MISC_CURS_RES (1 << 6) #define TCX_THC_MISC_INT_ENAB (1 << 5) #define TCX_THC_MISC_INT (1 << 4) #define TCX_THC_MISC_INIT 0x9f #define TCX_THC_REV_REV_SHIFT 20 #define TCX_THC_REV_REV_MASK 15 #define TCX_THC_REV_MINREV_SHIFT 28 #define 
TCX_THC_REV_MINREV_MASK 15 /* The contents are unknown */ struct tcx_tec { u32 tec_matrix; u32 tec_clip; u32 tec_vdc; }; struct tcx_thc { u32 thc_rev; u32 thc_pad0[511]; u32 thc_hs; /* hsync timing */ u32 thc_hsdvs; u32 thc_hd; u32 thc_vs; /* vsync timing */ u32 thc_vd; u32 thc_refresh; u32 thc_misc; u32 thc_pad1[56]; u32 thc_cursxy; /* cursor x,y position (16 bits each) */ u32 thc_cursmask[32]; /* cursor mask bits */ u32 thc_cursbits[32]; /* what to show where mask enabled */ }; struct bt_regs { u32 addr; u32 color_map; u32 control; u32 cursor; }; #define TCX_MMAP_ENTRIES 14 struct tcx_par { spinlock_t lock; struct bt_regs __iomem *bt; struct tcx_thc __iomem *thc; struct tcx_tec __iomem *tec; u32 __iomem *cplane; u32 flags; #define TCX_FLAG_BLANKED 0x00000001 unsigned long which_io; struct sbus_mmap_map mmap_map[TCX_MMAP_ENTRIES]; int lowdepth; }; /* Reset control plane so that WID is 8-bit plane. */ static void __tcx_set_control_plane(struct fb_info *info) { struct tcx_par *par = info->par; u32 __iomem *p, *pend; if (par->lowdepth) return; p = par->cplane; if (p == NULL) return; for (pend = p + info->fix.smem_len; p < pend; p++) { u32 tmp = sbus_readl(p); tmp &= 0xffffff; sbus_writel(tmp, p); } } static void tcx_reset(struct fb_info *info) { struct tcx_par *par = (struct tcx_par *) info->par; unsigned long flags; spin_lock_irqsave(&par->lock, flags); __tcx_set_control_plane(info); spin_unlock_irqrestore(&par->lock, flags); } static int tcx_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { tcx_reset(info); return 0; } /** * tcx_setcolreg - Optional function. Sets a color register. * @regno: boolean, 0 copy local, 1 get_user() function * @red: frame buffer colormap structure * @green: The green value which can be up to 16 bits wide * @blue: The blue value which can be up to 16 bits wide. * @transp: If supported the alpha value which can be up to 16 bits wide. 
* @info: frame buffer info structure */ static int tcx_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { struct tcx_par *par = (struct tcx_par *) info->par; struct bt_regs __iomem *bt = par->bt; unsigned long flags; if (regno >= 256) return 1; red >>= 8; green >>= 8; blue >>= 8; spin_lock_irqsave(&par->lock, flags); sbus_writel(regno << 24, &bt->addr); sbus_writel(red << 24, &bt->color_map); sbus_writel(green << 24, &bt->color_map); sbus_writel(blue << 24, &bt->color_map); spin_unlock_irqrestore(&par->lock, flags); return 0; } /** * tcx_blank - Optional function. Blanks the display. * @blank_mode: the blank mode we want. * @info: frame buffer structure that represents a single frame buffer */ static int tcx_blank(int blank, struct fb_info *info) { struct tcx_par *par = (struct tcx_par *) info->par; struct tcx_thc __iomem *thc = par->thc; unsigned long flags; u32 val; spin_lock_irqsave(&par->lock, flags); val = sbus_readl(&thc->thc_misc); switch (blank) { case FB_BLANK_UNBLANK: /* Unblanking */ val &= ~(TCX_THC_MISC_VSYNC_DIS | TCX_THC_MISC_HSYNC_DIS); val |= TCX_THC_MISC_VIDEO; par->flags &= ~TCX_FLAG_BLANKED; break; case FB_BLANK_NORMAL: /* Normal blanking */ val &= ~TCX_THC_MISC_VIDEO; par->flags |= TCX_FLAG_BLANKED; break; case FB_BLANK_VSYNC_SUSPEND: /* VESA blank (vsync off) */ val |= TCX_THC_MISC_VSYNC_DIS; break; case FB_BLANK_HSYNC_SUSPEND: /* VESA blank (hsync off) */ val |= TCX_THC_MISC_HSYNC_DIS; break; case FB_BLANK_POWERDOWN: /* Poweroff */ break; } sbus_writel(val, &thc->thc_misc); spin_unlock_irqrestore(&par->lock, flags); return 0; } static struct sbus_mmap_map __tcx_mmap_map[TCX_MMAP_ENTRIES] = { { .voff = TCX_RAM8BIT, .size = SBUS_MMAP_FBSIZE(1) }, { .voff = TCX_RAM24BIT, .size = SBUS_MMAP_FBSIZE(4) }, { .voff = TCX_UNK3, .size = SBUS_MMAP_FBSIZE(8) }, { .voff = TCX_UNK4, .size = SBUS_MMAP_FBSIZE(8) }, { .voff = TCX_CONTROLPLANE, .size = SBUS_MMAP_FBSIZE(4) }, { .voff = TCX_UNK6, .size 
= SBUS_MMAP_FBSIZE(8) }, { .voff = TCX_UNK7, .size = SBUS_MMAP_FBSIZE(8) }, { .voff = TCX_TEC, .size = PAGE_SIZE }, { .voff = TCX_BTREGS, .size = PAGE_SIZE }, { .voff = TCX_THC, .size = PAGE_SIZE }, { .voff = TCX_DHC, .size = PAGE_SIZE }, { .voff = TCX_ALT, .size = PAGE_SIZE }, { .voff = TCX_UNK2, .size = 0x20000 }, { .size = 0 } }; static int tcx_mmap(struct fb_info *info, struct vm_area_struct *vma) { struct tcx_par *par = (struct tcx_par *)info->par; return sbusfb_mmap_helper(par->mmap_map, info->fix.smem_start, info->fix.smem_len, par->which_io, vma); } static int tcx_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { struct tcx_par *par = (struct tcx_par *) info->par; return sbusfb_ioctl_helper(cmd, arg, info, FBTYPE_TCXCOLOR, (par->lowdepth ? 8 : 24), info->fix.smem_len); } /* * Initialisation */ static void tcx_init_fix(struct fb_info *info, int linebytes) { struct tcx_par *par = (struct tcx_par *)info->par; const char *tcx_name; if (par->lowdepth) tcx_name = "TCX8"; else tcx_name = "TCX24"; strlcpy(info->fix.id, tcx_name, sizeof(info->fix.id)); info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.visual = FB_VISUAL_PSEUDOCOLOR; info->fix.line_length = linebytes; info->fix.accel = FB_ACCEL_SUN_TCX; } static void tcx_unmap_regs(struct platform_device *op, struct fb_info *info, struct tcx_par *par) { if (par->tec) of_iounmap(&op->resource[7], par->tec, sizeof(struct tcx_tec)); if (par->thc) of_iounmap(&op->resource[9], par->thc, sizeof(struct tcx_thc)); if (par->bt) of_iounmap(&op->resource[8], par->bt, sizeof(struct bt_regs)); if (par->cplane) of_iounmap(&op->resource[4], par->cplane, info->fix.smem_len * sizeof(u32)); if (info->screen_base) of_iounmap(&op->resource[0], info->screen_base, info->fix.smem_len); } static int tcx_probe(struct platform_device *op) { struct device_node *dp = op->dev.of_node; struct fb_info *info; struct tcx_par *par; int linebytes, i, err; info = framebuffer_alloc(sizeof(struct tcx_par), &op->dev); err = -ENOMEM; if 
(!info) goto out_err; par = info->par; spin_lock_init(&par->lock); par->lowdepth = (of_find_property(dp, "tcx-8-bit", NULL) != NULL); sbusfb_fill_var(&info->var, dp, 8); info->var.red.length = 8; info->var.green.length = 8; info->var.blue.length = 8; linebytes = of_getintprop_default(dp, "linebytes", info->var.xres); info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres); par->tec = of_ioremap(&op->resource[7], 0, sizeof(struct tcx_tec), "tcx tec"); par->thc = of_ioremap(&op->resource[9], 0, sizeof(struct tcx_thc), "tcx thc"); par->bt = of_ioremap(&op->resource[8], 0, sizeof(struct bt_regs), "tcx dac"); info->screen_base = of_ioremap(&op->resource[0], 0, info->fix.smem_len, "tcx ram"); if (!par->tec || !par->thc || !par->bt || !info->screen_base) goto out_unmap_regs; memcpy(&par->mmap_map, &__tcx_mmap_map, sizeof(par->mmap_map)); if (!par->lowdepth) { par->cplane = of_ioremap(&op->resource[4], 0, info->fix.smem_len * sizeof(u32), "tcx cplane"); if (!par->cplane) goto out_unmap_regs; } else { par->mmap_map[1].size = SBUS_MMAP_EMPTY; par->mmap_map[4].size = SBUS_MMAP_EMPTY; par->mmap_map[5].size = SBUS_MMAP_EMPTY; par->mmap_map[6].size = SBUS_MMAP_EMPTY; } info->fix.smem_start = op->resource[0].start; par->which_io = op->resource[0].flags & IORESOURCE_BITS; for (i = 0; i < TCX_MMAP_ENTRIES; i++) { int j; switch (i) { case 10: j = 12; break; case 11: case 12: j = i - 1; break; default: j = i; break; } par->mmap_map[i].poff = op->resource[j].start; } info->flags = FBINFO_DEFAULT; info->fbops = &tcx_ops; /* Initialize brooktree DAC. 
*/ sbus_writel(0x04 << 24, &par->bt->addr); /* color planes */ sbus_writel(0xff << 24, &par->bt->control); sbus_writel(0x05 << 24, &par->bt->addr); sbus_writel(0x00 << 24, &par->bt->control); sbus_writel(0x06 << 24, &par->bt->addr); /* overlay plane */ sbus_writel(0x73 << 24, &par->bt->control); sbus_writel(0x07 << 24, &par->bt->addr); sbus_writel(0x00 << 24, &par->bt->control); tcx_reset(info); tcx_blank(FB_BLANK_UNBLANK, info); if (fb_alloc_cmap(&info->cmap, 256, 0)) goto out_unmap_regs; fb_set_cmap(&info->cmap, info); tcx_init_fix(info, linebytes); err = register_framebuffer(info); if (err < 0) goto out_dealloc_cmap; dev_set_drvdata(&op->dev, info); printk(KERN_INFO "%s: TCX at %lx:%lx, %s\n", dp->full_name, par->which_io, info->fix.smem_start, par->lowdepth ? "8-bit only" : "24-bit depth"); return 0; out_dealloc_cmap: fb_dealloc_cmap(&info->cmap); out_unmap_regs: tcx_unmap_regs(op, info, par); framebuffer_release(info); out_err: return err; } static int tcx_remove(struct platform_device *op) { struct fb_info *info = dev_get_drvdata(&op->dev); struct tcx_par *par = info->par; unregister_framebuffer(info); fb_dealloc_cmap(&info->cmap); tcx_unmap_regs(op, info, par); framebuffer_release(info); return 0; } static const struct of_device_id tcx_match[] = { { .name = "SUNW,tcx", }, {}, }; MODULE_DEVICE_TABLE(of, tcx_match); static struct platform_driver tcx_driver = { .driver = { .name = "tcx", .owner = THIS_MODULE, .of_match_table = tcx_match, }, .probe = tcx_probe, .remove = tcx_remove, }; static int __init tcx_init(void) { if (fb_get_options("tcxfb", NULL)) return -ENODEV; return platform_driver_register(&tcx_driver); } static void __exit tcx_exit(void) { platform_driver_unregister(&tcx_driver); } module_init(tcx_init); module_exit(tcx_exit); MODULE_DESCRIPTION("framebuffer driver for TCX chipsets"); MODULE_AUTHOR("David S. Miller <davem@davemloft.net>"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL");
gpl-2.0
JDevs/OWL-Predator-KERNEL
arch/powerpc/platforms/44x/ppc44x_simple.c
845
2422
/*
 * Generic PowerPC 44x platform support
 *
 * Copyright 2008 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 *
 * This implements simple platform support for PowerPC 44x chips.  This is
 * mostly used for eval boards or other simple and "generic" 44x boards.  If
 * your board has custom functions or hardware, then you will likely want to
 * implement your own board.c file to accommodate it.
 */

#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/ppc4xx.h>
#include <asm/prom.h>
#include <asm/time.h>
#include <asm/udbg.h>
#include <asm/uic.h>

#include <linux/init.h>
#include <linux/of_platform.h>

/* Bus types whose children are registered as platform devices. */
static __initdata struct of_device_id ppc44x_of_bus[] = {
	{ .compatible = "ibm,plb4", },
	{ .compatible = "ibm,opb", },
	{ .compatible = "ibm,ebc", },
	{ .compatible = "simple-bus", },
	{},
};

/* Walk the device tree and publish devices found on the buses above. */
static int __init ppc44x_device_probe(void)
{
	of_platform_bus_probe(NULL, ppc44x_of_bus, NULL);

	return 0;
}
machine_device_initcall(ppc44x_simple, ppc44x_device_probe);

/* This is the list of boards that can be supported by this simple
 * platform code.  This does _not_ mean the boards are compatible,
 * as they most certainly are not from a device tree perspective.
 * However, their differences are handled by the device tree and the
 * drivers and therefore they don't need custom board support files.
 *
 * Again, if your board needs to do things differently then create a
 * board.c file for it rather than adding it to this list.
 */
static char *board[] __initdata = {
	"amcc,arches",
	"amcc,bamboo",
	"amcc,canyonlands",
	"amcc,glacier",
	"ibm,ebony",
	"amcc,eiger",
	"amcc,katmai",
	"amcc,rainier",
	"amcc,redwood",
	"amcc,sequoia",
	"amcc,taishan",
	"amcc,yosemite",
	"mosaixtech,icon"
};

/*
 * Match the flat device tree root against the supported-board list;
 * returns 1 (and enables full PCI resource reassignment) on a match.
 */
static int __init ppc44x_probe(void)
{
	unsigned long root = of_get_flat_dt_root();
	int i;

	for (i = 0; i < ARRAY_SIZE(board); i++) {
		if (!of_flat_dt_is_compatible(root, board[i]))
			continue;

		ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
		return 1;
	}

	return 0;
}

define_machine(ppc44x_simple) {
	.name = "PowerPC 44x Platform",
	.probe = ppc44x_probe,
	.progress = udbg_progress,
	.init_IRQ = uic_init_tree,
	.get_irq = uic_get_irq,
	.restart = ppc4xx_reset_system,
	.calibrate_decr = generic_calibrate_decr,
};
gpl-2.0
weboo/kernel-nexus-s
drivers/video/backlight/progear_bl.c
1101
3983
/* * Backlight Driver for Frontpath ProGear HX1050+ * * Copyright (c) 2006 Marcin Juszkiewicz * * Based on Progear LCD driver by M Schacht * <mschacht at alumni dot washington dot edu> * * Based on Sharp's Corgi Backlight Driver * Based on Backlight Driver for HP Jornada 680 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/fb.h> #include <linux/backlight.h> #include <linux/pci.h> #define PMU_LPCR 0xB0 #define SB_MPS1 0x61 #define HW_LEVEL_MAX 0x77 #define HW_LEVEL_MIN 0x4f static struct pci_dev *pmu_dev = NULL; static struct pci_dev *sb_dev = NULL; static int progearbl_set_intensity(struct backlight_device *bd) { int intensity = bd->props.brightness; if (bd->props.power != FB_BLANK_UNBLANK) intensity = 0; if (bd->props.fb_blank != FB_BLANK_UNBLANK) intensity = 0; pci_write_config_byte(pmu_dev, PMU_LPCR, intensity + HW_LEVEL_MIN); return 0; } static int progearbl_get_intensity(struct backlight_device *bd) { u8 intensity; pci_read_config_byte(pmu_dev, PMU_LPCR, &intensity); return intensity - HW_LEVEL_MIN; } static const struct backlight_ops progearbl_ops = { .get_brightness = progearbl_get_intensity, .update_status = progearbl_set_intensity, }; static int progearbl_probe(struct platform_device *pdev) { struct backlight_properties props; u8 temp; struct backlight_device *progear_backlight_device; int ret; pmu_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, NULL); if (!pmu_dev) { printk("ALI M7101 PMU not found.\n"); return -ENODEV; } sb_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); if (!sb_dev) { printk("ALI 1533 SB not found.\n"); ret = -ENODEV; goto put_pmu; } /* Set SB_MPS1 to enable brightness control. 
*/ pci_read_config_byte(sb_dev, SB_MPS1, &temp); pci_write_config_byte(sb_dev, SB_MPS1, temp | 0x20); memset(&props, 0, sizeof(struct backlight_properties)); props.max_brightness = HW_LEVEL_MAX - HW_LEVEL_MIN; progear_backlight_device = backlight_device_register("progear-bl", &pdev->dev, NULL, &progearbl_ops, &props); if (IS_ERR(progear_backlight_device)) { ret = PTR_ERR(progear_backlight_device); goto put_sb; } platform_set_drvdata(pdev, progear_backlight_device); progear_backlight_device->props.power = FB_BLANK_UNBLANK; progear_backlight_device->props.brightness = HW_LEVEL_MAX - HW_LEVEL_MIN; progearbl_set_intensity(progear_backlight_device); return 0; put_sb: pci_dev_put(sb_dev); put_pmu: pci_dev_put(pmu_dev); return ret; } static int progearbl_remove(struct platform_device *pdev) { struct backlight_device *bd = platform_get_drvdata(pdev); backlight_device_unregister(bd); return 0; } static struct platform_driver progearbl_driver = { .probe = progearbl_probe, .remove = progearbl_remove, .driver = { .name = "progear-bl", }, }; static struct platform_device *progearbl_device; static int __init progearbl_init(void) { int ret = platform_driver_register(&progearbl_driver); if (ret) return ret; progearbl_device = platform_device_register_simple("progear-bl", -1, NULL, 0); if (IS_ERR(progearbl_device)) { platform_driver_unregister(&progearbl_driver); return PTR_ERR(progearbl_device); } return 0; } static void __exit progearbl_exit(void) { pci_dev_put(pmu_dev); pci_dev_put(sb_dev); platform_device_unregister(progearbl_device); platform_driver_unregister(&progearbl_driver); } module_init(progearbl_init); module_exit(progearbl_exit); MODULE_AUTHOR("Marcin Juszkiewicz <linux@hrw.one.pl>"); MODULE_DESCRIPTION("ProGear Backlight Driver"); MODULE_LICENSE("GPL");
gpl-2.0
Austinpb/linux
drivers/hwmon/jz4740-hwmon.c
1101
4348
/*
 * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
 * JZ4740 SoC HWMON driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/io.h>

#include <linux/completion.h>
#include <linux/mfd/core.h>

#include <linux/hwmon.h>

/* Per-device state. */
struct jz4740_hwmon {
	void __iomem *base;		/* mapped ADC data register */
	int irq;			/* sample-ready interrupt */
	const struct mfd_cell *cell;	/* MFD cell: enables/disables ADC */
	struct device *hwmon;		/* registered hwmon class device */
	struct completion read_completion; /* signalled from the IRQ */
	struct mutex lock;		/* serializes ADC reads */
};

static ssize_t jz4740_hwmon_show_name(struct device *dev,
	struct device_attribute *dev_attr, char *buf)
{
	return sprintf(buf, "jz4740\n");
}

/* Sample-ready interrupt: just wake the waiting reader. */
static irqreturn_t jz4740_hwmon_irq(int irq, void *data)
{
	struct jz4740_hwmon *hwmon = data;

	complete(&hwmon->read_completion);
	return IRQ_HANDLED;
}

static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
	struct device_attribute *dev_attr, char *buf)
{
	struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
	struct platform_device *pdev = to_platform_device(dev);
	struct completion *completion = &hwmon->read_completion;
	unsigned long val;
	long t;
	int ret;

	mutex_lock(&hwmon->lock);

	reinit_completion(completion);

	enable_irq(hwmon->irq);
	hwmon->cell->enable(pdev);

	t = wait_for_completion_interruptible_timeout(completion, HZ);

	if (t > 0) {
		/* 12-bit raw sample scaled against a 3.3 V reference. */
		val = readw(hwmon->base) & 0xfff;
		val = (val * 3300) >> 12;
		ret = sprintf(buf, "%lu\n", val);
	} else if (t == 0) {
		ret = -ETIMEDOUT;
	} else {
		ret = t;	/* interrupted */
	}

	hwmon->cell->disable(pdev);
	disable_irq(hwmon->irq);

	mutex_unlock(&hwmon->lock);

	return ret;
}

static DEVICE_ATTR(name, S_IRUGO, jz4740_hwmon_show_name, NULL);
static DEVICE_ATTR(in0_input, S_IRUGO, jz4740_hwmon_read_adcin, NULL);

static struct attribute *jz4740_hwmon_attributes[] = {
	&dev_attr_name.attr,
	&dev_attr_in0_input.attr,
	NULL
};

static const struct attribute_group jz4740_hwmon_attr_group = {
	.attrs = jz4740_hwmon_attributes,
};

static int jz4740_hwmon_probe(struct platform_device *pdev)
{
	struct jz4740_hwmon *hwmon;
	struct resource *regs;
	int ret;

	hwmon = devm_kzalloc(&pdev->dev, sizeof(*hwmon), GFP_KERNEL);
	if (!hwmon)
		return -ENOMEM;

	hwmon->cell = mfd_get_cell(pdev);

	hwmon->irq = platform_get_irq(pdev, 0);
	if (hwmon->irq < 0) {
		dev_err(&pdev->dev, "Failed to get platform irq: %d\n",
			hwmon->irq);
		return hwmon->irq;
	}

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hwmon->base = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(hwmon->base))
		return PTR_ERR(hwmon->base);

	init_completion(&hwmon->read_completion);
	mutex_init(&hwmon->lock);

	platform_set_drvdata(pdev, hwmon);

	ret = devm_request_irq(&pdev->dev, hwmon->irq, jz4740_hwmon_irq, 0,
			       pdev->name, hwmon);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
		return ret;
	}
	/* Kept masked except during an active read. */
	disable_irq(hwmon->irq);

	ret = sysfs_create_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group);
	if (ret) {
		dev_err(&pdev->dev, "Failed to create sysfs group: %d\n", ret);
		return ret;
	}

	hwmon->hwmon = hwmon_device_register(&pdev->dev);
	if (IS_ERR(hwmon->hwmon)) {
		ret = PTR_ERR(hwmon->hwmon);
		goto err_remove_file;
	}

	return 0;

err_remove_file:
	sysfs_remove_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group);
	return ret;
}

static int jz4740_hwmon_remove(struct platform_device *pdev)
{
	struct jz4740_hwmon *hwmon = platform_get_drvdata(pdev);

	hwmon_device_unregister(hwmon->hwmon);
	sysfs_remove_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group);

	return 0;
}

static struct platform_driver jz4740_hwmon_driver = {
	.probe = jz4740_hwmon_probe,
	.remove = jz4740_hwmon_remove,
	.driver = {
		.name = "jz4740-hwmon",
	},
};

module_platform_driver(jz4740_hwmon_driver);

MODULE_DESCRIPTION("JZ4740 SoC HWMON driver");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:jz4740-hwmon");
gpl-2.0
freexperia/android_kernel_semc_msm7x30
arch/arm/mach-msm/qdsp5v2/adsp_info.c
1357
5214
/*
 * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include "adsp.h"

/* Firmware modules */
#define QDSP_MODULE_KERNEL                  0x0106dd4e
#define QDSP_MODULE_AFETASK                 0x0106dd6f
#define QDSP_MODULE_AUDPLAY0TASK            0x0106dd70
#define QDSP_MODULE_AUDPLAY1TASK            0x0106dd71
#define QDSP_MODULE_AUDPPTASK               0x0106dd72
#define QDSP_MODULE_VIDEOTASK               0x0106dd73
#define QDSP_MODULE_VIDEO_AAC_VOC           0x0106dd74
#define QDSP_MODULE_PCM_DEC                 0x0106dd75
#define QDSP_MODULE_AUDIO_DEC_MP3           0x0106dd76
#define QDSP_MODULE_AUDIO_DEC_AAC           0x0106dd77
#define QDSP_MODULE_AUDIO_DEC_WMA           0x0106dd78
#define QDSP_MODULE_HOSTPCM                 0x0106dd79
#define QDSP_MODULE_DTMF                    0x0106dd7a
#define QDSP_MODULE_AUDRECTASK              0x0106dd7b
#define QDSP_MODULE_AUDPREPROCTASK          0x0106dd7c
#define QDSP_MODULE_SBC_ENC                 0x0106dd7d
#define QDSP_MODULE_VOC_UMTS                0x0106dd9a
#define QDSP_MODULE_VOC_CDMA                0x0106dd98
#define QDSP_MODULE_VOC_PCM                 0x0106dd7f
#define QDSP_MODULE_VOCENCTASK              0x0106dd80
#define QDSP_MODULE_VOCDECTASK              0x0106dd81
#define QDSP_MODULE_VOICEPROCTASK           0x0106dd82
#define QDSP_MODULE_VIDEOENCTASK            0x0106dd83
#define QDSP_MODULE_VFETASK                 0x0106dd84
#define QDSP_MODULE_WAV_ENC                 0x0106dd85
#define QDSP_MODULE_AACLC_ENC               0x0106dd86
#define QDSP_MODULE_VIDEO_AMR               0x0106dd87
#define QDSP_MODULE_VOC_AMR                 0x0106dd88
#define QDSP_MODULE_VOC_EVRC                0x0106dd89
#define QDSP_MODULE_VOC_13K                 0x0106dd8a
#define QDSP_MODULE_VOC_FGV                 0x0106dd8b
#define QDSP_MODULE_DIAGTASK                0x0106dd8c
#define QDSP_MODULE_JPEGTASK                0x0106dd8d
#define QDSP_MODULE_LPMTASK                 0x0106dd8e
#define QDSP_MODULE_QCAMTASK                0x0106dd8f
#define QDSP_MODULE_MODMATHTASK             0x0106dd90
#define QDSP_MODULE_AUDPLAY2TASK            0x0106dd91
#define QDSP_MODULE_AUDPLAY3TASK            0x0106dd92
#define QDSP_MODULE_AUDPLAY4TASK            0x0106dd93
#define QDSP_MODULE_GRAPHICSTASK            0x0106dd94
#define QDSP_MODULE_MIDI                    0x0106dd95
#define QDSP_MODULE_GAUDIO                  0x0106dd96
#define QDSP_MODULE_VDEC_LP_MODE            0x0106dd97
#define QDSP_MODULE_VIDEO_AAC_VOC_TURBO     0x01089f77
#define QDSP_MODULE_VIDEO_AMR_TURBO         0x01089f78
#define QDSP_MODULE_WM_TURBO_MODE           0x01089f79
#define QDSP_MODULE_VDEC_LP_MODE_TURBO      0x01089f7a
#define QDSP_MODULE_AUDREC0TASK             0x0109696f
#define QDSP_MODULE_AUDREC1TASK             0x01096970
#define QDSP_MODULE_AUDREC2TASK             0x010a2f59
#define QDSP_MODULE_MAX                     0x7fffffff

/* DO NOT USE: Force this enum to be a 32bit type to improve speed */
#define QDSP_MODULE_32BIT_DUMMY 0x10000

/* Per-image lookup tables, filled from init_info at adsp_init_info() time. */
static uint32_t *qdsp_task_to_module[IMG_MAX];
static uint32_t *qdsp_queue_offset_table[IMG_MAX];

#define QDSP_MODULE(n, clkname, clkrate, verify_cmd_func, patch_event_func) \
	{ .name = #n, .pdev_name = "adsp_" #n, .id = QDSP_MODULE_##n, \
	  .clk_name = clkname, .clk_rate = clkrate, \
	  .verify_cmd = verify_cmd_func, .patch_event = patch_event_func }

static struct adsp_module_info module_info[] = {
	QDSP_MODULE(AUDPLAY0TASK, NULL, 0, NULL, NULL),
	QDSP_MODULE(AUDPLAY1TASK, NULL, 0, NULL, NULL),
	QDSP_MODULE(AUDPLAY2TASK, NULL, 0, NULL, NULL),
	QDSP_MODULE(AUDPLAY3TASK, NULL, 0, NULL, NULL),
	QDSP_MODULE(AUDPPTASK, NULL, 0, NULL, NULL),
	QDSP_MODULE(AUDPREPROCTASK, NULL, 0, NULL, NULL),
	QDSP_MODULE(AFETASK, NULL, 0, NULL, NULL),
	QDSP_MODULE(AUDREC0TASK, NULL, 0, NULL, NULL),
	QDSP_MODULE(AUDREC1TASK, NULL, 0, NULL, NULL),
	QDSP_MODULE(AUDREC2TASK, NULL, 0, NULL, NULL),
};

/*
 * Populate @info with the fixed register offsets, message-size limits and
 * per-image lookup tables used by the ADSP driver core.  Always returns 0.
 */
int adsp_init_info(struct adsp_info *info)
{
	uint32_t img_num;

	info->send_irq = 0x00c00200;
	info->read_ctrl = 0x00400038;
	info->write_ctrl = 0x00400034;

	info->max_msg16_size = 193;
	info->max_msg32_size = 8;

	/* Fill both per-image tables in a single pass. */
	for (img_num = 0; img_num < IMG_MAX; img_num++) {
		qdsp_queue_offset_table[img_num] =
			&info->init_info_ptr->queue_offsets[img_num][0];
		qdsp_task_to_module[img_num] =
			&info->init_info_ptr->task_to_module_tbl[img_num][0];
	}

	info->max_task_id = ENTRIES_MAX;
	info->max_module_id = QDSP_MODULE_MAX - 1;
	info->max_queue_id = QDSP_MAX_NUM_QUEUES;
	info->max_image_id = 0;
	info->queue_offset = qdsp_queue_offset_table;
	info->task_to_module = qdsp_task_to_module;

	info->module_count = ARRAY_SIZE(module_info);
	info->module = module_info;
	return 0;
}
gpl-2.0
PhilZ-cwm6/android_kernel_htc_vigor
drivers/usb/musb/musb_gadget_ep0.c
2125
27667
/* * MUSB OTG peripheral driver ep0 handling * * Copyright 2005 Mentor Graphics Corporation * Copyright (C) 2005-2006 by Texas Instruments * Copyright (C) 2006-2007 Nokia Corporation * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #include <linux/kernel.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/spinlock.h> #include <linux/init.h> #include <linux/device.h> #include <linux/interrupt.h> #include "musb_core.h" /* ep0 is always musb->endpoints[0].ep_in */ #define next_ep0_request(musb) next_in_request(&(musb)->endpoints[0]) /* * locking note: we use only the controller lock, for simpler correctness. * It's always held with IRQs blocked. * * It protects the ep0 request queue as well as ep0_state, not just the * controller and indexed registers. And that lock stays held unless it * needs to be dropped to allow reentering this driver ... like upcalls to * the gadget driver, or adjusting endpoint halt status. */ static char *decode_ep0stage(u8 stage) { switch (stage) { case MUSB_EP0_STAGE_IDLE: return "idle"; case MUSB_EP0_STAGE_SETUP: return "setup"; case MUSB_EP0_STAGE_TX: return "in"; case MUSB_EP0_STAGE_RX: return "out"; case MUSB_EP0_STAGE_ACKWAIT: return "wait"; case MUSB_EP0_STAGE_STATUSIN: return "in/status"; case MUSB_EP0_STAGE_STATUSOUT: return "out/status"; default: return "?"; } } /* handle a standard GET_STATUS request * Context: caller holds controller lock */ static int service_tx_status_request( struct musb *musb, const struct usb_ctrlrequest *ctrlrequest) { void __iomem *mbase = musb->mregs; int handled = 1; u8 result[2], epnum = 0; const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK; result[1] = 0; switch (recip) { case USB_RECIP_DEVICE: result[0] = musb->is_self_powered << USB_DEVICE_SELF_POWERED; result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP; #ifdef CONFIG_USB_MUSB_OTG if (musb->g.is_otg) { result[0] |= musb->g.b_hnp_enable << USB_DEVICE_B_HNP_ENABLE; result[0] |= musb->g.a_alt_hnp_support << USB_DEVICE_A_ALT_HNP_SUPPORT; result[0] |= musb->g.a_hnp_support << USB_DEVICE_A_HNP_SUPPORT; } #endif break; case USB_RECIP_INTERFACE: result[0] = 0; break; case USB_RECIP_ENDPOINT: { int is_in; struct musb_ep *ep; u16 tmp; void __iomem 
*regs; epnum = (u8) ctrlrequest->wIndex; if (!epnum) { result[0] = 0; break; } is_in = epnum & USB_DIR_IN; if (is_in) { epnum &= 0x0f; ep = &musb->endpoints[epnum].ep_in; } else { ep = &musb->endpoints[epnum].ep_out; } regs = musb->endpoints[epnum].regs; if (epnum >= MUSB_C_NUM_EPS || !ep->desc) { handled = -EINVAL; break; } musb_ep_select(mbase, epnum); if (is_in) tmp = musb_readw(regs, MUSB_TXCSR) & MUSB_TXCSR_P_SENDSTALL; else tmp = musb_readw(regs, MUSB_RXCSR) & MUSB_RXCSR_P_SENDSTALL; musb_ep_select(mbase, 0); result[0] = tmp ? 1 : 0; } break; default: /* class, vendor, etc ... delegate */ handled = 0; break; } /* fill up the fifo; caller updates csr0 */ if (handled > 0) { u16 len = le16_to_cpu(ctrlrequest->wLength); if (len > 2) len = 2; musb_write_fifo(&musb->endpoints[0], len, result); } return handled; } /* * handle a control-IN request, the end0 buffer contains the current request * that is supposed to be a standard control request. Assumes the fifo to * be at least 2 bytes long. 
* * @return 0 if the request was NOT HANDLED, * < 0 when error * > 0 when the request is processed * * Context: caller holds controller lock */ static int service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest) { int handled = 0; /* not handled */ if ((ctrlrequest->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { switch (ctrlrequest->bRequest) { case USB_REQ_GET_STATUS: handled = service_tx_status_request(musb, ctrlrequest); break; /* case USB_REQ_SYNC_FRAME: */ default: break; } } return handled; } /* * Context: caller holds controller lock */ static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req) { musb_g_giveback(&musb->endpoints[0].ep_in, req, 0); } /* * Tries to start B-device HNP negotiation if enabled via sysfs */ static inline void musb_try_b_hnp_enable(struct musb *musb) { void __iomem *mbase = musb->mregs; u8 devctl; dev_dbg(musb->controller, "HNP: Setting HR\n"); devctl = musb_readb(mbase, MUSB_DEVCTL); musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR); } /* * Handle all control requests with no DATA stage, including standard * requests such as: * USB_REQ_SET_CONFIGURATION, USB_REQ_SET_INTERFACE, unrecognized * always delegated to the gadget driver * USB_REQ_SET_ADDRESS, USB_REQ_CLEAR_FEATURE, USB_REQ_SET_FEATURE * always handled here, except for class/vendor/... 
features
 *
 * Context: caller holds controller lock
 */
static int service_zero_data_request(struct musb *musb,
		struct usb_ctrlrequest *ctrlrequest)
__releases(musb->lock)
__acquires(musb->lock)
{
	int handled = -EINVAL;
	void __iomem *mbase = musb->mregs;
	const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;

	/* the gadget driver handles everything except what we MUST handle */
	if ((ctrlrequest->bRequestType & USB_TYPE_MASK)
			== USB_TYPE_STANDARD) {
		switch (ctrlrequest->bRequest) {
		case USB_REQ_SET_ADDRESS:
			/* change it after the status stage */
			musb->set_address = true;
			musb->address = (u8) (ctrlrequest->wValue & 0x7f);
			handled = 1;
			break;

		case USB_REQ_CLEAR_FEATURE:
			switch (recip) {
			case USB_RECIP_DEVICE:
				if (ctrlrequest->wValue
						!= USB_DEVICE_REMOTE_WAKEUP)
					break;
				musb->may_wakeup = 0;
				handled = 1;
				break;
			case USB_RECIP_INTERFACE:
				break;
			case USB_RECIP_ENDPOINT:{
				const u8		epnum =
					ctrlrequest->wIndex & 0x0f;
				struct musb_ep		*musb_ep;
				struct musb_hw_ep	*ep;
				struct musb_request	*request;
				void __iomem		*regs;
				int			is_in;
				u16			csr;

				if (epnum == 0 || epnum >= MUSB_C_NUM_EPS ||
				    ctrlrequest->wValue != USB_ENDPOINT_HALT)
					break;

				ep = musb->endpoints + epnum;
				regs = ep->regs;
				is_in = ctrlrequest->wIndex & USB_DIR_IN;
				if (is_in)
					musb_ep = &ep->ep_in;
				else
					musb_ep = &ep->ep_out;
				if (!musb_ep->desc)
					break;

				handled = 1;
				/* Ignore request if endpoint is wedged */
				if (musb_ep->wedged)
					break;

				/* Clear the halt: drop the stall bits and
				 * reset the data toggle, per USB 2.0 9.4.5.
				 */
				musb_ep_select(mbase, epnum);
				if (is_in) {
					csr = musb_readw(regs, MUSB_TXCSR);
					csr |= MUSB_TXCSR_CLRDATATOG |
					       MUSB_TXCSR_P_WZC_BITS;
					csr &= ~(MUSB_TXCSR_P_SENDSTALL |
						 MUSB_TXCSR_P_SENTSTALL |
						 MUSB_TXCSR_TXPKTRDY);
					musb_writew(regs, MUSB_TXCSR, csr);
				} else {
					csr = musb_readw(regs, MUSB_RXCSR);
					csr |= MUSB_RXCSR_CLRDATATOG |
					       MUSB_RXCSR_P_WZC_BITS;
					csr &= ~(MUSB_RXCSR_P_SENDSTALL |
						 MUSB_RXCSR_P_SENTSTALL);
					musb_writew(regs, MUSB_RXCSR, csr);
				}

				/* Maybe start the first request in the queue */
				request = next_request(musb_ep);
				if (!musb_ep->busy && request) {
					dev_dbg(musb->controller, "restarting the request\n");
					musb_ep_restart(musb, request);
				}

				/* select ep0 again */
				musb_ep_select(mbase, 0);
				} break;
			default:
				/* class, vendor, etc ... delegate */
				handled = 0;
				break;
			}
			break;

		case USB_REQ_SET_FEATURE:
			switch (recip) {
			case USB_RECIP_DEVICE:
				handled = 1;
				switch (ctrlrequest->wValue) {
				case USB_DEVICE_REMOTE_WAKEUP:
					musb->may_wakeup = 1;
					break;
				case USB_DEVICE_TEST_MODE:
					/* test modes are high-speed only;
					 * wIndex low byte must be zero per
					 * USB 2.0 9.4.9
					 */
					if (musb->g.speed != USB_SPEED_HIGH)
						goto stall;
					if (ctrlrequest->wIndex & 0xff)
						goto stall;

					switch (ctrlrequest->wIndex >> 8) {
					case 1:
						pr_debug("TEST_J\n");
						/* TEST_J */
						musb->test_mode_nr =
							MUSB_TEST_J;
						break;
					case 2:
						/* TEST_K */
						pr_debug("TEST_K\n");
						musb->test_mode_nr =
							MUSB_TEST_K;
						break;
					case 3:
						/* TEST_SE0_NAK */
						pr_debug("TEST_SE0_NAK\n");
						musb->test_mode_nr =
							MUSB_TEST_SE0_NAK;
						break;
					case 4:
						/* TEST_PACKET */
						pr_debug("TEST_PACKET\n");
						musb->test_mode_nr =
							MUSB_TEST_PACKET;
						break;
					case 0xc0:
						/* TEST_FORCE_HS */
						pr_debug("TEST_FORCE_HS\n");
						musb->test_mode_nr =
							MUSB_TEST_FORCE_HS;
						break;
					case 0xc1:
						/* TEST_FORCE_FS */
						pr_debug("TEST_FORCE_FS\n");
						musb->test_mode_nr =
							MUSB_TEST_FORCE_FS;
						break;
					case 0xc2:
						/* TEST_FIFO_ACCESS */
						pr_debug("TEST_FIFO_ACCESS\n");
						musb->test_mode_nr =
							MUSB_TEST_FIFO_ACCESS;
						break;
					case 0xc3:
						/* TEST_FORCE_HOST */
						pr_debug("TEST_FORCE_HOST\n");
						musb->test_mode_nr =
							MUSB_TEST_FORCE_HOST;
						break;
					default:
						goto stall;
					}

					/* enter test mode after irq
					 * (handled is 1 here unless a goto
					 * stall above bailed out first)
					 */
					if (handled > 0)
						musb->test_mode = true;
					break;
#ifdef CONFIG_USB_MUSB_OTG
				case USB_DEVICE_B_HNP_ENABLE:
					if (!musb->g.is_otg)
						goto stall;
					musb->g.b_hnp_enable = 1;
					musb_try_b_hnp_enable(musb);
					break;
				case USB_DEVICE_A_HNP_SUPPORT:
					if (!musb->g.is_otg)
						goto stall;
					musb->g.a_hnp_support = 1;
					break;
				case USB_DEVICE_A_ALT_HNP_SUPPORT:
					if (!musb->g.is_otg)
						goto stall;
					musb->g.a_alt_hnp_support = 1;
					break;
#endif
				case USB_DEVICE_DEBUG_MODE:
					handled = 0;
					break;
				/* NOTE(review): the stall label sits on the
				 * default arm so every "goto stall" above
				 * lands on "handled = -EINVAL" — unusual but
				 * deliberate placement.
				 */
stall:
				default:
					handled = -EINVAL;
					break;
				}
				break;

			case USB_RECIP_INTERFACE:
				break;

			case USB_RECIP_ENDPOINT:{
				const u8		epnum =
					ctrlrequest->wIndex & 0x0f;
				struct musb_ep		*musb_ep;
				struct musb_hw_ep	*ep;
				void __iomem		*regs;
				int			is_in;
				u16			csr;

				if (epnum == 0 || epnum >= MUSB_C_NUM_EPS ||
				    ctrlrequest->wValue != USB_ENDPOINT_HALT)
					break;

				ep = musb->endpoints + epnum;
				regs = ep->regs;
				is_in = ctrlrequest->wIndex & USB_DIR_IN;
				if (is_in)
					musb_ep = &ep->ep_in;
				else
					musb_ep = &ep->ep_out;
				if (!musb_ep->desc)
					break;

				/* Set the halt: flush any pending FIFO data
				 * and raise the SENDSTALL bit.
				 */
				musb_ep_select(mbase, epnum);
				if (is_in) {
					csr = musb_readw(regs, MUSB_TXCSR);
					if (csr & MUSB_TXCSR_FIFONOTEMPTY)
						csr |= MUSB_TXCSR_FLUSHFIFO;
					csr |= MUSB_TXCSR_P_SENDSTALL
						| MUSB_TXCSR_CLRDATATOG
						| MUSB_TXCSR_P_WZC_BITS;
					musb_writew(regs, MUSB_TXCSR, csr);
				} else {
					csr = musb_readw(regs, MUSB_RXCSR);
					csr |= MUSB_RXCSR_P_SENDSTALL
						| MUSB_RXCSR_FLUSHFIFO
						| MUSB_RXCSR_CLRDATATOG
						| MUSB_RXCSR_P_WZC_BITS;
					musb_writew(regs, MUSB_RXCSR, csr);
				}

				/* select ep0 again */
				musb_ep_select(mbase, 0);
				handled = 1;
				} break;

			default:
				/* class, vendor, etc ... delegate */
				handled = 0;
				break;
			}
			break;
		default:
			/* delegate SET_CONFIGURATION, etc */
			handled = 0;
		}
	} else
		handled = 0;
	return handled;
}

/* we have an ep0out data packet
 * Context: caller holds controller lock
 */
static void ep0_rxstate(struct musb *musb)
{
	void __iomem		*regs = musb->control_ep->regs;
	struct musb_request	*request;
	struct usb_request	*req;
	u16			count, csr;

	request = next_ep0_request(musb);
	/* NOTE(review): relies on 'request' (struct usb_request) being the
	 * first member of struct musb_request, so a NULL 'request' yields a
	 * NULL 'req' here rather than a bogus pointer — confirm against the
	 * struct layout in musb_gadget.h.
	 */
	req = &request->request;

	/* read packet and ack; or stall because of gadget driver bug:
	 * should have provided the rx buffer before setup() returned.
	 */
	if (req) {
		void		*buf = req->buf + req->actual;
		unsigned	len = req->length - req->actual;

		/* read the buffer; clamp to the remaining space so an
		 * oversized packet is truncated and flagged -EOVERFLOW
		 */
		count = musb_readb(regs, MUSB_COUNT0);
		if (count > len) {
			req->status = -EOVERFLOW;
			count = len;
		}
		musb_read_fifo(&musb->endpoints[0], count, buf);
		req->actual += count;
		csr = MUSB_CSR0_P_SVDRXPKTRDY;
		/* a short packet (< 64 bytes) or a full buffer ends the
		 * OUT data stage; otherwise keep req NULL so we don't
		 * complete it yet
		 */
		if (count < 64 || req->actual == req->length) {
			musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
			csr |= MUSB_CSR0_P_DATAEND;
		} else
			req = NULL;
	} else
		csr = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_SENDSTALL;

	/* Completion handler may choose to stall, e.g. because the
	 * message just received holds invalid data.
	 */
	if (req) {
		musb->ackpend = csr;
		musb_g_ep0_giveback(musb, req);
		if (!musb->ackpend)
			return;
		musb->ackpend = 0;
	}
	musb_ep_select(musb->mregs, 0);
	musb_writew(regs, MUSB_CSR0, csr);
}

/*
 * transmitting to the host (IN), this code might be called from IRQ
 * and from kernel thread.
 *
 * Context: caller holds controller lock
 */
static void ep0_txstate(struct musb *musb)
{
	void __iomem		*regs = musb->control_ep->regs;
	struct musb_request	*req = next_ep0_request(musb);
	struct usb_request	*request;
	u16			csr = MUSB_CSR0_TXPKTRDY;
	u8			*fifo_src;
	u8			fifo_count;

	if (!req) {
		/* WARN_ON(1); */
		dev_dbg(musb->controller, "odd; csr0 %04x\n",
			musb_readw(regs, MUSB_CSR0));
		return;
	}

	request = &req->request;

	/* load the data: at most one ep0 FIFO's worth per call */
	fifo_src = (u8 *) request->buf + request->actual;
	fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE,
		request->length - request->actual);
	musb_write_fifo(&musb->endpoints[0], fifo_count, fifo_src);
	request->actual += fifo_count;

	/* update the flags: a short packet, or the last full packet with
	 * no zero-length terminator requested, ends the IN data stage
	 */
	if (fifo_count < MUSB_MAX_END0_PACKET
			|| (request->actual == request->length
				&& !request->zero)) {
		musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
		csr |= MUSB_CSR0_P_DATAEND;
	} else
		request = NULL;

	/* report completions as soon as the fifo's loaded; there's no
	 * win in waiting till this last packet gets acked.  (other than
	 * very precise fault reporting, needed by USB TMC; possible with
	 * this hardware, but not usable from portable gadget drivers.)
	 */
	if (request) {
		musb->ackpend = csr;
		musb_g_ep0_giveback(musb, request);
		if (!musb->ackpend)
			return;
		musb->ackpend = 0;
	}

	/* send it out, triggering a "txpktrdy cleared" irq */
	musb_ep_select(musb->mregs, 0);
	musb_writew(regs, MUSB_CSR0, csr);
}

/*
 * Read a SETUP packet (struct usb_ctrlrequest) from the hardware.
 * Fields are left in USB byte-order.
 *
 * Context: caller holds controller lock.
 */
static void
musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req)
{
	struct musb_request	*r;
	void __iomem		*regs = musb->control_ep->regs;

	musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req);

	/* NOTE:  earlier 2.6 versions changed setup packets to host
	 * order, but now USB packets always stay in USB byte order.
	 */
	dev_dbg(musb->controller, "SETUP req%02x.%02x v%04x i%04x l%d\n",
		req->bRequestType,
		req->bRequest,
		le16_to_cpu(req->wValue),
		le16_to_cpu(req->wIndex),
		le16_to_cpu(req->wLength));

	/* clean up any leftover transfers */
	r = next_ep0_request(musb);
	if (r)
		musb_g_ep0_giveback(musb, &r->request);

	/* For zero-data requests we want to delay the STATUS stage to
	 * avoid SETUPEND errors.  If we read data (OUT), delay accepting
	 * packets until there's a buffer to store them in.
	 *
	 * If we write data, the controller acts happier if we enable
	 * the TX FIFO right away, and give the controller a moment
	 * to switch modes...
	 */
	musb->set_address = false;
	musb->ackpend = MUSB_CSR0_P_SVDRXPKTRDY;
	if (req->wLength == 0) {
		if (req->bRequestType & USB_DIR_IN)
			musb->ackpend |= MUSB_CSR0_TXPKTRDY;
		musb->ep0_state = MUSB_EP0_STAGE_ACKWAIT;
	} else if (req->bRequestType & USB_DIR_IN) {
		musb->ep0_state = MUSB_EP0_STAGE_TX;
		/* ack the SETUP right away and busy-wait for the
		 * controller to drop RXPKTRDY before loading TX data
		 */
		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDRXPKTRDY);
		while ((musb_readw(regs, MUSB_CSR0)
				& MUSB_CSR0_RXPKTRDY) != 0)
			cpu_relax();
		musb->ackpend = 0;
	} else
		musb->ep0_state = MUSB_EP0_STAGE_RX;
}

/* Hand a control request to the bound gadget driver's setup() callback,
 * dropping the controller lock across the (possibly sleeping) callback.
 * Returns the callback's result, or -EOPNOTSUPP when no driver is bound.
 */
static int
forward_to_driver(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
__releases(musb->lock)
__acquires(musb->lock)
{
	int retval;
	if (!musb->gadget_driver)
		return -EOPNOTSUPP;
	spin_unlock(&musb->lock);
	retval = musb->gadget_driver->setup(&musb->g, ctrlrequest);
	spin_lock(&musb->lock);
	return retval;
}

/*
 * Handle peripheral ep0 interrupt
 *
 * Context: irq handler; we won't re-enter the driver that way.
 */
irqreturn_t musb_g_ep0_irq(struct musb *musb)
{
	u16		csr;
	u16		len;
	void __iomem	*mbase = musb->mregs;
	void __iomem	*regs = musb->endpoints[0].regs;
	irqreturn_t	retval = IRQ_NONE;

	musb_ep_select(mbase, 0);	/* select ep0 */
	csr = musb_readw(regs, MUSB_CSR0);
	len = musb_readb(regs, MUSB_COUNT0);

	dev_dbg(musb->controller, "csr %04x, count %d, myaddr %d, ep0stage %s\n",
			csr, len,
			musb_readb(mbase, MUSB_FADDR),
			decode_ep0stage(musb->ep0_state));

	/* I sent a stall.. need to acknowledge it now.. */
	if (csr & MUSB_CSR0_P_SENTSTALL) {
		musb_writew(regs, MUSB_CSR0,
				csr & ~MUSB_CSR0_P_SENTSTALL);
		retval = IRQ_HANDLED;
		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
		csr = musb_readw(regs, MUSB_CSR0);
	}

	/* request ended "early" */
	if (csr & MUSB_CSR0_P_SETUPEND) {
		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND);
		retval = IRQ_HANDLED;
		/* Transition into the early status phase */
		switch (musb->ep0_state) {
		case MUSB_EP0_STAGE_TX:
			musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
			break;
		case MUSB_EP0_STAGE_RX:
			musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
			break;
		default:
			ERR("SetupEnd came in a wrong ep0stage %s\n",
			    decode_ep0stage(musb->ep0_state));
		}
		csr = musb_readw(regs, MUSB_CSR0);
		/* NOTE:  request may need completion */
	}

	/* docs from Mentor only describe tx, rx, and idle/setup states.
	 * we need to handle nuances around status stages, and also the
	 * case where status and setup stages come back-to-back ...
	 */
	switch (musb->ep0_state) {

	case MUSB_EP0_STAGE_TX:
		/* irq on clearing txpktrdy */
		if ((csr & MUSB_CSR0_TXPKTRDY) == 0) {
			ep0_txstate(musb);
			retval = IRQ_HANDLED;
		}
		break;

	case MUSB_EP0_STAGE_RX:
		/* irq on set rxpktrdy */
		if (csr & MUSB_CSR0_RXPKTRDY) {
			ep0_rxstate(musb);
			retval = IRQ_HANDLED;
		}
		break;

	case MUSB_EP0_STAGE_STATUSIN:
		/* end of sequence #2 (OUT/RX state) or #3 (no data) */

		/* update address (if needed) only @ the end of the
		 * status phase per usb spec, which also guarantees
		 * we get 10 msec to receive this irq... until this
		 * is done we won't see the next packet.
		 */
		if (musb->set_address) {
			musb->set_address = false;
			musb_writeb(mbase, MUSB_FADDR, musb->address);
		}

		/* enter test mode if needed (exit by reset) */
		else if (musb->test_mode) {
			dev_dbg(musb->controller, "entering TESTMODE\n");

			if (MUSB_TEST_PACKET == musb->test_mode_nr)
				musb_load_testpacket(musb);

			musb_writeb(mbase, MUSB_TESTMODE,
					musb->test_mode_nr);
		}
		/* FALLTHROUGH */

	case MUSB_EP0_STAGE_STATUSOUT:
		/* end of sequence #1: write to host (TX state) */
		{
			struct musb_request	*req;

			req = next_ep0_request(musb);
			if (req)
				musb_g_ep0_giveback(musb, &req->request);
		}

		/*
		 * In case when several interrupts can get coalesced,
		 * check to see if we've already received a SETUP packet...
		 */
		if (csr & MUSB_CSR0_RXPKTRDY)
			goto setup;

		retval = IRQ_HANDLED;
		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
		break;

	case MUSB_EP0_STAGE_IDLE:
		/*
		 * This state is typically (but not always) indiscernible
		 * from the status states since the corresponding interrupts
		 * tend to happen within too little period of time (with only
		 * a zero-length packet in between) and so get coalesced...
		 */
		retval = IRQ_HANDLED;
		musb->ep0_state = MUSB_EP0_STAGE_SETUP;
		/* FALLTHROUGH */

	case MUSB_EP0_STAGE_SETUP:
setup:
		if (csr & MUSB_CSR0_RXPKTRDY) {
			struct usb_ctrlrequest	setup;
			int			handled = 0;

			if (len != 8) {
				ERR("SETUP packet len %d != 8 ?\n", len);
				break;
			}
			musb_read_setup(musb, &setup);
			retval = IRQ_HANDLED;

			/* sometimes the RESET won't be reported */
			if (unlikely(musb->g.speed == USB_SPEED_UNKNOWN)) {
				u8	power;

				printk(KERN_NOTICE "%s: peripheral reset "
						"irq lost!\n",
						musb_driver_name);
				power = musb_readb(mbase, MUSB_POWER);
				musb->g.speed = (power & MUSB_POWER_HSMODE)
					? USB_SPEED_HIGH : USB_SPEED_FULL;

			}

			switch (musb->ep0_state) {

			/* sequence #3 (no data stage), includes requests
			 * we can't forward (notably SET_ADDRESS and the
			 * device/endpoint feature set/clear operations)
			 * plus SET_CONFIGURATION and others we must
			 */
			case MUSB_EP0_STAGE_ACKWAIT:
				handled = service_zero_data_request(
						musb, &setup);

				/*
				 * We're expecting no data in any case, so
				 * always set the DATAEND bit -- doing this
				 * here helps avoid SetupEnd interrupt coming
				 * in the idle stage when we're stalling...
				 */
				musb->ackpend |= MUSB_CSR0_P_DATAEND;

				/* status stage might be immediate */
				if (handled > 0)
					musb->ep0_state =
						MUSB_EP0_STAGE_STATUSIN;
				break;

			/* sequence #1 (IN to host), includes GET_STATUS
			 * requests that we can't forward, GET_DESCRIPTOR
			 * and others that we must
			 */
			case MUSB_EP0_STAGE_TX:
				handled = service_in_request(musb, &setup);
				if (handled > 0) {
					musb->ackpend = MUSB_CSR0_TXPKTRDY
						| MUSB_CSR0_P_DATAEND;
					musb->ep0_state =
						MUSB_EP0_STAGE_STATUSOUT;
				}
				break;

			/* sequence #2 (OUT from host), always forward */
			default:		/* MUSB_EP0_STAGE_RX */
				break;
			}

			dev_dbg(musb->controller, "handled %d, csr %04x, ep0stage %s\n",
				handled, csr,
				decode_ep0stage(musb->ep0_state));

			/* unless we need to delegate this to the gadget
			 * driver, we know how to wrap this up:  csr0 has
			 * not yet been written.
			 */
			if (handled < 0)
				goto stall;
			else if (handled > 0)
				goto finish;

			handled = forward_to_driver(musb, &setup);
			if (handled < 0) {
				musb_ep_select(mbase, 0);
stall:
				dev_dbg(musb->controller, "stall (%d)\n", handled);
				musb->ackpend |= MUSB_CSR0_P_SENDSTALL;
				musb->ep0_state = MUSB_EP0_STAGE_IDLE;
finish:
				musb_writew(regs, MUSB_CSR0,
						musb->ackpend);
				musb->ackpend = 0;
			}
		}
		break;

	case MUSB_EP0_STAGE_ACKWAIT:
		/* This should not happen. But happens with tusb6010 with
		 * g_file_storage and high speed. Do nothing.
		 */
		retval = IRQ_HANDLED;
		break;

	default:
		/* "can't happen" */
		WARN_ON(1);
		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL);
		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
		break;
	}

	return retval;
}

static int
musb_g_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	/* always enabled */
	return -EINVAL;
}

static int musb_g_ep0_disable(struct usb_ep *e)
{
	/* always enabled */
	return -EINVAL;
}

static int
musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
{
	struct musb_ep		*ep;
	struct musb_request	*req;
	struct musb		*musb;
	int			status;
	unsigned long		lockflags;
	void __iomem		*regs;

	if (!e || !r)
		return -EINVAL;

	ep = to_musb_ep(e);
	musb = ep->musb;
	regs = musb->control_ep->regs;

	req = to_musb_request(r);
	req->musb = musb;
	req->request.actual = 0;
	req->request.status = -EINPROGRESS;
	req->tx = ep->is_in;

	spin_lock_irqsave(&musb->lock, lockflags);

	/* ep0 carries at most one request at a time */
	if (!list_empty(&ep->req_list)) {
		status = -EBUSY;
		goto cleanup;
	}

	switch (musb->ep0_state) {
	case MUSB_EP0_STAGE_RX:		/* control-OUT data */
	case MUSB_EP0_STAGE_TX:		/* control-IN data */
	case MUSB_EP0_STAGE_ACKWAIT:	/* zero-length data */
		status = 0;
		break;
	default:
		dev_dbg(musb->controller, "ep0 request queued in state %d\n",
				musb->ep0_state);
		status = -EINVAL;
		goto cleanup;
	}

	/* add request to the list */
	list_add_tail(&req->list, &ep->req_list);

	dev_dbg(musb->controller, "queue to %s (%s), length=%d\n",
			ep->name, ep->is_in ? "IN/TX" : "OUT/RX",
			req->request.length);

	musb_ep_select(musb->mregs, 0);

	/* sequence #1, IN ... start writing the data */
	if (musb->ep0_state == MUSB_EP0_STAGE_TX)
		ep0_txstate(musb);

	/* sequence #3, no-data ... issue IN status */
	else if (musb->ep0_state == MUSB_EP0_STAGE_ACKWAIT) {
		if (req->request.length)
			status = -EINVAL;
		else {
			musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
			musb_writew(regs, MUSB_CSR0,
					musb->ackpend | MUSB_CSR0_P_DATAEND);
			musb->ackpend = 0;
			musb_g_ep0_giveback(ep->musb, r);
		}

	/* else for sequence #2 (OUT), caller provides a buffer
	 * before the next packet arrives.  deferred responses
	 * (after SETUP is acked) are racey.
	 */
	} else if (musb->ackpend) {
		musb_writew(regs, MUSB_CSR0, musb->ackpend);
		musb->ackpend = 0;
	}

cleanup:
	spin_unlock_irqrestore(&musb->lock, lockflags);
	return status;
}

static int musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	/* we just won't support this */
	return -EINVAL;
}

static int musb_g_ep0_halt(struct usb_ep *e, int value)
{
	struct musb_ep		*ep;
	struct musb		*musb;
	void __iomem		*base, *regs;
	unsigned long		flags;
	int			status;
	u16			csr;

	/* ep0 halt may only be set (value != 0), never cleared here */
	if (!e || !value)
		return -EINVAL;

	ep = to_musb_ep(e);
	musb = ep->musb;
	base = musb->mregs;
	regs = musb->control_ep->regs;
	status = 0;

	spin_lock_irqsave(&musb->lock, flags);

	if (!list_empty(&ep->req_list)) {
		status = -EBUSY;
		goto cleanup;
	}

	musb_ep_select(base, 0);
	csr = musb->ackpend;

	switch (musb->ep0_state) {

	/* Stalls are usually issued after parsing SETUP packet, either
	 * directly in irq context from setup() or else later.
	 */
	case MUSB_EP0_STAGE_TX:		/* control-IN data */
	case MUSB_EP0_STAGE_ACKWAIT:	/* STALL for zero-length data */
	case MUSB_EP0_STAGE_RX:		/* control-OUT data */
		csr = musb_readw(regs, MUSB_CSR0);
		/* FALLTHROUGH */

	/* It's also OK to issue stalls during callbacks when a non-empty
	 * DATA stage buffer has been read (or even written).
	 */
	case MUSB_EP0_STAGE_STATUSIN:	/* control-OUT status */
	case MUSB_EP0_STAGE_STATUSOUT:	/* control-IN status */

		csr |= MUSB_CSR0_P_SENDSTALL;
		musb_writew(regs, MUSB_CSR0, csr);
		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
		musb->ackpend = 0;
		break;
	default:
		dev_dbg(musb->controller, "ep0 can't halt in state %d\n",
				musb->ep0_state);
		status = -EINVAL;
	}

cleanup:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

/* endpoint operations exported for ep0; alloc/free are shared with the
 * regular endpoints in musb_gadget.c
 */
const struct usb_ep_ops musb_g_ep0_ops = {
	.enable		= musb_g_ep0_enable,
	.disable	= musb_g_ep0_disable,
	.alloc_request	= musb_alloc_request,
	.free_request	= musb_free_request,
	.queue		= musb_g_ep0_queue,
	.dequeue	= musb_g_ep0_dequeue,
	.set_halt	= musb_g_ep0_halt,
};
gpl-2.0
hallovveen31/smooth
drivers/staging/lirc/lirc_parallel.c
2381
16173
/* * lirc_parallel.c * * lirc_parallel - device driver for infra-red signal receiving and * transmitting unit built by the author * * Copyright (C) 1998 Christoph Bartelmus <lirc@bartelmus.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /*** Includes ***/ #include <linux/module.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/signal.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/ioport.h> #include <linux/time.h> #include <linux/mm.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/uaccess.h> #include <asm/div64.h> #include <linux/poll.h> #include <linux/parport.h> #include <linux/platform_device.h> #include <media/lirc.h> #include <media/lirc_dev.h> #include "lirc_parallel.h" #define LIRC_DRIVER_NAME "lirc_parallel" #ifndef LIRC_IRQ #define LIRC_IRQ 7 #endif #ifndef LIRC_PORT #define LIRC_PORT 0x378 #endif #ifndef LIRC_TIMER #define LIRC_TIMER 65536 #endif /*** Global Variables ***/ static int debug; static int check_pselecd; unsigned int irq = LIRC_IRQ; unsigned int io = LIRC_PORT; #ifdef LIRC_TIMER unsigned int timer; unsigned int default_timer = LIRC_TIMER; #endif #define RBUF_SIZE (256) /* this must be a power of 2 larger than 1 */ static int rbuf[RBUF_SIZE]; DECLARE_WAIT_QUEUE_HEAD(lirc_wait); unsigned int rptr; unsigned int wptr; 
unsigned int lost_irqs;	/* count of samples dropped on ring-buffer overrun */
int is_open;		/* device node currently open (single-open driver) */

struct parport *pport;
struct pardevice *ppdevice;
int is_claimed;		/* we currently own the parallel port */

unsigned int tx_mask = 1;	/* bitmask of data lines driven on transmit */

/*** Internal Functions ***/

/* Read one of the three parallel-port registers through the parport layer. */
static unsigned int in(int offset)
{
	switch (offset) {
	case LIRC_LP_BASE:
		return parport_read_data(pport);
	case LIRC_LP_STATUS:
		return parport_read_status(pport);
	case LIRC_LP_CONTROL:
		return parport_read_control(pport);
	}
	return 0; /* make compiler happy */
}

/* Write the data or control register; the status register is read-only. */
static void out(int offset, int value)
{
	switch (offset) {
	case LIRC_LP_BASE:
		parport_write_data(pport, value);
		break;
	case LIRC_LP_CONTROL:
		parport_write_control(pport, value);
		break;
	case LIRC_LP_STATUS:
		printk(KERN_INFO "%s: attempt to write to status register\n",
		       LIRC_DRIVER_NAME);
		break;
	}
}

/* Sample the external timer line of the receiver hardware. */
static unsigned int lirc_get_timer(void)
{
	return in(LIRC_PORT_TIMER) & LIRC_PORT_TIMER_BIT;
}

/* Sample the demodulated IR signal line. */
static unsigned int lirc_get_signal(void)
{
	return in(LIRC_PORT_SIGNAL) & LIRC_PORT_SIGNAL_BIT;
}

/* Drive the transmit LED(s) selected by tx_mask. */
static void lirc_on(void)
{
	out(LIRC_PORT_DATA, tx_mask);
}

/* Release all transmit data lines. */
static void lirc_off(void)
{
	out(LIRC_PORT_DATA, 0);
}

/* Measure the frequency of the external timer by counting rising edges
 * for up to one second.  Returns the detected frequency in Hz, the
 * configured default if the measurement deviates too much from it, or 0
 * when no timer signal was seen (device off or not connected).
 */
static unsigned int init_lirc_timer(void)
{
	struct timeval tv, now;
	unsigned int level, newlevel, timeelapsed, newtimer;
	int count = 0;

	do_gettimeofday(&tv);
	tv.tv_sec++;                     /* wait max. 1 sec. */
	level = lirc_get_timer();
	do {
		newlevel = lirc_get_timer();
		if (level == 0 && newlevel != 0)
			count++;	/* rising edge */
		level = newlevel;
		do_gettimeofday(&now);
	} while (count < 1000 && (now.tv_sec < tv.tv_sec
			     || (now.tv_sec == tv.tv_sec
				 && now.tv_usec < tv.tv_usec)));

	timeelapsed = ((now.tv_sec + 1 - tv.tv_sec)*1000000
		     + (now.tv_usec - tv.tv_usec));
	if (count >= 1000 && timeelapsed > 0) {
		if (default_timer == 0) {
			/* autodetect timer */
			newtimer = (1000000*count)/timeelapsed;
			printk(KERN_INFO "%s: %u Hz timer detected\n",
			       LIRC_DRIVER_NAME, newtimer);
			return newtimer;
		}  else {
			newtimer = (1000000*count)/timeelapsed;
			/* accept the measurement only within 10% of the
			 * configured default
			 */
			if (abs(newtimer - default_timer) > default_timer/10) {
				/* bad timer */
				printk(KERN_NOTICE "%s: bad timer: %u Hz\n",
				       LIRC_DRIVER_NAME, newtimer);
				printk(KERN_NOTICE "%s: using default timer: "
				       "%u Hz\n", LIRC_DRIVER_NAME,
				       default_timer);
				return default_timer;
			} else {
				printk(KERN_INFO "%s: %u Hz timer detected\n",
				       LIRC_DRIVER_NAME, newtimer);
				return newtimer; /* use detected value */
			}
		}
	} else {
		printk(KERN_NOTICE "%s: no timer detected\n", LIRC_DRIVER_NAME);
		return 0;
	}
}

/* Claim the parallel port, blocking until it becomes available if an
 * immediate claim fails.  Returns 1 on success, 0 on failure.
 */
static int lirc_claim(void)
{
	if (parport_claim(ppdevice) != 0) {
		printk(KERN_WARNING "%s: could not claim port\n",
		       LIRC_DRIVER_NAME);
		printk(KERN_WARNING "%s: waiting for port becoming available"
		       "\n", LIRC_DRIVER_NAME);
		if (parport_claim_or_block(ppdevice) < 0) {
			printk(KERN_NOTICE "%s: could not claim port, giving"
			       " up\n", LIRC_DRIVER_NAME);
			return 0;
		}
	}
	out(LIRC_LP_CONTROL, LP_PSELECP|LP_PINITP);
	is_claimed = 1;
	return 1;
}

/*** interrupt handler ***/

/* Push one pulse/space sample into the ring buffer; drops the sample and
 * counts a lost irq when the buffer is full (reader too slow).
 */
static void rbuf_write(int signal)
{
	unsigned int nwptr;

	nwptr = (wptr + 1) & (RBUF_SIZE - 1);
	if (nwptr == rptr) {
		/* no new signals will be accepted */
		lost_irqs++;
		printk(KERN_NOTICE "%s: buffer overrun\n", LIRC_DRIVER_NAME);
		return;
	}
	rbuf[wptr] = signal;
	wptr = nwptr;
}

/* Parallel-port interrupt: one irq per IR pulse.  Records the space
 * duration since the previous pulse (via gettimeofday deltas), then
 * busy-measures the pulse length by counting external-timer edges while
 * the signal line stays active.
 */
static void irq_handler(void *blah)
{
	struct timeval tv;
	static struct timeval lasttv;	/* timestamp of the previous pulse */
	static int init;		/* set once lasttv is valid */
	long signal;
	int data;
	unsigned int level, newlevel;
	unsigned int timeout;

	if (!is_open)
		return;

	if (!is_claimed)
		return;

#if 0
	/* disable interrupt */
	disable_irq(irq);
	out(LIRC_PORT_IRQ, in(LIRC_PORT_IRQ) & (~LP_PINTEN));
#endif
	if (check_pselecd && (in(1) & LP_PSELECD))
		return;

#ifdef LIRC_TIMER
	if (init) {
		do_gettimeofday(&tv);

		signal = tv.tv_sec - lasttv.tv_sec;
		if (signal > 15)
			/* really long time */
			data = PULSE_MASK;
		else
			data = (int) (signal*1000000 +
					 tv.tv_usec - lasttv.tv_usec +
					 LIRC_SFH506_DELAY);

		rbuf_write(data); /* space */
	} else {
		if (timer == 0) {
			/*
			 * wake up; we'll lose this signal, but it will be
			 * garbage if the device is turned on anyway
			 */
			timer = init_lirc_timer();
			/* enable_irq(irq); */
			return;
		}
		init = 1;
	}

	timeout = timer/10;	/* timeout after 1/10 sec. */
	signal = 1;
	level = lirc_get_timer();
	do {
		newlevel = lirc_get_timer();
		if (level == 0 && newlevel != 0)
			signal++;	/* count timer edges during pulse */
		level = newlevel;

		/* giving up */
		if (signal > timeout
		    || (check_pselecd && (in(1) & LP_PSELECD))) {
			signal = 0;
			printk(KERN_NOTICE "%s: timeout\n", LIRC_DRIVER_NAME);
			break;
		}
	} while (lirc_get_signal());

	if (signal != 0) {
		/* adjust value to usecs */
		__u64 helper;

		helper = ((__u64) signal)*1000000;
		do_div(helper, timer);
		signal = (long) helper;

		if (signal > LIRC_SFH506_DELAY)
			data = signal - LIRC_SFH506_DELAY;
		else
			data = 1;
		rbuf_write(PULSE_BIT|data); /* pulse */
	}
	do_gettimeofday(&lasttv);
#else
	/* add your code here */
#endif

	wake_up_interruptible(&lirc_wait);

	/* enable interrupt */
	/*
	  enable_irq(irq);
	  out(LIRC_PORT_IRQ, in(LIRC_PORT_IRQ)|LP_PINTEN);
	*/
}

/*** file operations ***/

/* The device is a sample stream; seeking is meaningless. */
static loff_t lirc_lseek(struct file *filep, loff_t offset, int orig)
{
	return -ESPIPE;
}

/* Copy whole int-sized samples from the ring buffer to userspace,
 * blocking (unless O_NONBLOCK) until at least one sample is available.
 */
static ssize_t lirc_read(struct file *filep, char *buf, size_t n,
			 loff_t *ppos)
{
	int result = 0;
	int count = 0;
	DECLARE_WAITQUEUE(wait, current);

	if (n % sizeof(int))
		return -EINVAL;

	add_wait_queue(&lirc_wait, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (count < n) {
		if (rptr != wptr) {
			if (copy_to_user(buf+count, (char *) &rbuf[rptr],
					 sizeof(int))) {
				result = -EFAULT;
				break;
			}
			rptr = (rptr + 1) & (RBUF_SIZE - 1);
			count += sizeof(int);
		} else {
			if (filep->f_flags & O_NONBLOCK) {
				result = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				result = -ERESTARTSYS;
				break;
			}
			schedule();
			set_current_state(TASK_INTERRUPTIBLE);
		}
	}
	remove_wait_queue(&lirc_wait, &wait);
	set_current_state(TASK_RUNNING);
	return count ? count : result;
}

/* Transmit a pulse/space sequence: an odd number of int durations in
 * microseconds (pulse, space, ..., pulse).  Bit-bangs the output with
 * interrupts disabled, pacing each duration by counting external-timer
 * edges.
 */
static ssize_t lirc_write(struct file *filep, const char *buf, size_t n,
			  loff_t *ppos)
{
	int count;
	unsigned int i;
	unsigned int level, newlevel;
	unsigned long flags;
	int counttimer;
	int *wbuf;
	ssize_t ret;

	if (!is_claimed)
		return -EBUSY;

	count = n / sizeof(int);

	/* sequences must end with a pulse, hence an odd sample count */
	if (n % sizeof(int) || count % 2 == 0)
		return -EINVAL;

	wbuf = memdup_user(buf, n);
	if (IS_ERR(wbuf))
		return PTR_ERR(wbuf);

#ifdef LIRC_TIMER
	if (timer == 0) {
		/* try again if device is ready */
		timer = init_lirc_timer();
		if (timer == 0) {
			ret = -EIO;
			goto out;
		}
	}

	/* adjust values from usecs */
	for (i = 0; i < count; i++) {
		__u64 helper;

		helper = ((__u64) wbuf[i])*timer;
		do_div(helper, 1000000);
		wbuf[i] = (int) helper;
	}

	local_irq_save(flags);
	i = 0;
	while (i < count) {
		level = lirc_get_timer();
		counttimer = 0;
		lirc_on();
		do {
			newlevel = lirc_get_timer();
			if (level == 0 && newlevel != 0)
				counttimer++;
			level = newlevel;
			if (check_pselecd && (in(1) & LP_PSELECD)) {
				lirc_off();
				local_irq_restore(flags);
				ret = -EIO;
				goto out;
			}
		} while (counttimer < wbuf[i]);
		i++;

		lirc_off();
		if (i == count)
			break;
		counttimer = 0;
		do {
			newlevel = lirc_get_timer();
			if (level == 0 && newlevel != 0)
				counttimer++;
			level = newlevel;
			if (check_pselecd && (in(1) & LP_PSELECD)) {
				local_irq_restore(flags);
				ret = -EIO;
				goto out;
			}
		} while (counttimer < wbuf[i]);
		i++;
	}
	local_irq_restore(flags);
#else
	/* place code that handles write without external timer here */
#endif
	ret = n;
out:
	kfree(wbuf);

	return ret;
}

/* Report readability when the ring buffer holds at least one sample. */
static unsigned int lirc_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &lirc_wait, wait);
	if (rptr != wptr)
		return POLLIN | POLLRDNORM;
	return 0;
}
static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { int result; __u32 features = LIRC_CAN_SET_TRANSMITTER_MASK | LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2; __u32 mode; __u32 value; switch (cmd) { case LIRC_GET_FEATURES: result = put_user(features, (__u32 *) arg); if (result) return result; break; case LIRC_GET_SEND_MODE: result = put_user(LIRC_MODE_PULSE, (__u32 *) arg); if (result) return result; break; case LIRC_GET_REC_MODE: result = put_user(LIRC_MODE_MODE2, (__u32 *) arg); if (result) return result; break; case LIRC_SET_SEND_MODE: result = get_user(mode, (__u32 *) arg); if (result) return result; if (mode != LIRC_MODE_PULSE) return -EINVAL; break; case LIRC_SET_REC_MODE: result = get_user(mode, (__u32 *) arg); if (result) return result; if (mode != LIRC_MODE_MODE2) return -ENOSYS; break; case LIRC_SET_TRANSMITTER_MASK: result = get_user(value, (__u32 *) arg); if (result) return result; if ((value & LIRC_PARALLEL_TRANSMITTER_MASK) != value) return LIRC_PARALLEL_MAX_TRANSMITTERS; tx_mask = value; break; default: return -ENOIOCTLCMD; } return 0; } static int lirc_open(struct inode *node, struct file *filep) { if (is_open || !lirc_claim()) return -EBUSY; parport_enable_irq(pport); /* init read ptr */ rptr = 0; wptr = 0; lost_irqs = 0; is_open = 1; return 0; } static int lirc_close(struct inode *node, struct file *filep) { if (is_claimed) { is_claimed = 0; parport_release(ppdevice); } is_open = 0; return 0; } static const struct file_operations lirc_fops = { .owner = THIS_MODULE, .llseek = lirc_lseek, .read = lirc_read, .write = lirc_write, .poll = lirc_poll, .unlocked_ioctl = lirc_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = lirc_ioctl, #endif .open = lirc_open, .release = lirc_close }; static int set_use_inc(void *data) { return 0; } static void set_use_dec(void *data) { } static struct lirc_driver driver = { .name = LIRC_DRIVER_NAME, .minor = -1, .code_length = 1, .sample_rate = 0, .data = NULL, .add_to_buf = NULL, .set_use_inc = 
set_use_inc, .set_use_dec = set_use_dec, .fops = &lirc_fops, .dev = NULL, .owner = THIS_MODULE, }; static struct platform_device *lirc_parallel_dev; static int __devinit lirc_parallel_probe(struct platform_device *dev) { return 0; } static int __devexit lirc_parallel_remove(struct platform_device *dev) { return 0; } static int lirc_parallel_suspend(struct platform_device *dev, pm_message_t state) { return 0; } static int lirc_parallel_resume(struct platform_device *dev) { return 0; } static struct platform_driver lirc_parallel_driver = { .probe = lirc_parallel_probe, .remove = __devexit_p(lirc_parallel_remove), .suspend = lirc_parallel_suspend, .resume = lirc_parallel_resume, .driver = { .name = LIRC_DRIVER_NAME, .owner = THIS_MODULE, }, }; static int pf(void *handle); static void kf(void *handle); static int pf(void *handle) { parport_disable_irq(pport); is_claimed = 0; return 0; } static void kf(void *handle) { if (!is_open) return; if (!lirc_claim()) return; parport_enable_irq(pport); lirc_off(); /* this is a bit annoying when you actually print...*/ /* printk(KERN_INFO "%s: reclaimed port\n", LIRC_DRIVER_NAME); */ } /*** module initialization and cleanup ***/ static int __init lirc_parallel_init(void) { int result; result = platform_driver_register(&lirc_parallel_driver); if (result) { printk(KERN_NOTICE "platform_driver_register" " returned %d\n", result); return result; } lirc_parallel_dev = platform_device_alloc(LIRC_DRIVER_NAME, 0); if (!lirc_parallel_dev) { result = -ENOMEM; goto exit_driver_unregister; } result = platform_device_add(lirc_parallel_dev); if (result) goto exit_device_put; pport = parport_find_base(io); if (pport == NULL) { printk(KERN_NOTICE "%s: no port at %x found\n", LIRC_DRIVER_NAME, io); result = -ENXIO; goto exit_device_put; } ppdevice = parport_register_device(pport, LIRC_DRIVER_NAME, pf, kf, irq_handler, 0, NULL); parport_put_port(pport); if (ppdevice == NULL) { printk(KERN_NOTICE "%s: parport_register_device() failed\n", 
LIRC_DRIVER_NAME); result = -ENXIO; goto exit_device_put; } if (parport_claim(ppdevice) != 0) goto skip_init; is_claimed = 1; out(LIRC_LP_CONTROL, LP_PSELECP|LP_PINITP); #ifdef LIRC_TIMER if (debug) out(LIRC_PORT_DATA, tx_mask); timer = init_lirc_timer(); #if 0 /* continue even if device is offline */ if (timer == 0) { is_claimed = 0; parport_release(pport); parport_unregister_device(ppdevice); result = -EIO; goto exit_device_put; } #endif if (debug) out(LIRC_PORT_DATA, 0); #endif is_claimed = 0; parport_release(ppdevice); skip_init: driver.dev = &lirc_parallel_dev->dev; driver.minor = lirc_register_driver(&driver); if (driver.minor < 0) { printk(KERN_NOTICE "%s: register_chrdev() failed\n", LIRC_DRIVER_NAME); parport_unregister_device(ppdevice); result = -EIO; goto exit_device_put; } printk(KERN_INFO "%s: installed using port 0x%04x irq %d\n", LIRC_DRIVER_NAME, io, irq); return 0; exit_device_put: platform_device_put(lirc_parallel_dev); exit_driver_unregister: platform_driver_unregister(&lirc_parallel_driver); return result; } static void __exit lirc_parallel_exit(void) { parport_unregister_device(ppdevice); lirc_unregister_driver(driver.minor); } module_init(lirc_parallel_init); module_exit(lirc_parallel_exit); MODULE_DESCRIPTION("Infrared receiver driver for parallel ports."); MODULE_AUTHOR("Christoph Bartelmus"); MODULE_LICENSE("GPL"); module_param(io, int, S_IRUGO); MODULE_PARM_DESC(io, "I/O address base (0x3bc, 0x378 or 0x278)"); module_param(irq, int, S_IRUGO); MODULE_PARM_DESC(irq, "Interrupt (7 or 5)"); module_param(tx_mask, int, S_IRUGO); MODULE_PARM_DESC(tx_maxk, "Transmitter mask (default: 0x01)"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Enable debugging messages"); module_param(check_pselecd, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Check for printer (default: 0)");
gpl-2.0
AdiPat/android_kernel_htc_pico
drivers/video/nuc900fb.c
2637
19488
/* * * Copyright (c) 2009 Nuvoton technology corporation * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Description: * Nuvoton LCD Controller Driver * Author: * Wang Qiang (rurality.linux@gmail.com) 2009/12/11 */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/tty.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/wait.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/cpufreq.h> #include <linux/io.h> #include <linux/pm.h> #include <linux/device.h> #include <mach/map.h> #include <mach/regs-clock.h> #include <mach/regs-ldm.h> #include <mach/fb.h> #include <mach/clkdev.h> #include "nuc900fb.h" /* * Initialize the nuc900 video (dual) buffer address */ static void nuc900fb_set_lcdaddr(struct fb_info *info) { struct nuc900fb_info *fbi = info->par; void __iomem *regs = fbi->io; unsigned long vbaddr1, vbaddr2; vbaddr1 = info->fix.smem_start; vbaddr2 = info->fix.smem_start; vbaddr2 += info->fix.line_length * info->var.yres; /* set frambuffer start phy addr*/ writel(vbaddr1, regs + REG_LCM_VA_BADDR0); writel(vbaddr2, regs + REG_LCM_VA_BADDR1); writel(fbi->regs.lcd_va_fbctrl, regs + REG_LCM_VA_FBCTRL); writel(fbi->regs.lcd_va_scale, regs + REG_LCM_VA_SCALE); } /* * calculate divider for lcd div */ static unsigned int nuc900fb_calc_pixclk(struct nuc900fb_info *fbi, unsigned long pixclk) { unsigned long clk = fbi->clk_rate; unsigned long long div; /* pixclk is in picseconds. 
our clock is in Hz*/ /* div = (clk * pixclk)/10^12 */ div = (unsigned long long)clk * pixclk; div >>= 12; do_div(div, 625 * 625UL * 625); dev_dbg(fbi->dev, "pixclk %ld, divisor is %lld\n", pixclk, div); return div; } /* * Check the video params of 'var'. */ static int nuc900fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct nuc900fb_info *fbi = info->par; struct nuc900fb_mach_info *mach_info = fbi->dev->platform_data; struct nuc900fb_display *display = NULL; struct nuc900fb_display *default_display = mach_info->displays + mach_info->default_display; int i; dev_dbg(fbi->dev, "check_var(var=%p, info=%p)\n", var, info); /* validate x/y resolution */ /* choose default mode if possible */ if (var->xres == default_display->xres && var->yres == default_display->yres && var->bits_per_pixel == default_display->bpp) display = default_display; else for (i = 0; i < mach_info->num_displays; i++) if (var->xres == mach_info->displays[i].xres && var->yres == mach_info->displays[i].yres && var->bits_per_pixel == mach_info->displays[i].bpp) { display = mach_info->displays + i; break; } if (display == NULL) { printk(KERN_ERR "wrong resolution or depth %dx%d at %d bit per pixel\n", var->xres, var->yres, var->bits_per_pixel); return -EINVAL; } /* it should be the same size as the display */ var->xres_virtual = display->xres; var->yres_virtual = display->yres; var->height = display->height; var->width = display->width; /* copy lcd settings */ var->pixclock = display->pixclock; var->left_margin = display->left_margin; var->right_margin = display->right_margin; var->upper_margin = display->upper_margin; var->lower_margin = display->lower_margin; var->vsync_len = display->vsync_len; var->hsync_len = display->hsync_len; var->transp.offset = 0; var->transp.length = 0; fbi->regs.lcd_dccs = display->dccs; fbi->regs.lcd_device_ctrl = display->devctl; fbi->regs.lcd_va_fbctrl = display->fbctrl; fbi->regs.lcd_va_scale = display->scale; /* set R/G/B possions */ switch 
(var->bits_per_pixel) { case 1: case 2: case 4: case 8: default: var->red.offset = 0; var->red.length = var->bits_per_pixel; var->green = var->red; var->blue = var->red; break; case 12: var->red.length = 4; var->green.length = 4; var->blue.length = 4; var->red.offset = 8; var->green.offset = 4; var->blue.offset = 0; break; case 16: var->red.length = 5; var->green.length = 6; var->blue.length = 5; var->red.offset = 11; var->green.offset = 5; var->blue.offset = 0; break; case 18: var->red.length = 6; var->green.length = 6; var->blue.length = 6; var->red.offset = 12; var->green.offset = 6; var->blue.offset = 0; break; case 32: var->red.length = 8; var->green.length = 8; var->blue.length = 8; var->red.offset = 16; var->green.offset = 8; var->blue.offset = 0; break; } return 0; } /* * Calculate lcd register values from var setting & save into hw */ static void nuc900fb_calculate_lcd_regs(const struct fb_info *info, struct nuc900fb_hw *regs) { const struct fb_var_screeninfo *var = &info->var; int vtt = var->height + var->upper_margin + var->lower_margin; int htt = var->width + var->left_margin + var->right_margin; int hsync = var->width + var->right_margin; int vsync = var->height + var->lower_margin; regs->lcd_crtc_size = LCM_CRTC_SIZE_VTTVAL(vtt) | LCM_CRTC_SIZE_HTTVAL(htt); regs->lcd_crtc_dend = LCM_CRTC_DEND_VDENDVAL(var->height) | LCM_CRTC_DEND_HDENDVAL(var->width); regs->lcd_crtc_hr = LCM_CRTC_HR_EVAL(var->width + 5) | LCM_CRTC_HR_SVAL(var->width + 1); regs->lcd_crtc_hsync = LCM_CRTC_HSYNC_EVAL(hsync + var->hsync_len) | LCM_CRTC_HSYNC_SVAL(hsync); regs->lcd_crtc_vr = LCM_CRTC_VR_EVAL(vsync + var->vsync_len) | LCM_CRTC_VR_SVAL(vsync); } /* * Activate (set) the controller from the given framebuffer * information */ static void nuc900fb_activate_var(struct fb_info *info) { struct nuc900fb_info *fbi = info->par; void __iomem *regs = fbi->io; struct fb_var_screeninfo *var = &info->var; int clkdiv; clkdiv = nuc900fb_calc_pixclk(fbi, var->pixclock) - 1; if (clkdiv < 0) 
clkdiv = 0; nuc900fb_calculate_lcd_regs(info, &fbi->regs); /* set the new lcd registers*/ dev_dbg(fbi->dev, "new lcd register set:\n"); dev_dbg(fbi->dev, "dccs = 0x%08x\n", fbi->regs.lcd_dccs); dev_dbg(fbi->dev, "dev_ctl = 0x%08x\n", fbi->regs.lcd_device_ctrl); dev_dbg(fbi->dev, "crtc_size = 0x%08x\n", fbi->regs.lcd_crtc_size); dev_dbg(fbi->dev, "crtc_dend = 0x%08x\n", fbi->regs.lcd_crtc_dend); dev_dbg(fbi->dev, "crtc_hr = 0x%08x\n", fbi->regs.lcd_crtc_hr); dev_dbg(fbi->dev, "crtc_hsync = 0x%08x\n", fbi->regs.lcd_crtc_hsync); dev_dbg(fbi->dev, "crtc_vr = 0x%08x\n", fbi->regs.lcd_crtc_vr); writel(fbi->regs.lcd_device_ctrl, regs + REG_LCM_DEV_CTRL); writel(fbi->regs.lcd_crtc_size, regs + REG_LCM_CRTC_SIZE); writel(fbi->regs.lcd_crtc_dend, regs + REG_LCM_CRTC_DEND); writel(fbi->regs.lcd_crtc_hr, regs + REG_LCM_CRTC_HR); writel(fbi->regs.lcd_crtc_hsync, regs + REG_LCM_CRTC_HSYNC); writel(fbi->regs.lcd_crtc_vr, regs + REG_LCM_CRTC_VR); /* set lcd address pointers */ nuc900fb_set_lcdaddr(info); writel(fbi->regs.lcd_dccs, regs + REG_LCM_DCCS); } /* * Alters the hardware state. 
* */ static int nuc900fb_set_par(struct fb_info *info) { struct fb_var_screeninfo *var = &info->var; switch (var->bits_per_pixel) { case 32: case 24: case 18: case 16: case 12: info->fix.visual = FB_VISUAL_TRUECOLOR; break; case 1: info->fix.visual = FB_VISUAL_MONO01; break; default: info->fix.visual = FB_VISUAL_PSEUDOCOLOR; break; } info->fix.line_length = (var->xres_virtual * var->bits_per_pixel) / 8; /* activate this new configuration */ nuc900fb_activate_var(info); return 0; } static inline unsigned int chan_to_field(unsigned int chan, struct fb_bitfield *bf) { chan &= 0xffff; chan >>= 16 - bf->length; return chan << bf->offset; } static int nuc900fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { unsigned int val; switch (info->fix.visual) { case FB_VISUAL_TRUECOLOR: /* true-colour, use pseuo-palette */ if (regno < 16) { u32 *pal = info->pseudo_palette; val = chan_to_field(red, &info->var.red); val |= chan_to_field(green, &info->var.green); val |= chan_to_field(blue, &info->var.blue); pal[regno] = val; } break; default: return 1; /* unknown type */ } return 0; } /** * nuc900fb_blank * */ static int nuc900fb_blank(int blank_mode, struct fb_info *info) { return 0; } static struct fb_ops nuc900fb_ops = { .owner = THIS_MODULE, .fb_check_var = nuc900fb_check_var, .fb_set_par = nuc900fb_set_par, .fb_blank = nuc900fb_blank, .fb_setcolreg = nuc900fb_setcolreg, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; static inline void modify_gpio(void __iomem *reg, unsigned long set, unsigned long mask) { unsigned long tmp; tmp = readl(reg) & ~mask; writel(tmp | set, reg); } /* * Initialise LCD-related registers */ static int nuc900fb_init_registers(struct fb_info *info) { struct nuc900fb_info *fbi = info->par; struct nuc900fb_mach_info *mach_info = fbi->dev->platform_data; void __iomem *regs = fbi->io; /*reset the display engine*/ writel(0, regs + REG_LCM_DCCS); 
writel(readl(regs + REG_LCM_DCCS) | LCM_DCCS_ENG_RST, regs + REG_LCM_DCCS); ndelay(100); writel(readl(regs + REG_LCM_DCCS) & (~LCM_DCCS_ENG_RST), regs + REG_LCM_DCCS); ndelay(100); writel(0, regs + REG_LCM_DEV_CTRL); /* config gpio output */ modify_gpio(W90X900_VA_GPIO + 0x54, mach_info->gpio_dir, mach_info->gpio_dir_mask); modify_gpio(W90X900_VA_GPIO + 0x58, mach_info->gpio_data, mach_info->gpio_data_mask); return 0; } /* * Alloc the SDRAM region of NUC900 for the frame buffer. * The buffer should be a non-cached, non-buffered, memory region * to allow palette and pixel writes without flushing the cache. */ static int __init nuc900fb_map_video_memory(struct fb_info *info) { struct nuc900fb_info *fbi = info->par; dma_addr_t map_dma; unsigned long map_size = PAGE_ALIGN(info->fix.smem_len); dev_dbg(fbi->dev, "nuc900fb_map_video_memory(fbi=%p) map_size %lu\n", fbi, map_size); info->screen_base = dma_alloc_writecombine(fbi->dev, map_size, &map_dma, GFP_KERNEL); if (!info->screen_base) return -ENOMEM; memset(info->screen_base, 0x00, map_size); info->fix.smem_start = map_dma; return 0; } static inline void nuc900fb_unmap_video_memory(struct fb_info *info) { struct nuc900fb_info *fbi = info->par; dma_free_writecombine(fbi->dev, PAGE_ALIGN(info->fix.smem_len), info->screen_base, info->fix.smem_start); } static irqreturn_t nuc900fb_irqhandler(int irq, void *dev_id) { struct nuc900fb_info *fbi = dev_id; void __iomem *regs = fbi->io; void __iomem *irq_base = fbi->irq_base; unsigned long lcdirq = readl(regs + REG_LCM_INT_CS); if (lcdirq & LCM_INT_CS_DISP_F_STATUS) { writel(readl(irq_base) | 1<<30, irq_base); /* wait VA_EN low */ if ((readl(regs + REG_LCM_DCCS) & LCM_DCCS_SINGLE) == LCM_DCCS_SINGLE) while ((readl(regs + REG_LCM_DCCS) & LCM_DCCS_VA_EN) == LCM_DCCS_VA_EN) ; /* display_out-enable */ writel(readl(regs + REG_LCM_DCCS) | LCM_DCCS_DISP_OUT_EN, regs + REG_LCM_DCCS); /* va-enable*/ writel(readl(regs + REG_LCM_DCCS) | LCM_DCCS_VA_EN, regs + REG_LCM_DCCS); } else if 
(lcdirq & LCM_INT_CS_UNDERRUN_INT) { writel(readl(irq_base) | LCM_INT_CS_UNDERRUN_INT, irq_base); } else if (lcdirq & LCM_INT_CS_BUS_ERROR_INT) { writel(readl(irq_base) | LCM_INT_CS_BUS_ERROR_INT, irq_base); } return IRQ_HANDLED; } #ifdef CONFIG_CPU_FREQ static int nuc900fb_cpufreq_transition(struct notifier_block *nb, unsigned long val, void *data) { struct nuc900fb_info *info; struct fb_info *fbinfo; long delta_f; info = container_of(nb, struct nuc900fb_info, freq_transition); fbinfo = platform_get_drvdata(to_platform_device(info->dev)); delta_f = info->clk_rate - clk_get_rate(info->clk); if ((val == CPUFREQ_POSTCHANGE && delta_f > 0) || (val == CPUFREQ_PRECHANGE && delta_f < 0)) { info->clk_rate = clk_get_rate(info->clk); nuc900fb_activate_var(fbinfo); } return 0; } static inline int nuc900fb_cpufreq_register(struct nuc900fb_info *fbi) { fbi->freq_transition.notifier_call = nuc900fb_cpufreq_transition; return cpufreq_register_notifier(&fbi->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); } static inline void nuc900fb_cpufreq_deregister(struct nuc900fb_info *fbi) { cpufreq_unregister_notifier(&fbi->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); } #else static inline int nuc900fb_cpufreq_transition(struct notifier_block *nb, unsigned long val, void *data) { return 0; } static inline int nuc900fb_cpufreq_register(struct nuc900fb_info *fbi) { return 0; } static inline void nuc900fb_cpufreq_deregister(struct nuc900fb_info *info) { } #endif static char driver_name[] = "nuc900fb"; static int __devinit nuc900fb_probe(struct platform_device *pdev) { struct nuc900fb_info *fbi; struct nuc900fb_display *display; struct fb_info *fbinfo; struct nuc900fb_mach_info *mach_info; struct resource *res; int ret; int irq; int i; int size; dev_dbg(&pdev->dev, "devinit\n"); mach_info = pdev->dev.platform_data; if (mach_info == NULL) { dev_err(&pdev->dev, "no platform data for lcd, cannot attach\n"); return -EINVAL; } if (mach_info->default_display > mach_info->num_displays) { 
dev_err(&pdev->dev, "default display No. is %d but only %d displays \n", mach_info->default_display, mach_info->num_displays); return -EINVAL; } display = mach_info->displays + mach_info->default_display; irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "no irq for device\n"); return -ENOENT; } fbinfo = framebuffer_alloc(sizeof(struct nuc900fb_info), &pdev->dev); if (!fbinfo) return -ENOMEM; platform_set_drvdata(pdev, fbinfo); fbi = fbinfo->par; fbi->dev = &pdev->dev; #ifdef CONFIG_CPU_NUC950 fbi->drv_type = LCDDRV_NUC950; #endif res = platform_get_resource(pdev, IORESOURCE_MEM, 0); size = (res->end - res->start) + 1; fbi->mem = request_mem_region(res->start, size, pdev->name); if (fbi->mem == NULL) { dev_err(&pdev->dev, "failed to alloc memory region\n"); ret = -ENOENT; goto free_fb; } fbi->io = ioremap(res->start, size); if (fbi->io == NULL) { dev_err(&pdev->dev, "ioremap() of lcd registers failed\n"); ret = -ENXIO; goto release_mem_region; } fbi->irq_base = fbi->io + REG_LCM_INT_CS; /* Stop the LCD */ writel(0, fbi->io + REG_LCM_DCCS); /* fill the fbinfo*/ strcpy(fbinfo->fix.id, driver_name); fbinfo->fix.type = FB_TYPE_PACKED_PIXELS; fbinfo->fix.type_aux = 0; fbinfo->fix.xpanstep = 0; fbinfo->fix.ypanstep = 0; fbinfo->fix.ywrapstep = 0; fbinfo->fix.accel = FB_ACCEL_NONE; fbinfo->var.nonstd = 0; fbinfo->var.activate = FB_ACTIVATE_NOW; fbinfo->var.accel_flags = 0; fbinfo->var.vmode = FB_VMODE_NONINTERLACED; fbinfo->fbops = &nuc900fb_ops; fbinfo->flags = FBINFO_FLAG_DEFAULT; fbinfo->pseudo_palette = &fbi->pseudo_pal; ret = request_irq(irq, nuc900fb_irqhandler, IRQF_DISABLED, pdev->name, fbinfo); if (ret) { dev_err(&pdev->dev, "cannot register irq handler %d -err %d\n", irq, ret); ret = -EBUSY; goto release_regs; } fbi->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(fbi->clk)) { printk(KERN_ERR "nuc900-lcd:failed to get lcd clock source\n"); ret = PTR_ERR(fbi->clk); goto release_irq; } clk_enable(fbi->clk); dev_dbg(&pdev->dev, "got and enabled 
clock\n"); fbi->clk_rate = clk_get_rate(fbi->clk); /* calutate the video buffer size */ for (i = 0; i < mach_info->num_displays; i++) { unsigned long smem_len = mach_info->displays[i].xres; smem_len *= mach_info->displays[i].yres; smem_len *= mach_info->displays[i].bpp; smem_len >>= 3; if (fbinfo->fix.smem_len < smem_len) fbinfo->fix.smem_len = smem_len; } /* Initialize Video Memory */ ret = nuc900fb_map_video_memory(fbinfo); if (ret) { printk(KERN_ERR "Failed to allocate video RAM: %x\n", ret); goto release_clock; } dev_dbg(&pdev->dev, "got video memory\n"); fbinfo->var.xres = display->xres; fbinfo->var.yres = display->yres; fbinfo->var.bits_per_pixel = display->bpp; nuc900fb_init_registers(fbinfo); nuc900fb_check_var(&fbinfo->var, fbinfo); ret = nuc900fb_cpufreq_register(fbi); if (ret < 0) { dev_err(&pdev->dev, "Failed to register cpufreq\n"); goto free_video_memory; } ret = register_framebuffer(fbinfo); if (ret) { printk(KERN_ERR "failed to register framebuffer device: %d\n", ret); goto free_cpufreq; } printk(KERN_INFO "fb%d: %s frame buffer device\n", fbinfo->node, fbinfo->fix.id); return 0; free_cpufreq: nuc900fb_cpufreq_deregister(fbi); free_video_memory: nuc900fb_unmap_video_memory(fbinfo); release_clock: clk_disable(fbi->clk); clk_put(fbi->clk); release_irq: free_irq(irq, fbi); release_regs: iounmap(fbi->io); release_mem_region: release_mem_region(res->start, size); free_fb: framebuffer_release(fbinfo); return ret; } /* * shutdown the lcd controller */ static void nuc900fb_stop_lcd(struct fb_info *info) { struct nuc900fb_info *fbi = info->par; void __iomem *regs = fbi->io; writel((~LCM_DCCS_DISP_INT_EN) | (~LCM_DCCS_VA_EN) | (~LCM_DCCS_OSD_EN), regs + REG_LCM_DCCS); } /* * Cleanup */ static int nuc900fb_remove(struct platform_device *pdev) { struct fb_info *fbinfo = platform_get_drvdata(pdev); struct nuc900fb_info *fbi = fbinfo->par; int irq; nuc900fb_stop_lcd(fbinfo); msleep(1); unregister_framebuffer(fbinfo); nuc900fb_cpufreq_deregister(fbi); 
nuc900fb_unmap_video_memory(fbinfo); iounmap(fbi->io); irq = platform_get_irq(pdev, 0); free_irq(irq, fbi); release_resource(fbi->mem); kfree(fbi->mem); platform_set_drvdata(pdev, NULL); framebuffer_release(fbinfo); return 0; } #ifdef CONFIG_PM /* * suspend and resume support for the lcd controller */ static int nuc900fb_suspend(struct platform_device *dev, pm_message_t state) { struct fb_info *fbinfo = platform_get_drvdata(dev); struct nuc900fb_info *info = fbinfo->par; nuc900fb_stop_lcd(fbinfo); msleep(1); clk_disable(info->clk); return 0; } static int nuc900fb_resume(struct platform_device *dev) { struct fb_info *fbinfo = platform_get_drvdata(dev); struct nuc900fb_info *fbi = fbinfo->par; printk(KERN_INFO "nuc900fb resume\n"); clk_enable(fbi->clk); msleep(1); nuc900fb_init_registers(fbinfo); nuc900fb_activate_var(fbinfo); return 0; } #else #define nuc900fb_suspend NULL #define nuc900fb_resume NULL #endif static struct platform_driver nuc900fb_driver = { .probe = nuc900fb_probe, .remove = nuc900fb_remove, .suspend = nuc900fb_suspend, .resume = nuc900fb_resume, .driver = { .name = "nuc900-lcd", .owner = THIS_MODULE, }, }; int __devinit nuc900fb_init(void) { return platform_driver_register(&nuc900fb_driver); } static void __exit nuc900fb_cleanup(void) { platform_driver_unregister(&nuc900fb_driver); } module_init(nuc900fb_init); module_exit(nuc900fb_cleanup); MODULE_DESCRIPTION("Framebuffer driver for the NUC900"); MODULE_LICENSE("GPL");
gpl-2.0
dreamer7/ZOPO-TSN
drivers/net/tun.c
2637
39428
/* * TUN - Universal TUN/TAP device driver. * Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $ */ /* * Changes: * * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14 * Add TUNSETLINK ioctl to set the link encapsulation * * Mark Smith <markzzzsmith@yahoo.com.au> * Use random_ether_addr() for tap MAC address. * * Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20 * Fixes in packet dropping, queue length setting and queue wakeup. * Increased default tx queue length. * Added ethtool API. * Minor cleanups * * Daniel Podlejski <underley@underley.eu.org> * Modifications for 2.3.99-pre5 kernel. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define DRV_NAME "tun" #define DRV_VERSION "1.6" #define DRV_DESCRIPTION "Universal TUN/TAP device driver" #define DRV_COPYRIGHT "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>" #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/major.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/miscdevice.h> #include <linux/ethtool.h> #include <linux/rtnetlink.h> #include <linux/compat.h> #include <linux/if.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/if_tun.h> #include <linux/crc32.h> #include <linux/nsproxy.h> #include <linux/virtio_net.h> #include <linux/rcupdate.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/rtnetlink.h> #include <net/sock.h> #include <asm/uaccess.h> /* Uncomment to enable debugging */ /* #define TUN_DEBUG 1 */ #ifdef TUN_DEBUG static int debug; #define tun_debug(level, tun, fmt, args...) \ do { \ if (tun->debug) \ netdev_printk(level, tun->dev, fmt, ##args); \ } while (0) #define DBG1(level, fmt, args...) \ do { \ if (debug == 2) \ printk(level fmt, ##args); \ } while (0) #else #define tun_debug(level, tun, fmt, args...) \ do { \ if (0) \ netdev_printk(level, tun->dev, fmt, ##args); \ } while (0) #define DBG1(level, fmt, args...) \ do { \ if (0) \ printk(level fmt, ##args); \ } while (0) #endif #define FLT_EXACT_COUNT 8 struct tap_filter { unsigned int count; /* Number of addrs. 
Zero means disabled */ u32 mask[2]; /* Mask of the hashed addrs */ unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN]; }; struct tun_file { atomic_t count; struct tun_struct *tun; struct net *net; }; struct tun_sock; struct tun_struct { struct tun_file *tfile; unsigned int flags; uid_t owner; gid_t group; struct net_device *dev; netdev_features_t set_features; #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ NETIF_F_TSO6|NETIF_F_UFO) struct fasync_struct *fasync; struct tap_filter txflt; struct socket socket; struct socket_wq wq; int vnet_hdr_sz; #ifdef TUN_DEBUG int debug; #endif }; struct tun_sock { struct sock sk; struct tun_struct *tun; }; static inline struct tun_sock *tun_sk(struct sock *sk) { return container_of(sk, struct tun_sock, sk); } static int tun_attach(struct tun_struct *tun, struct file *file) { struct tun_file *tfile = file->private_data; int err; ASSERT_RTNL(); netif_tx_lock_bh(tun->dev); err = -EINVAL; if (tfile->tun) goto out; err = -EBUSY; if (tun->tfile) goto out; err = 0; tfile->tun = tun; tun->tfile = tfile; tun->socket.file = file; netif_carrier_on(tun->dev); dev_hold(tun->dev); sock_hold(tun->socket.sk); atomic_inc(&tfile->count); out: netif_tx_unlock_bh(tun->dev); return err; } static void __tun_detach(struct tun_struct *tun) { /* Detach from net device */ netif_tx_lock_bh(tun->dev); netif_carrier_off(tun->dev); tun->tfile = NULL; tun->socket.file = NULL; netif_tx_unlock_bh(tun->dev); /* Drop read queue */ skb_queue_purge(&tun->socket.sk->sk_receive_queue); /* Drop the extra count on the net device */ dev_put(tun->dev); } static void tun_detach(struct tun_struct *tun) { rtnl_lock(); __tun_detach(tun); rtnl_unlock(); } static struct tun_struct *__tun_get(struct tun_file *tfile) { struct tun_struct *tun = NULL; if (atomic_inc_not_zero(&tfile->count)) tun = tfile->tun; return tun; } static struct tun_struct *tun_get(struct file *file) { return __tun_get(file->private_data); } static void tun_put(struct tun_struct *tun) { 
struct tun_file *tfile = tun->tfile; if (atomic_dec_and_test(&tfile->count)) tun_detach(tfile->tun); } /* TAP filtering */ static void addr_hash_set(u32 *mask, const u8 *addr) { int n = ether_crc(ETH_ALEN, addr) >> 26; mask[n >> 5] |= (1 << (n & 31)); } static unsigned int addr_hash_test(const u32 *mask, const u8 *addr) { int n = ether_crc(ETH_ALEN, addr) >> 26; return mask[n >> 5] & (1 << (n & 31)); } static int update_filter(struct tap_filter *filter, void __user *arg) { struct { u8 u[ETH_ALEN]; } *addr; struct tun_filter uf; int err, alen, n, nexact; if (copy_from_user(&uf, arg, sizeof(uf))) return -EFAULT; if (!uf.count) { /* Disabled */ filter->count = 0; return 0; } alen = ETH_ALEN * uf.count; addr = kmalloc(alen, GFP_KERNEL); if (!addr) return -ENOMEM; if (copy_from_user(addr, arg + sizeof(uf), alen)) { err = -EFAULT; goto done; } /* The filter is updated without holding any locks. Which is * perfectly safe. We disable it first and in the worst * case we'll accept a few undesired packets. */ filter->count = 0; wmb(); /* Use first set of addresses as an exact filter */ for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++) memcpy(filter->addr[n], addr[n].u, ETH_ALEN); nexact = n; /* Remaining multicast addresses are hashed, * unicast will leave the filter disabled. */ memset(filter->mask, 0, sizeof(filter->mask)); for (; n < uf.count; n++) { if (!is_multicast_ether_addr(addr[n].u)) { err = 0; /* no filter */ goto done; } addr_hash_set(filter->mask, addr[n].u); } /* For ALLMULTI just set the mask to all ones. * This overrides the mask populated above. 
*/ if ((uf.flags & TUN_FLT_ALLMULTI)) memset(filter->mask, ~0, sizeof(filter->mask)); /* Now enable the filter */ wmb(); filter->count = nexact; /* Return the number of exact filters */ err = nexact; done: kfree(addr); return err; } /* Returns: 0 - drop, !=0 - accept */ static int run_filter(struct tap_filter *filter, const struct sk_buff *skb) { /* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect * at this point. */ struct ethhdr *eh = (struct ethhdr *) skb->data; int i; /* Exact match */ for (i = 0; i < filter->count; i++) if (!compare_ether_addr(eh->h_dest, filter->addr[i])) return 1; /* Inexact match (multicast only) */ if (is_multicast_ether_addr(eh->h_dest)) return addr_hash_test(filter->mask, eh->h_dest); return 0; } /* * Checks whether the packet is accepted or not. * Returns: 0 - drop, !=0 - accept */ static int check_filter(struct tap_filter *filter, const struct sk_buff *skb) { if (!filter->count) return 1; return run_filter(filter, skb); } /* Network device part of the driver */ static const struct ethtool_ops tun_ethtool_ops; /* Net device detach from fd. */ static void tun_net_uninit(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); struct tun_file *tfile = tun->tfile; /* Inform the methods they need to stop using the dev. */ if (tfile) { wake_up_all(&tun->wq.wait); if (atomic_dec_and_test(&tfile->count)) __tun_detach(tun); } } static void tun_free_netdev(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); sk_release_kernel(tun->socket.sk); } /* Net device open. */ static int tun_net_open(struct net_device *dev) { netif_start_queue(dev); return 0; } /* Net device close. 
*/ static int tun_net_close(struct net_device *dev) { netif_stop_queue(dev); return 0; } /* Net device start xmit */ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len); /* Drop packet if interface is not attached */ if (!tun->tfile) goto drop; /* Drop if the filter does not like it. * This is a noop if the filter is disabled. * Filter can be enabled only for the TAP devices. */ if (!check_filter(&tun->txflt, skb)) goto drop; if (tun->socket.sk->sk_filter && sk_filter(tun->socket.sk, skb)) goto drop; if (skb_queue_len(&tun->socket.sk->sk_receive_queue) >= dev->tx_queue_len) { if (!(tun->flags & TUN_ONE_QUEUE)) { /* Normal queueing mode. */ /* Packet scheduler handles dropping of further packets. */ netif_stop_queue(dev); /* We won't see all dropped packets individually, so overrun * error is more appropriate. */ dev->stats.tx_fifo_errors++; } else { /* Single queue mode. * Driver handles dropping of all packets itself. */ goto drop; } } /* Orphan the skb - required as we might hang on to it * for indefinite time. */ skb_orphan(skb); /* Enqueue packet */ skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb); /* Notify and wake up reader process */ if (tun->flags & TUN_FASYNC) kill_fasync(&tun->fasync, SIGIO, POLL_IN); wake_up_interruptible_poll(&tun->wq.wait, POLLIN | POLLRDNORM | POLLRDBAND); return NETDEV_TX_OK; drop: dev->stats.tx_dropped++; kfree_skb(skb); return NETDEV_TX_OK; } static void tun_net_mclist(struct net_device *dev) { /* * This callback is supposed to deal with mc filter in * _rx_ path and has nothing to do with the _tx_ path. * In rx path we always accept everything userspace gives us. 
*/ } #define MIN_MTU 68 #define MAX_MTU 65535 static int tun_net_change_mtu(struct net_device *dev, int new_mtu) { if (new_mtu < MIN_MTU || new_mtu + dev->hard_header_len > MAX_MTU) return -EINVAL; dev->mtu = new_mtu; return 0; } static netdev_features_t tun_net_fix_features(struct net_device *dev, netdev_features_t features) { struct tun_struct *tun = netdev_priv(dev); return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); } #ifdef CONFIG_NET_POLL_CONTROLLER static void tun_poll_controller(struct net_device *dev) { /* * Tun only receives frames when: * 1) the char device endpoint gets data from user space * 2) the tun socket gets a sendmsg call from user space * Since both of those are syncronous operations, we are guaranteed * never to have pending data when we poll for it * so theres nothing to do here but return. * We need this though so netpoll recognizes us as an interface that * supports polling, which enables bridge devices in virt setups to * still use netconsole */ return; } #endif static const struct net_device_ops tun_netdev_ops = { .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, .ndo_stop = tun_net_close, .ndo_start_xmit = tun_net_xmit, .ndo_change_mtu = tun_net_change_mtu, .ndo_fix_features = tun_net_fix_features, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = tun_poll_controller, #endif }; static const struct net_device_ops tap_netdev_ops = { .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, .ndo_stop = tun_net_close, .ndo_start_xmit = tun_net_xmit, .ndo_change_mtu = tun_net_change_mtu, .ndo_fix_features = tun_net_fix_features, .ndo_set_rx_mode = tun_net_mclist, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = tun_poll_controller, #endif }; /* Initialize net device. 
*/ static void tun_net_init(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); switch (tun->flags & TUN_TYPE_MASK) { case TUN_TUN_DEV: dev->netdev_ops = &tun_netdev_ops; /* Point-to-Point TUN Device */ dev->hard_header_len = 0; dev->addr_len = 0; dev->mtu = 1500; /* Zero header length */ dev->type = ARPHRD_NONE; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */ break; case TUN_TAP_DEV: dev->netdev_ops = &tap_netdev_ops; /* Ethernet TAP Device */ ether_setup(dev); dev->priv_flags &= ~IFF_TX_SKB_SHARING; eth_hw_addr_random(dev); dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */ break; } } /* Character device part */ /* Poll */ static unsigned int tun_chr_poll(struct file *file, poll_table * wait) { struct tun_file *tfile = file->private_data; struct tun_struct *tun = __tun_get(tfile); struct sock *sk; unsigned int mask = 0; if (!tun) return POLLERR; sk = tun->socket.sk; tun_debug(KERN_INFO, tun, "tun_chr_poll\n"); poll_wait(file, &tun->wq.wait, wait); if (!skb_queue_empty(&sk->sk_receive_queue)) mask |= POLLIN | POLLRDNORM; if (sock_writeable(sk) || (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) && sock_writeable(sk))) mask |= POLLOUT | POLLWRNORM; if (tun->dev->reg_state != NETREG_REGISTERED) mask = POLLERR; tun_put(tun); return mask; } /* prepad is the amount to reserve at front. len is length after that. * linear is a hint as to how much to copy (usually headers). */ static struct sk_buff *tun_alloc_skb(struct tun_struct *tun, size_t prepad, size_t len, size_t linear, int noblock) { struct sock *sk = tun->socket.sk; struct sk_buff *skb; int err; sock_update_classid(sk); /* Under a page? Don't bother with paged skb. 
*/ if (prepad + len < PAGE_SIZE || !linear) linear = len; skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, &err); if (!skb) return ERR_PTR(err); skb_reserve(skb, prepad); skb_put(skb, linear); skb->data_len = len - linear; skb->len += len - linear; return skb; } /* Get packet from user space buffer */ static ssize_t tun_get_user(struct tun_struct *tun, const struct iovec *iv, size_t count, int noblock) { struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; struct sk_buff *skb; size_t len = count, align = NET_SKB_PAD; struct virtio_net_hdr gso = { 0 }; int offset = 0; if (!(tun->flags & TUN_NO_PI)) { if ((len -= sizeof(pi)) > count) return -EINVAL; if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi))) return -EFAULT; offset += sizeof(pi); } if (tun->flags & TUN_VNET_HDR) { if ((len -= tun->vnet_hdr_sz) > count) return -EINVAL; if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) return -EFAULT; if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && gso.csum_start + gso.csum_offset + 2 > gso.hdr_len) gso.hdr_len = gso.csum_start + gso.csum_offset + 2; if (gso.hdr_len > len) return -EINVAL; offset += tun->vnet_hdr_sz; } if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) { align += NET_IP_ALIGN; if (unlikely(len < ETH_HLEN || (gso.hdr_len && gso.hdr_len < ETH_HLEN))) return -EINVAL; } skb = tun_alloc_skb(tun, align, len, gso.hdr_len, noblock); if (IS_ERR(skb)) { if (PTR_ERR(skb) != -EAGAIN) tun->dev->stats.rx_dropped++; return PTR_ERR(skb); } if (skb_copy_datagram_from_iovec(skb, 0, iv, offset, len)) { tun->dev->stats.rx_dropped++; kfree_skb(skb); return -EFAULT; } if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { if (!skb_partial_csum_set(skb, gso.csum_start, gso.csum_offset)) { tun->dev->stats.rx_frame_errors++; kfree_skb(skb); return -EINVAL; } } switch (tun->flags & TUN_TYPE_MASK) { case TUN_TUN_DEV: if (tun->flags & TUN_NO_PI) { switch (skb->data[0] & 0xf0) { case 0x40: pi.proto = htons(ETH_P_IP); break; case 0x60: pi.proto = 
htons(ETH_P_IPV6); break; default: tun->dev->stats.rx_dropped++; kfree_skb(skb); return -EINVAL; } } skb_reset_mac_header(skb); skb->protocol = pi.proto; skb->dev = tun->dev; break; case TUN_TAP_DEV: skb->protocol = eth_type_trans(skb, tun->dev); break; } if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) { pr_debug("GSO!\n"); switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { case VIRTIO_NET_HDR_GSO_TCPV4: skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; break; case VIRTIO_NET_HDR_GSO_TCPV6: skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; break; case VIRTIO_NET_HDR_GSO_UDP: skb_shinfo(skb)->gso_type = SKB_GSO_UDP; break; default: tun->dev->stats.rx_frame_errors++; kfree_skb(skb); return -EINVAL; } if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN) skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; skb_shinfo(skb)->gso_size = gso.gso_size; if (skb_shinfo(skb)->gso_size == 0) { tun->dev->stats.rx_frame_errors++; kfree_skb(skb); return -EINVAL; } /* Header must be checked, and gso_segs computed. */ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; skb_shinfo(skb)->gso_segs = 0; } netif_rx_ni(skb); tun->dev->stats.rx_packets++; tun->dev->stats.rx_bytes += len; return count; } static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv, unsigned long count, loff_t pos) { struct file *file = iocb->ki_filp; struct tun_struct *tun = tun_get(file); ssize_t result; if (!tun) return -EBADFD; tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count); result = tun_get_user(tun, iv, iov_length(iv, count), file->f_flags & O_NONBLOCK); tun_put(tun); return result; } /* Put packet to the user space buffer */ static ssize_t tun_put_user(struct tun_struct *tun, struct sk_buff *skb, const struct iovec *iv, int len) { struct tun_pi pi = { 0, skb->protocol }; ssize_t total = 0; if (!(tun->flags & TUN_NO_PI)) { if ((len -= sizeof(pi)) < 0) return -EINVAL; if (len < skb->len) { /* Packet will be striped */ pi.flags |= TUN_PKT_STRIP; } if (memcpy_toiovecend(iv, (void *) &pi, 0, sizeof(pi))) return -EFAULT; 
total += sizeof(pi); } if (tun->flags & TUN_VNET_HDR) { struct virtio_net_hdr gso = { 0 }; /* no info leak */ if ((len -= tun->vnet_hdr_sz) < 0) return -EINVAL; if (skb_is_gso(skb)) { struct skb_shared_info *sinfo = skb_shinfo(skb); /* This is a hint as to how much should be linear. */ gso.hdr_len = skb_headlen(skb); gso.gso_size = sinfo->gso_size; if (sinfo->gso_type & SKB_GSO_TCPV4) gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (sinfo->gso_type & SKB_GSO_TCPV6) gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; else if (sinfo->gso_type & SKB_GSO_UDP) gso.gso_type = VIRTIO_NET_HDR_GSO_UDP; else { pr_err("unexpected GSO type: " "0x%x, gso_size %d, hdr_len %d\n", sinfo->gso_type, gso.gso_size, gso.hdr_len); print_hex_dump(KERN_ERR, "tun: ", DUMP_PREFIX_NONE, 16, 1, skb->head, min((int)gso.hdr_len, 64), true); WARN_ON_ONCE(1); return -EINVAL; } if (sinfo->gso_type & SKB_GSO_TCP_ECN) gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN; } else gso.gso_type = VIRTIO_NET_HDR_GSO_NONE; if (skb->ip_summed == CHECKSUM_PARTIAL) { gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; gso.csum_start = skb_checksum_start_offset(skb); gso.csum_offset = skb->csum_offset; } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { gso.flags = VIRTIO_NET_HDR_F_DATA_VALID; } /* else everything is zero */ if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total, sizeof(gso)))) return -EFAULT; total += tun->vnet_hdr_sz; } len = min_t(int, skb->len, len); skb_copy_datagram_const_iovec(skb, 0, iv, total, len); total += skb->len; tun->dev->stats.tx_packets++; tun->dev->stats.tx_bytes += len; return total; } static ssize_t tun_do_read(struct tun_struct *tun, struct kiocb *iocb, const struct iovec *iv, ssize_t len, int noblock) { DECLARE_WAITQUEUE(wait, current); struct sk_buff *skb; ssize_t ret = 0; tun_debug(KERN_INFO, tun, "tun_chr_read\n"); if (unlikely(!noblock)) add_wait_queue(&tun->wq.wait, &wait); while (len) { current->state = TASK_INTERRUPTIBLE; /* Read frames from the queue */ if 
/* NOTE(review): this span continues the if-condition begun on the previous
 * source row ("... if") — tokens below are unchanged, only reformatted and
 * commented. */
(!(skb=skb_dequeue(&tun->socket.sk->sk_receive_queue))) {
			if (noblock) {
				ret = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			if (tun->dev->reg_state != NETREG_REGISTERED) {
				/* device is being torn down — stop reading */
				ret = -EIO;
				break;
			}

			/* Nothing to read, let's sleep */
			schedule();
			continue;
		}
		/* A frame was consumed; the queue has room again, so
		 * re-enable the netdev transmit path. */
		netif_wake_queue(tun->dev);

		ret = tun_put_user(tun, skb, iv, len);
		kfree_skb(skb);
		break;
	}

	current->state = TASK_RUNNING;
	if (unlikely(!noblock))
		remove_wait_queue(&tun->wq.wait, &wait);

	return ret;
}

/* read(2)/aio entry point for the tun character device: copy one queued
 * frame to the user iovec.  Returns at most iov_length(iv, count) bytes. */
static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
				unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = __tun_get(tfile);
	ssize_t len, ret;

	if (!tun)
		return -EBADFD;
	len = iov_length(iv, count);
	if (len < 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = tun_do_read(tun, iocb, iv, len, file->f_flags & O_NONBLOCK);
	/* Clamp: tun_put_user may report a total larger than the user
	 * buffer (packet was stripped). */
	ret = min_t(ssize_t, ret, len);
out:
	tun_put(tun);
	return ret;
}

/* netdev setup callback: initialize per-device defaults.
 * owner/group of -1 means "unrestricted" (checked in tun_set_iff). */
static void tun_setup(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	tun->owner = -1;
	tun->group = -1;

	dev->ethtool_ops = &tun_ethtool_ops;
	dev->destructor = tun_free_netdev;
}

/* Trivial set of netlink ops to allow deleting tun or tap
 * device with netlink.
 */
/* Always rejects RTM_NEWLINK: tun devices are created via the chardev
 * ioctl, netlink is only good for deleting them. */
static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
{
	return -EINVAL;
}

static struct rtnl_link_ops tun_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct tun_struct),
	.setup		= tun_setup,
	.validate	= tun_validate,
};

/* sk->sk_write_space hook: wake poll()ers and send SIGIO once the socket
 * becomes writeable again. */
static void tun_sock_write_space(struct sock *sk)
{
	struct tun_struct *tun;
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk))
		return;

	/* only notify once per no-space episode */
	if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_sync_poll(wqueue, POLLOUT |
						POLLWRNORM | POLLWRBAND);

	tun = tun_sk(sk)->tun;
	kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
}

/* Socket destructor: the sock holds the last reference keeping the
 * netdev alive. */
static void tun_sock_destruct(struct sock *sk)
{
	free_netdev(tun_sk(sk)->tun->dev);
}

/* sendmsg on the embedded socket (used e.g. by vhost): inject a packet
 * exactly as a write(2) on the chardev would. */
static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t total_len)
{
	struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
	return tun_get_user(tun, m->msg_iov, total_len,
			    m->msg_flags & MSG_DONTWAIT);
}

/* recvmsg on the embedded socket: dequeue one frame; supports
 * MSG_DONTWAIT and MSG_TRUNC only. */
static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t total_len,
		       int flags)
{
	struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
	int ret;
	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
		return -EINVAL;
	ret = tun_do_read(tun, iocb, m->msg_iov, total_len,
			  flags & MSG_DONTWAIT);
	/* Frame larger than the buffer: flag truncation; with MSG_TRUNC
	 * report the full frame length, otherwise the copied length.
	 * (ternary continues on the next source row) */
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ?
ret : total_len; } return ret; } static int tun_release(struct socket *sock) { if (sock->sk) sock_put(sock->sk); return 0; } /* Ops structure to mimic raw sockets with tun */ static const struct proto_ops tun_socket_ops = { .sendmsg = tun_sendmsg, .recvmsg = tun_recvmsg, .release = tun_release, }; static struct proto tun_proto = { .name = "tun", .owner = THIS_MODULE, .obj_size = sizeof(struct tun_sock), }; static int tun_flags(struct tun_struct *tun) { int flags = 0; if (tun->flags & TUN_TUN_DEV) flags |= IFF_TUN; else flags |= IFF_TAP; if (tun->flags & TUN_NO_PI) flags |= IFF_NO_PI; if (tun->flags & TUN_ONE_QUEUE) flags |= IFF_ONE_QUEUE; if (tun->flags & TUN_VNET_HDR) flags |= IFF_VNET_HDR; return flags; } static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr, char *buf) { struct tun_struct *tun = netdev_priv(to_net_dev(dev)); return sprintf(buf, "0x%x\n", tun_flags(tun)); } static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr, char *buf) { struct tun_struct *tun = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%d\n", tun->owner); } static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr, char *buf) { struct tun_struct *tun = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%d\n", tun->group); } static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL); static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL); static DEVICE_ATTR(group, 0444, tun_show_group, NULL); static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) { struct sock *sk; struct tun_struct *tun; struct net_device *dev; int err; dev = __dev_get_by_name(net, ifr->ifr_name); if (dev) { const struct cred *cred = current_cred(); if (ifr->ifr_flags & IFF_TUN_EXCL) return -EBUSY; if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) tun = netdev_priv(dev); else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops) tun = netdev_priv(dev); else return -EINVAL; if 
/* NOTE(review): this span continues the permission-check if-condition begun
 * on the previous source row ("... if") — tokens below are unchanged, only
 * reformatted and commented. */
(((tun->owner != -1 && cred->euid != tun->owner) ||
		     (tun->group != -1 && !in_egroup_p(tun->group))) &&
		    !capable(CAP_NET_ADMIN))
			/* existing device is owner/group restricted and the
			 * caller matches neither and lacks CAP_NET_ADMIN */
			return -EPERM;
		err = security_tun_dev_attach(tun->socket.sk);
		if (err < 0)
			return err;

		err = tun_attach(tun, file);
		if (err < 0)
			return err;
	}
	else {
		/* No existing device by that name: create one. */
		char *name;
		unsigned long flags = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		err = security_tun_dev_create();
		if (err < 0)
			return err;

		/* Set dev type */
		if (ifr->ifr_flags & IFF_TUN) {
			/* TUN device */
			flags |= TUN_TUN_DEV;
			name = "tun%d";
		} else if (ifr->ifr_flags & IFF_TAP) {
			/* TAP device */
			flags |= TUN_TAP_DEV;
			name = "tap%d";
		} else
			return -EINVAL;

		/* caller-supplied name overrides the tun%d/tap%d template */
		if (*ifr->ifr_name)
			name = ifr->ifr_name;

		dev = alloc_netdev(sizeof(struct tun_struct), name,
				   tun_setup);
		if (!dev)
			return -ENOMEM;

		dev_net_set(dev, net);
		dev->rtnl_link_ops = &tun_link_ops;

		tun = netdev_priv(dev);
		tun->dev = dev;
		tun->flags = flags;
		tun->txflt.count = 0;
		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

		err = -ENOMEM;
		/* NOTE(review): sock is allocated against init_net and then
		 * moved into the target netns by sk_change_net() below. */
		sk = sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
		if (!sk)
			goto err_free_dev;

		sk_change_net(sk, net);
		tun->socket.wq = &tun->wq;
		init_waitqueue_head(&tun->wq.wait);
		tun->socket.ops = &tun_socket_ops;
		sock_init_data(&tun->socket, sk);
		sk->sk_write_space = tun_sock_write_space;
		sk->sk_sndbuf = INT_MAX;

		tun_sk(sk)->tun = tun;

		security_tun_dev_post_create(sk);

		tun_net_init(dev);

		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
			TUN_USER_FEATURES;
		dev->features = dev->hw_features;

		err = register_netdevice(tun->dev);
		if (err < 0)
			goto err_free_sk;

		/* sysfs attributes are best-effort: failure is logged,
		 * not fatal */
		if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
		    device_create_file(&tun->dev->dev, &dev_attr_owner) ||
		    device_create_file(&tun->dev->dev, &dev_attr_group))
			pr_err("Failed to create tun sysfs files\n");

		/* from here on the sock destructor owns the netdev */
		sk->sk_destruct = tun_sock_destruct;

		err = tun_attach(tun, file);
		if (err < 0)
			goto failed;
	}

	tun_debug(KERN_INFO, tun, "tun_set_iff\n");

	/* Re-apply the mutable flag bits on every TUNSETIFF, for both the
	 * attach-existing and the create path. */
	if (ifr->ifr_flags & IFF_NO_PI)
		tun->flags |= TUN_NO_PI;
	else
		tun->flags &= ~TUN_NO_PI;

	if (ifr->ifr_flags & IFF_ONE_QUEUE)
		tun->flags |= TUN_ONE_QUEUE;
	else
		tun->flags &= ~TUN_ONE_QUEUE;

	if (ifr->ifr_flags & IFF_VNET_HDR)
		tun->flags |= TUN_VNET_HDR;
	else
		tun->flags &= ~TUN_VNET_HDR;

	/* Make sure persistent devices do not get stuck in
	 * xoff state. */
	if (netif_running(tun->dev))
		netif_wake_queue(tun->dev);

	/* report the actual device name (tun%d may have been expanded) */
	strcpy(ifr->ifr_name, tun->dev->name);
	return 0;

 err_free_sk:
	tun_free_netdev(dev);

 err_free_dev:
	free_netdev(dev);

 failed:
	return err;
}

/* TUNGETIFF: report the device name and current IFF_* flag view. */
static int tun_get_iff(struct net *net, struct tun_struct *tun,
		       struct ifreq *ifr)
{
	tun_debug(KERN_INFO, tun, "tun_get_iff\n");

	strcpy(ifr->ifr_name, tun->dev->name);

	ifr->ifr_flags = tun_flags(tun);

	return 0;
}

/* This is like a cut-down ethtool ops, except done via tun fd so no
 * privs required. */
static int set_offload(struct tun_struct *tun, unsigned long arg)
{
	netdev_features_t features = 0;

	if (arg & TUN_F_CSUM) {
		features |= NETIF_F_HW_CSUM;
		arg &= ~TUN_F_CSUM;

		/* TSO/UFO are only meaningful with checksum offload */
		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN) {
				features |= NETIF_F_TSO_ECN;
				arg &= ~TUN_F_TSO_ECN;
			}
			if (arg & TUN_F_TSO4)
				features |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				features |= NETIF_F_TSO6;
			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
		}

		if (arg & TUN_F_UFO) {
			features |= NETIF_F_UFO;
			arg &= ~TUN_F_UFO;
		}
	}

	/* This gives the user a way to test for new features in future by
	 * trying to set them.
*/ if (arg) return -EINVAL; tun->set_features = features; netdev_update_features(tun->dev); return 0; } static long __tun_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg, int ifreq_len) { struct tun_file *tfile = file->private_data; struct tun_struct *tun; void __user* argp = (void __user*)arg; struct sock_fprog fprog; struct ifreq ifr; int sndbuf; int vnet_hdr_sz; int ret; #ifdef CONFIG_ANDROID_PARANOID_NETWORK if (cmd != TUNGETIFF && !capable(CAP_NET_ADMIN)) { return -EPERM; } #endif if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) if (copy_from_user(&ifr, argp, ifreq_len)) return -EFAULT; if (cmd == TUNGETFEATURES) { /* Currently this just means: "what IFF flags are valid?". * This is needed because we never checked for invalid flags on * TUNSETIFF. */ return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR, (unsigned int __user*)argp); } rtnl_lock(); tun = __tun_get(tfile); if (cmd == TUNSETIFF && !tun) { ifr.ifr_name[IFNAMSIZ-1] = '\0'; ret = tun_set_iff(tfile->net, file, &ifr); if (ret) goto unlock; if (copy_to_user(argp, &ifr, ifreq_len)) ret = -EFAULT; goto unlock; } ret = -EBADFD; if (!tun) goto unlock; tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %d\n", cmd); ret = 0; switch (cmd) { case TUNGETIFF: ret = tun_get_iff(current->nsproxy->net_ns, tun, &ifr); if (ret) break; if (copy_to_user(argp, &ifr, ifreq_len)) ret = -EFAULT; break; case TUNSETNOCSUM: /* Disable/Enable checksum */ /* [unimplemented] */ tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n", arg ? "disabled" : "enabled"); break; case TUNSETPERSIST: /* Disable/Enable persist mode */ if (arg) tun->flags |= TUN_PERSIST; else tun->flags &= ~TUN_PERSIST; tun_debug(KERN_INFO, tun, "persist %s\n", arg ? 
"enabled" : "disabled"); break; case TUNSETOWNER: /* Set owner of the device */ tun->owner = (uid_t) arg; tun_debug(KERN_INFO, tun, "owner set to %d\n", tun->owner); break; case TUNSETGROUP: /* Set group of the device */ tun->group= (gid_t) arg; tun_debug(KERN_INFO, tun, "group set to %d\n", tun->group); break; case TUNSETLINK: /* Only allow setting the type when the interface is down */ if (tun->dev->flags & IFF_UP) { tun_debug(KERN_INFO, tun, "Linktype set failed because interface is up\n"); ret = -EBUSY; } else { tun->dev->type = (int) arg; tun_debug(KERN_INFO, tun, "linktype set to %d\n", tun->dev->type); ret = 0; } break; #ifdef TUN_DEBUG case TUNSETDEBUG: tun->debug = arg; break; #endif case TUNSETOFFLOAD: ret = set_offload(tun, arg); break; case TUNSETTXFILTER: /* Can be set only for TAPs */ ret = -EINVAL; if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) break; ret = update_filter(&tun->txflt, (void __user *)arg); break; case SIOCGIFHWADDR: /* Get hw address */ memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); ifr.ifr_hwaddr.sa_family = tun->dev->type; if (copy_to_user(argp, &ifr, ifreq_len)) ret = -EFAULT; break; case SIOCSIFHWADDR: /* Set hw address */ tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n", ifr.ifr_hwaddr.sa_data); ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); break; case TUNGETSNDBUF: sndbuf = tun->socket.sk->sk_sndbuf; if (copy_to_user(argp, &sndbuf, sizeof(sndbuf))) ret = -EFAULT; break; case TUNSETSNDBUF: if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) { ret = -EFAULT; break; } tun->socket.sk->sk_sndbuf = sndbuf; break; case TUNGETVNETHDRSZ: vnet_hdr_sz = tun->vnet_hdr_sz; if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz))) ret = -EFAULT; break; case TUNSETVNETHDRSZ: if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) { ret = -EFAULT; break; } if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) { ret = -EINVAL; break; } tun->vnet_hdr_sz = vnet_hdr_sz; break; case TUNATTACHFILTER: /* Can be set only 
for TAPs */ ret = -EINVAL; if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) break; ret = -EFAULT; if (copy_from_user(&fprog, argp, sizeof(fprog))) break; ret = sk_attach_filter(&fprog, tun->socket.sk); break; case TUNDETACHFILTER: /* Can be set only for TAPs */ ret = -EINVAL; if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) break; ret = sk_detach_filter(tun->socket.sk); break; default: ret = -EINVAL; break; } unlock: rtnl_unlock(); if (tun) tun_put(tun); return ret; } static long tun_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq)); } #ifdef CONFIG_COMPAT static long tun_chr_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { switch (cmd) { case TUNSETIFF: case TUNGETIFF: case TUNSETTXFILTER: case TUNGETSNDBUF: case TUNSETSNDBUF: case SIOCGIFHWADDR: case SIOCSIFHWADDR: arg = (unsigned long)compat_ptr(arg); break; default: arg = (compat_ulong_t)arg; break; } /* * compat_ifreq is shorter than ifreq, so we must not access beyond * the end of that structure. All fields that are used in this * driver are compatible though, we don't need to convert the * contents. 
*/ return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq)); } #endif /* CONFIG_COMPAT */ static int tun_chr_fasync(int fd, struct file *file, int on) { struct tun_struct *tun = tun_get(file); int ret; if (!tun) return -EBADFD; tun_debug(KERN_INFO, tun, "tun_chr_fasync %d\n", on); if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0) goto out; if (on) { ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0); if (ret) goto out; tun->flags |= TUN_FASYNC; } else tun->flags &= ~TUN_FASYNC; ret = 0; out: tun_put(tun); return ret; } static int tun_chr_open(struct inode *inode, struct file * file) { struct tun_file *tfile; DBG1(KERN_INFO, "tunX: tun_chr_open\n"); tfile = kmalloc(sizeof(*tfile), GFP_KERNEL); if (!tfile) return -ENOMEM; atomic_set(&tfile->count, 0); tfile->tun = NULL; tfile->net = get_net(current->nsproxy->net_ns); file->private_data = tfile; return 0; } static int tun_chr_close(struct inode *inode, struct file *file) { struct tun_file *tfile = file->private_data; struct tun_struct *tun; tun = __tun_get(tfile); if (tun) { struct net_device *dev = tun->dev; tun_debug(KERN_INFO, tun, "tun_chr_close\n"); __tun_detach(tun); /* If desirable, unregister the netdevice. 
*/ if (!(tun->flags & TUN_PERSIST)) { rtnl_lock(); if (dev->reg_state == NETREG_REGISTERED) unregister_netdevice(dev); rtnl_unlock(); } } tun = tfile->tun; if (tun) sock_put(tun->socket.sk); put_net(tfile->net); kfree(tfile); return 0; } static const struct file_operations tun_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = do_sync_read, .aio_read = tun_chr_aio_read, .write = do_sync_write, .aio_write = tun_chr_aio_write, .poll = tun_chr_poll, .unlocked_ioctl = tun_chr_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = tun_chr_compat_ioctl, #endif .open = tun_chr_open, .release = tun_chr_close, .fasync = tun_chr_fasync }; static struct miscdevice tun_miscdev = { .minor = TUN_MINOR, .name = "tun", .nodename = "net/tun", .fops = &tun_fops, }; /* ethtool interface */ static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { cmd->supported = 0; cmd->advertising = 0; ethtool_cmd_speed_set(cmd, SPEED_10); cmd->duplex = DUPLEX_FULL; cmd->port = PORT_TP; cmd->phy_address = 0; cmd->transceiver = XCVR_INTERNAL; cmd->autoneg = AUTONEG_DISABLE; cmd->maxtxpkt = 0; cmd->maxrxpkt = 0; return 0; } static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct tun_struct *tun = netdev_priv(dev); strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); switch (tun->flags & TUN_TYPE_MASK) { case TUN_TUN_DEV: strlcpy(info->bus_info, "tun", sizeof(info->bus_info)); break; case TUN_TAP_DEV: strlcpy(info->bus_info, "tap", sizeof(info->bus_info)); break; } } static u32 tun_get_msglevel(struct net_device *dev) { #ifdef TUN_DEBUG struct tun_struct *tun = netdev_priv(dev); return tun->debug; #else return -EOPNOTSUPP; #endif } static void tun_set_msglevel(struct net_device *dev, u32 value) { #ifdef TUN_DEBUG struct tun_struct *tun = netdev_priv(dev); tun->debug = value; #endif } static const struct ethtool_ops tun_ethtool_ops = { .get_settings = tun_get_settings, .get_drvinfo = 
tun_get_drvinfo, .get_msglevel = tun_get_msglevel, .set_msglevel = tun_set_msglevel, .get_link = ethtool_op_get_link, }; static int __init tun_init(void) { int ret = 0; pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); pr_info("%s\n", DRV_COPYRIGHT); ret = rtnl_link_register(&tun_link_ops); if (ret) { pr_err("Can't register link_ops\n"); goto err_linkops; } ret = misc_register(&tun_miscdev); if (ret) { pr_err("Can't register misc device %d\n", TUN_MINOR); goto err_misc; } return 0; err_misc: rtnl_link_unregister(&tun_link_ops); err_linkops: return ret; } static void tun_cleanup(void) { misc_deregister(&tun_miscdev); rtnl_link_unregister(&tun_link_ops); } /* Get an underlying socket object from tun file. Returns error unless file is * attached to a device. The returned object works like a packet socket, it * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for * holding a reference to the file for as long as the socket is in use. */ struct socket *tun_get_socket(struct file *file) { struct tun_struct *tun; if (file->f_op != &tun_fops) return ERR_PTR(-EINVAL); tun = tun_get(file); if (!tun) return ERR_PTR(-EBADFD); tun_put(tun); return &tun->socket; } EXPORT_SYMBOL_GPL(tun_get_socket); module_init(tun_init); module_exit(tun_cleanup); MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR(DRV_COPYRIGHT); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(TUN_MINOR); MODULE_ALIAS("devname:net/tun");
gpl-2.0
openwrt/linux
net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
2893
7853
/*
 * IPv4 layer-3 NAT backend: address rewriting, checksum fixup and ICMP
 * error-payload translation, registered with the generic NAT core as
 * an nf_nat_l3proto.
 *
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2011 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/secure_seq.h>
#include <net/checksum.h>
#include <net/route.h>
#include <net/ip.h>

#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_l3proto.h>
#include <net/netfilter/nf_nat_l4proto.h>

/* forward declaration: the ops table is defined after its callbacks */
static const struct nf_nat_l3proto nf_nat_l3proto_ipv4;

#ifdef CONFIG_XFRM
/* Fill in the IPv4 flow key (addresses and, for port-carrying protocols,
 * ports) from the conntrack tuple so XFRM policy lookup sees the
 * NAT-translated flow.  statusbit selects SRC vs DST NAT; XOR-ing with
 * IPS_NAT_MASK flips it for the other direction. */
static void nf_nat_ipv4_decode_session(struct sk_buff *skb,
				       const struct nf_conn *ct,
				       enum ip_conntrack_dir dir,
				       unsigned long statusbit,
				       struct flowi *fl)
{
	const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
	struct flowi4 *fl4 = &fl->u.ip4;

	if (ct->status & statusbit) {
		fl4->daddr = t->dst.u3.ip;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl4->fl4_dport = t->dst.u.all;
	}

	statusbit ^= IPS_NAT_MASK;

	if (ct->status & statusbit) {
		fl4->saddr = t->src.u3.ip;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl4->fl4_sport = t->src.u.all;
	}
}
#endif /* CONFIG_XFRM */

/* Is the tuple's source address inside [min_addr, max_addr]?  Compared in
 * host byte order so the range is numerically contiguous. */
static bool nf_nat_ipv4_in_range(const struct nf_conntrack_tuple *t,
				 const struct nf_nat_range *range)
{
	return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&
	       ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);
}

/* Pick an unpredictable starting port for NAT port allocation, keyed on
 * the flow addresses. */
static u32 nf_nat_ipv4_secure_port(const struct nf_conntrack_tuple *t,
				   __be16 dport)
{
	return secure_ipv4_port_ephemeral(t->src.u3.ip, t->dst.u3.ip, dport);
}

/* Rewrite the IPv4 header at iphdroff (and, via l4proto, the transport
 * header) to match @target, incrementally fixing the IP checksum.
 * Returns false if the skb could not be made writable or the l4 manip
 * failed. */
static bool nf_nat_ipv4_manip_pkt(struct sk_buff *skb,
				  unsigned int iphdroff,
				  const struct nf_nat_l4proto *l4proto,
				  const struct nf_conntrack_tuple *target,
				  enum nf_nat_manip_type maniptype)
{
	struct iphdr *iph;
	unsigned int hdroff;

	if (!skb_make_writable(skb, iphdroff + sizeof(*iph)))
		return false;

	iph = (void *)skb->data + iphdroff;
	hdroff = iphdroff + iph->ihl * 4;

	if (!l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv4, iphdroff, hdroff,
				target, maniptype))
		return false;
	/* manip_pkt may have reallocated skb->data: reload the pointer */
	iph = (void *)skb->data + iphdroff;

	if (maniptype == NF_NAT_MANIP_SRC) {
		csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
		iph->saddr = target->src.u3.ip;
	} else {
		csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
		iph->daddr = target->dst.u3.ip;
	}
	return true;
}

/* Incrementally patch a transport-layer checksum (*check) for the address
 * change implied by tuple @t and @maniptype (pseudo-header update). */
static void nf_nat_ipv4_csum_update(struct sk_buff *skb,
				    unsigned int iphdroff, __sum16 *check,
				    const struct nf_conntrack_tuple *t,
				    enum nf_nat_manip_type maniptype)
{
	struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
	__be32 oldip, newip;

	if (maniptype == NF_NAT_MANIP_SRC) {
		oldip = iph->saddr;
		newip = t->src.u3.ip;
	} else {
		oldip = iph->daddr;
		newip = t->dst.u3.ip;
	}
	inet_proto_csum_replace4(check, skb, oldip, newip, 1);
}

/* Recompute a transport checksum after the payload length changed
 * (oldlen -> datalen).  Uses hardware checksum offload (CHECKSUM_PARTIAL)
 * when the device supports it and the packet is not locally delivered;
 * otherwise computes it in software, or patches incrementally if the skb
 * was already CHECKSUM_PARTIAL. */
static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
				    u8 proto, void *data, __sum16 *check,
				    int datalen, int oldlen)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct rtable *rt = skb_rtable(skb);

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		if (!(rt->rt_flags & RTCF_LOCAL) &&
		    (!skb->dev || skb->dev->features & NETIF_F_V4_CSUM)) {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_headroom(skb) +
					  skb_network_offset(skb) +
					  ip_hdrlen(skb);
			skb->csum_offset = (void *)check - data;
			*check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						    datalen, proto, 0);
		} else {
			*check = 0;
			*check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						   datalen, proto,
						   csum_partial(data, datalen,
								0));
			/* UDP: 0 means "no checksum", substitute the
			 * mangled-zero value */
			if (proto == IPPROTO_UDP && !*check)
				*check = CSUM_MANGLED_0;
		}
	} else
		inet_proto_csum_replace2(check, skb,
					 htons(oldlen), htons(datalen), 1);
}

/* Parse the CTA_NAT_V4_MINIP/MAXIP netlink attributes into an address
 * range; MAXIP defaults to MINIP when absent. */
static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
				       struct nf_nat_range *range)
{
	if (tb[CTA_NAT_V4_MINIP]) {
		range->min_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MINIP]);
		range->flags |= NF_NAT_RANGE_MAP_IPS;
	}

	if (tb[CTA_NAT_V4_MAXIP])
		range->max_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MAXIP]);
	else
		range->max_addr.ip = range->min_addr.ip;

	return 0;
}

static const struct nf_nat_l3proto nf_nat_l3proto_ipv4 = {
	.l3proto		= NFPROTO_IPV4,
	.in_range		= nf_nat_ipv4_in_range,
	.secure_port		= nf_nat_ipv4_secure_port,
	.manip_pkt		= nf_nat_ipv4_manip_pkt,
	.csum_update		= nf_nat_ipv4_csum_update,
	.csum_recalc		= nf_nat_ipv4_csum_recalc,
	.nlattr_to_range	= nf_nat_ipv4_nlattr_to_range,
#ifdef CONFIG_XFRM
	.decode_session		= nf_nat_ipv4_decode_session,
#endif
};

/* Translate the embedded packet inside an ICMP error so the original
 * sender recognizes it, then rewrite the outer header like a reply.
 * Returns 1 to accept the packet, 0 to drop it. */
int nf_nat_icmp_reply_translation(struct sk_buff *skb,
				  struct nf_conn *ct,
				  enum ip_conntrack_info ctinfo,
				  unsigned int hooknum)
{
	struct {
		struct icmphdr icmp;
		struct iphdr ip;
	} *inside;
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
	unsigned int hdrlen = ip_hdrlen(skb);
	const struct nf_nat_l4proto *l4proto;
	struct nf_conntrack_tuple target;
	unsigned long statusbit;

	NF_CT_ASSERT(ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY);

	if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
		return 0;
	if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
		return 0;

	inside = (void *)skb->data + hdrlen;
	if (inside->icmp.type == ICMP_REDIRECT) {
		/* only translate redirects once NAT setup is complete,
		 * and never for NATed connections */
		if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
			return 0;

		if (ct->status & IPS_NAT_MASK)
			return 0;
	}

	if (manip == NF_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply direction */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	if (!(ct->status & statusbit))
		return 1;

	/* inner packet: apply the opposite manip of the outer one */
	l4proto = __nf_nat_l4proto_find(NFPROTO_IPV4, inside->ip.protocol);
	if (!nf_nat_ipv4_manip_pkt(skb, hdrlen + sizeof(inside->icmp),
				   l4proto, &ct->tuplehash[!dir].tuple, !manip))
		return 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Reloading "inside" here since manip_pkt may reallocate */
		inside = (void *)skb->data + hdrlen;
		inside->icmp.checksum = 0;
		inside->icmp.checksum =
			csum_fold(skb_checksum(skb, hdrlen,
					       skb->len - hdrlen, 0));
	}

	/* Change outer to look like the reply to an incoming packet */
	nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
	/* protocol 0 = the "unknown" l4proto, which leaves ports alone */
	l4proto = __nf_nat_l4proto_find(NFPROTO_IPV4, 0);
	if (!nf_nat_ipv4_manip_pkt(skb, 0, l4proto, &target, manip))
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);

/* Module init: register the ICMP l4 handler first, then the l3 backend;
 * unwind in reverse order on failure. */
static int __init nf_nat_l3proto_ipv4_init(void)
{
	int err;

	err = nf_nat_l4proto_register(NFPROTO_IPV4, &nf_nat_l4proto_icmp);
	if (err < 0)
		goto err1;
	err = nf_nat_l3proto_register(&nf_nat_l3proto_ipv4);
	if (err < 0)
		goto err2;
	return err;

err2:
	nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_icmp);
err1:
	return err;
}

static void __exit nf_nat_l3proto_ipv4_exit(void)
{
	nf_nat_l3proto_unregister(&nf_nat_l3proto_ipv4);
	nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_icmp);
}

MODULE_LICENSE("GPL");
MODULE_ALIAS("nf-nat-" __stringify(AF_INET));

module_init(nf_nat_l3proto_ipv4_init);
module_exit(nf_nat_l3proto_ipv4_exit);
gpl-2.0
sztena/DG12
sound/usb/caiaq/device.c
2893
14716
/* * caiaq.c: ALSA driver for caiaq/NativeInstruments devices * * Copyright (c) 2007 Daniel Mack <daniel@caiaq.de> * Karsten Wiese <fzu@wemgehoertderstaat.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/moduleparam.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/init.h> #include <linux/gfp.h> #include <linux/usb.h> #include <sound/initval.h> #include <sound/core.h> #include <sound/pcm.h> #include "device.h" #include "audio.h" #include "midi.h" #include "control.h" #include "input.h" MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); MODULE_DESCRIPTION("caiaq USB audio"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Native Instruments, RigKontrol2}," "{Native Instruments, RigKontrol3}," "{Native Instruments, Kore Controller}," "{Native Instruments, Kore Controller 2}," "{Native Instruments, Audio Kontrol 1}," "{Native Instruments, Audio 2 DJ}," "{Native Instruments, Audio 4 DJ}," "{Native Instruments, Audio 8 DJ}," "{Native Instruments, Traktor Audio 2}," "{Native Instruments, Session I/O}," "{Native Instruments, GuitarRig mobile}" "{Native Instruments, Traktor Kontrol X1}" "{Native Instruments, Traktor Kontrol S4}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */ static char* id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for this card */ static int 
enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ static int snd_card_used[SNDRV_CARDS]; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for the caiaq sound device"); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for the caiaq soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable the caiaq soundcard."); enum { SAMPLERATE_44100 = 0, SAMPLERATE_48000 = 1, SAMPLERATE_96000 = 2, SAMPLERATE_192000 = 3, SAMPLERATE_88200 = 4, SAMPLERATE_INVALID = 0xff }; enum { DEPTH_NONE = 0, DEPTH_16 = 1, DEPTH_24 = 2, DEPTH_32 = 3 }; static struct usb_device_id snd_usb_id_table[] = { { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_RIGKONTROL2 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_RIGKONTROL3 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_KORECONTROLLER }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_KORECONTROLLER2 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_AK1 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_AUDIO8DJ }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_SESSIONIO }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_GUITARRIGMOBILE }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_AUDIO4DJ }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_AUDIO2DJ }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = 
USB_PID_TRAKTORKONTROLX1 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_TRAKTORKONTROLS4 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_TRAKTORAUDIO2 }, { /* terminator */ } }; static void usb_ep1_command_reply_dispatch (struct urb* urb) { int ret; struct snd_usb_caiaqdev *dev = urb->context; unsigned char *buf = urb->transfer_buffer; if (urb->status || !dev) { log("received EP1 urb->status = %i\n", urb->status); return; } switch(buf[0]) { case EP1_CMD_GET_DEVICE_INFO: memcpy(&dev->spec, buf+1, sizeof(struct caiaq_device_spec)); dev->spec.fw_version = le16_to_cpu(dev->spec.fw_version); debug("device spec (firmware %d): audio: %d in, %d out, " "MIDI: %d in, %d out, data alignment %d\n", dev->spec.fw_version, dev->spec.num_analog_audio_in, dev->spec.num_analog_audio_out, dev->spec.num_midi_in, dev->spec.num_midi_out, dev->spec.data_alignment); dev->spec_received++; wake_up(&dev->ep1_wait_queue); break; case EP1_CMD_AUDIO_PARAMS: dev->audio_parm_answer = buf[1]; wake_up(&dev->ep1_wait_queue); break; case EP1_CMD_MIDI_READ: snd_usb_caiaq_midi_handle_input(dev, buf[1], buf + 3, buf[2]); break; case EP1_CMD_READ_IO: if (dev->chip.usb_id == USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO8DJ)) { if (urb->actual_length > sizeof(dev->control_state)) urb->actual_length = sizeof(dev->control_state); memcpy(dev->control_state, buf + 1, urb->actual_length); wake_up(&dev->ep1_wait_queue); break; } #ifdef CONFIG_SND_USB_CAIAQ_INPUT case EP1_CMD_READ_ERP: case EP1_CMD_READ_ANALOG: snd_usb_caiaq_input_dispatch(dev, buf, urb->actual_length); #endif break; } dev->ep1_in_urb.actual_length = 0; ret = usb_submit_urb(&dev->ep1_in_urb, GFP_ATOMIC); if (ret < 0) log("unable to submit urb. 
OOM!?\n"); } int snd_usb_caiaq_send_command(struct snd_usb_caiaqdev *dev, unsigned char command, const unsigned char *buffer, int len) { int actual_len; struct usb_device *usb_dev = dev->chip.dev; if (!usb_dev) return -EIO; if (len > EP1_BUFSIZE - 1) len = EP1_BUFSIZE - 1; if (buffer && len > 0) memcpy(dev->ep1_out_buf+1, buffer, len); dev->ep1_out_buf[0] = command; return usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, 1), dev->ep1_out_buf, len+1, &actual_len, 200); } int snd_usb_caiaq_set_audio_params (struct snd_usb_caiaqdev *dev, int rate, int depth, int bpp) { int ret; char tmp[5]; switch (rate) { case 44100: tmp[0] = SAMPLERATE_44100; break; case 48000: tmp[0] = SAMPLERATE_48000; break; case 88200: tmp[0] = SAMPLERATE_88200; break; case 96000: tmp[0] = SAMPLERATE_96000; break; case 192000: tmp[0] = SAMPLERATE_192000; break; default: return -EINVAL; } switch (depth) { case 16: tmp[1] = DEPTH_16; break; case 24: tmp[1] = DEPTH_24; break; default: return -EINVAL; } tmp[2] = bpp & 0xff; tmp[3] = bpp >> 8; tmp[4] = 1; /* packets per microframe */ debug("setting audio params: %d Hz, %d bits, %d bpp\n", rate, depth, bpp); dev->audio_parm_answer = -1; ret = snd_usb_caiaq_send_command(dev, EP1_CMD_AUDIO_PARAMS, tmp, sizeof(tmp)); if (ret) return ret; if (!wait_event_timeout(dev->ep1_wait_queue, dev->audio_parm_answer >= 0, HZ)) return -EPIPE; if (dev->audio_parm_answer != 1) debug("unable to set the device's audio params\n"); else dev->bpp = bpp; return dev->audio_parm_answer == 1 ? 
0 : -EINVAL; } int snd_usb_caiaq_set_auto_msg(struct snd_usb_caiaqdev *dev, int digital, int analog, int erp) { char tmp[3] = { digital, analog, erp }; return snd_usb_caiaq_send_command(dev, EP1_CMD_AUTO_MSG, tmp, sizeof(tmp)); } static void __devinit setup_card(struct snd_usb_caiaqdev *dev) { int ret; char val[4]; /* device-specific startup specials */ switch (dev->chip.usb_id) { case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_RIGKONTROL2): /* RigKontrol2 - display centered dash ('-') */ val[0] = 0x00; val[1] = 0x00; val[2] = 0x01; snd_usb_caiaq_send_command(dev, EP1_CMD_WRITE_IO, val, 3); break; case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_RIGKONTROL3): /* RigKontrol2 - display two centered dashes ('--') */ val[0] = 0x00; val[1] = 0x40; val[2] = 0x40; val[3] = 0x00; snd_usb_caiaq_send_command(dev, EP1_CMD_WRITE_IO, val, 4); break; case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AK1): /* Audio Kontrol 1 - make USB-LED stop blinking */ val[0] = 0x00; snd_usb_caiaq_send_command(dev, EP1_CMD_WRITE_IO, val, 1); break; case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO8DJ): /* Audio 8 DJ - trigger read of current settings */ dev->control_state[0] = 0xff; snd_usb_caiaq_set_auto_msg(dev, 1, 0, 0); snd_usb_caiaq_send_command(dev, EP1_CMD_READ_IO, NULL, 0); if (!wait_event_timeout(dev->ep1_wait_queue, dev->control_state[0] != 0xff, HZ)) return; /* fix up some defaults */ if ((dev->control_state[1] != 2) || (dev->control_state[2] != 3) || (dev->control_state[4] != 2)) { dev->control_state[1] = 2; dev->control_state[2] = 3; dev->control_state[4] = 2; snd_usb_caiaq_send_command(dev, EP1_CMD_WRITE_IO, dev->control_state, 6); } break; } if (dev->spec.num_analog_audio_out + dev->spec.num_analog_audio_in + dev->spec.num_digital_audio_out + dev->spec.num_digital_audio_in > 0) { ret = snd_usb_caiaq_audio_init(dev); if (ret < 0) log("Unable to set up audio system (ret=%d)\n", ret); } if (dev->spec.num_midi_in + dev->spec.num_midi_out > 0) { ret = snd_usb_caiaq_midi_init(dev); if 
(ret < 0) log("Unable to set up MIDI system (ret=%d)\n", ret); } #ifdef CONFIG_SND_USB_CAIAQ_INPUT ret = snd_usb_caiaq_input_init(dev); if (ret < 0) log("Unable to set up input system (ret=%d)\n", ret); #endif /* finally, register the card and all its sub-instances */ ret = snd_card_register(dev->chip.card); if (ret < 0) { log("snd_card_register() returned %d\n", ret); snd_card_free(dev->chip.card); } ret = snd_usb_caiaq_control_init(dev); if (ret < 0) log("Unable to set up control system (ret=%d)\n", ret); } static int create_card(struct usb_device *usb_dev, struct usb_interface *intf, struct snd_card **cardp) { int devnum; int err; struct snd_card *card; struct snd_usb_caiaqdev *dev; for (devnum = 0; devnum < SNDRV_CARDS; devnum++) if (enable[devnum] && !snd_card_used[devnum]) break; if (devnum >= SNDRV_CARDS) return -ENODEV; err = snd_card_create(index[devnum], id[devnum], THIS_MODULE, sizeof(struct snd_usb_caiaqdev), &card); if (err < 0) return err; dev = caiaqdev(card); dev->chip.dev = usb_dev; dev->chip.card = card; dev->chip.usb_id = USB_ID(le16_to_cpu(usb_dev->descriptor.idVendor), le16_to_cpu(usb_dev->descriptor.idProduct)); spin_lock_init(&dev->spinlock); snd_card_set_dev(card, &intf->dev); *cardp = card; return 0; } static int __devinit init_card(struct snd_usb_caiaqdev *dev) { char *c, usbpath[32]; struct usb_device *usb_dev = dev->chip.dev; struct snd_card *card = dev->chip.card; int err, len; if (usb_set_interface(usb_dev, 0, 1) != 0) { log("can't set alt interface.\n"); return -EIO; } usb_init_urb(&dev->ep1_in_urb); usb_init_urb(&dev->midi_out_urb); usb_fill_bulk_urb(&dev->ep1_in_urb, usb_dev, usb_rcvbulkpipe(usb_dev, 0x1), dev->ep1_in_buf, EP1_BUFSIZE, usb_ep1_command_reply_dispatch, dev); usb_fill_bulk_urb(&dev->midi_out_urb, usb_dev, usb_sndbulkpipe(usb_dev, 0x1), dev->midi_out_buf, EP1_BUFSIZE, snd_usb_caiaq_midi_output_done, dev); init_waitqueue_head(&dev->ep1_wait_queue); init_waitqueue_head(&dev->prepare_wait_queue); if 
(usb_submit_urb(&dev->ep1_in_urb, GFP_KERNEL) != 0) return -EIO; err = snd_usb_caiaq_send_command(dev, EP1_CMD_GET_DEVICE_INFO, NULL, 0); if (err) return err; if (!wait_event_timeout(dev->ep1_wait_queue, dev->spec_received, HZ)) return -ENODEV; usb_string(usb_dev, usb_dev->descriptor.iManufacturer, dev->vendor_name, CAIAQ_USB_STR_LEN); usb_string(usb_dev, usb_dev->descriptor.iProduct, dev->product_name, CAIAQ_USB_STR_LEN); strlcpy(card->driver, MODNAME, sizeof(card->driver)); strlcpy(card->shortname, dev->product_name, sizeof(card->shortname)); strlcpy(card->mixername, dev->product_name, sizeof(card->mixername)); /* if the id was not passed as module option, fill it with a shortened * version of the product string which does not contain any * whitespaces */ if (*card->id == '\0') { char id[sizeof(card->id)]; memset(id, 0, sizeof(id)); for (c = card->shortname, len = 0; *c && len < sizeof(card->id); c++) if (*c != ' ') id[len++] = *c; snd_card_set_id(card, id); } usb_make_path(usb_dev, usbpath, sizeof(usbpath)); snprintf(card->longname, sizeof(card->longname), "%s %s (%s)", dev->vendor_name, dev->product_name, usbpath); setup_card(dev); return 0; } static int __devinit snd_probe(struct usb_interface *intf, const struct usb_device_id *id) { int ret; struct snd_card *card; struct usb_device *device = interface_to_usbdev(intf); ret = create_card(device, intf, &card); if (ret < 0) return ret; usb_set_intfdata(intf, card); ret = init_card(caiaqdev(card)); if (ret < 0) { log("unable to init card! 
(ret=%d)\n", ret); snd_card_free(card); return ret; } return 0; } static void snd_disconnect(struct usb_interface *intf) { struct snd_usb_caiaqdev *dev; struct snd_card *card = usb_get_intfdata(intf); debug("%s(%p)\n", __func__, intf); if (!card) return; dev = caiaqdev(card); snd_card_disconnect(card); #ifdef CONFIG_SND_USB_CAIAQ_INPUT snd_usb_caiaq_input_free(dev); #endif snd_usb_caiaq_audio_free(dev); usb_kill_urb(&dev->ep1_in_urb); usb_kill_urb(&dev->midi_out_urb); snd_card_free(card); usb_reset_device(interface_to_usbdev(intf)); } MODULE_DEVICE_TABLE(usb, snd_usb_id_table); static struct usb_driver snd_usb_driver = { .name = MODNAME, .probe = snd_probe, .disconnect = snd_disconnect, .id_table = snd_usb_id_table, }; static int __init snd_module_init(void) { return usb_register(&snd_usb_driver); } static void __exit snd_module_exit(void) { usb_deregister(&snd_usb_driver); } module_init(snd_module_init) module_exit(snd_module_exit)
gpl-2.0
SlimRoms/kernel_sony_msm8974
drivers/media/video/videobuf2-vmalloc.c
4429
5064
/* * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2 * * Copyright (C) 2010 Samsung Electronics * * Author: Pawel Osciak <pawel@osciak.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. */ #include <linux/io.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <media/videobuf2-core.h> #include <media/videobuf2-memops.h> struct vb2_vmalloc_buf { void *vaddr; struct page **pages; struct vm_area_struct *vma; int write; unsigned long size; unsigned int n_pages; atomic_t refcount; struct vb2_vmarea_handler handler; }; static void vb2_vmalloc_put(void *buf_priv); static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size) { struct vb2_vmalloc_buf *buf; buf = kzalloc(sizeof(*buf), GFP_KERNEL); if (!buf) return NULL; buf->size = size; buf->vaddr = vmalloc_user(buf->size); buf->handler.refcount = &buf->refcount; buf->handler.put = vb2_vmalloc_put; buf->handler.arg = buf; if (!buf->vaddr) { pr_debug("vmalloc of size %ld failed\n", buf->size); kfree(buf); return NULL; } atomic_inc(&buf->refcount); return buf; } static void vb2_vmalloc_put(void *buf_priv) { struct vb2_vmalloc_buf *buf = buf_priv; if (atomic_dec_and_test(&buf->refcount)) { vfree(buf->vaddr); kfree(buf); } } static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr, unsigned long size, int write) { struct vb2_vmalloc_buf *buf; unsigned long first, last; int n_pages, offset; struct vm_area_struct *vma; dma_addr_t physp; buf = kzalloc(sizeof(*buf), GFP_KERNEL); if (!buf) return NULL; buf->write = write; offset = vaddr & ~PAGE_MASK; buf->size = size; vma = find_vma(current->mm, vaddr); if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) { if (vb2_get_contig_userptr(vaddr, size, &vma, &physp)) goto fail_pages_array_alloc; buf->vma = vma; buf->vaddr = 
ioremap_nocache(physp, size); if (!buf->vaddr) goto fail_pages_array_alloc; } else { first = vaddr >> PAGE_SHIFT; last = (vaddr + size - 1) >> PAGE_SHIFT; buf->n_pages = last - first + 1; buf->pages = kzalloc(buf->n_pages * sizeof(struct page *), GFP_KERNEL); if (!buf->pages) goto fail_pages_array_alloc; /* current->mm->mmap_sem is taken by videobuf2 core */ n_pages = get_user_pages(current, current->mm, vaddr & PAGE_MASK, buf->n_pages, write, 1, /* force */ buf->pages, NULL); if (n_pages != buf->n_pages) goto fail_get_user_pages; buf->vaddr = vm_map_ram(buf->pages, buf->n_pages, -1, PAGE_KERNEL); if (!buf->vaddr) goto fail_get_user_pages; } buf->vaddr += offset; return buf; fail_get_user_pages: pr_debug("get_user_pages requested/got: %d/%d]\n", n_pages, buf->n_pages); while (--n_pages >= 0) put_page(buf->pages[n_pages]); kfree(buf->pages); fail_pages_array_alloc: kfree(buf); return NULL; } static void vb2_vmalloc_put_userptr(void *buf_priv) { struct vb2_vmalloc_buf *buf = buf_priv; unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK; unsigned int i; if (buf->pages) { if (vaddr) vm_unmap_ram((void *)vaddr, buf->n_pages); for (i = 0; i < buf->n_pages; ++i) { if (buf->write) set_page_dirty_lock(buf->pages[i]); put_page(buf->pages[i]); } kfree(buf->pages); } else { if (buf->vma) vb2_put_vma(buf->vma); iounmap(buf->vaddr); } kfree(buf); } static void *vb2_vmalloc_vaddr(void *buf_priv) { struct vb2_vmalloc_buf *buf = buf_priv; if (!buf->vaddr) { pr_err("Address of an unallocated plane requested " "or cannot map user pointer\n"); return NULL; } return buf->vaddr; } static unsigned int vb2_vmalloc_num_users(void *buf_priv) { struct vb2_vmalloc_buf *buf = buf_priv; return atomic_read(&buf->refcount); } static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma) { struct vb2_vmalloc_buf *buf = buf_priv; int ret; if (!buf) { pr_err("No memory to map\n"); return -EINVAL; } ret = remap_vmalloc_range(vma, buf->vaddr, 0); if (ret) { pr_err("Remapping vmalloc 
memory, error: %d\n", ret); return ret; } /* * Make sure that vm_areas for 2 buffers won't be merged together */ vma->vm_flags |= VM_DONTEXPAND; /* * Use common vm_area operations to track buffer refcount. */ vma->vm_private_data = &buf->handler; vma->vm_ops = &vb2_common_vm_ops; vma->vm_ops->open(vma); return 0; } const struct vb2_mem_ops vb2_vmalloc_memops = { .alloc = vb2_vmalloc_alloc, .put = vb2_vmalloc_put, .get_userptr = vb2_vmalloc_get_userptr, .put_userptr = vb2_vmalloc_put_userptr, .vaddr = vb2_vmalloc_vaddr, .mmap = vb2_vmalloc_mmap, .num_users = vb2_vmalloc_num_users, }; EXPORT_SYMBOL_GPL(vb2_vmalloc_memops); MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2"); MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>"); MODULE_LICENSE("GPL");
gpl-2.0
realthunder/a33_linux
drivers/staging/vt6655/iwctl.c
4941
66385
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * File: iwctl.c * * Purpose: wireless ext & ioctl functions * * Author: Lyndon Chen * * Date: July 5, 2006 * * Functions: * * Revision History: * */ #include "device.h" #include "ioctl.h" #include "iocmd.h" #include "mac.h" #include "card.h" #include "hostap.h" #include "power.h" #include "rf.h" #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT #include "iowpa.h" #include "wpactl.h" #endif #include <net/iw_handler.h> extern unsigned short TxRate_iwconfig;//2008-5-8 <add> by chester /*--------------------- Static Definitions -------------------------*/ //2008-0409-07, <Add> by Einsn Liu #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT #define SUPPORTED_WIRELESS_EXT 18 #else #define SUPPORTED_WIRELESS_EXT 17 #endif static const long frequency_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484, 4915, 4920, 4925, 4935, 4940, 4945, 4960, 4980, 5035, 5040, 5045, 5055, 5060, 5080, 5170, 5180, 5190, 5200, 5210, 5220, 5230, 5240, 5260, 5280, 5300, 5320, 5500, 5520, 5540, 5560, 5580, 5600, 5620, 5640, 5660, 5680, 5700, 5745, 5765, 5785, 5805, 5825 }; /*--------------------- Static Classes ----------------------------*/ //static int msglevel =MSG_LEVEL_DEBUG; static int 
msglevel =MSG_LEVEL_INFO; /*--------------------- Static Variables --------------------------*/ /*--------------------- Static Functions --------------------------*/ /*--------------------- Export Variables --------------------------*/ struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev) { PSDevice pDevice = netdev_priv(dev); long ldBm; pDevice->wstats.status = pDevice->eOPMode; #ifdef Calcu_LinkQual #if 0 if(pDevice->byBBType == BB_TYPE_11B) { if(pDevice->byCurrSQ > 120) pDevice->scStatistic.LinkQuality = 100; else pDevice->scStatistic.LinkQuality = pDevice->byCurrSQ*100/120; } else if(pDevice->byBBType == BB_TYPE_11G) { if(pDevice->byCurrSQ < 20) pDevice->scStatistic.LinkQuality = 100; else if(pDevice->byCurrSQ >96) pDevice->scStatistic.LinkQuality = 0; else pDevice->scStatistic.LinkQuality = (96-pDevice->byCurrSQ)*100/76; } if(pDevice->bLinkPass !=true) pDevice->scStatistic.LinkQuality = 0; #endif if(pDevice->scStatistic.LinkQuality > 100) pDevice->scStatistic.LinkQuality = 100; pDevice->wstats.qual.qual =(unsigned char) pDevice->scStatistic.LinkQuality; #else pDevice->wstats.qual.qual = pDevice->byCurrSQ; #endif RFvRSSITodBm(pDevice, (unsigned char)(pDevice->uCurrRSSI), &ldBm); pDevice->wstats.qual.level = ldBm; //pDevice->wstats.qual.level = 0x100 - pDevice->uCurrRSSI; pDevice->wstats.qual.noise = 0; pDevice->wstats.qual.updated = 1; pDevice->wstats.discard.nwid = 0; pDevice->wstats.discard.code = 0; pDevice->wstats.discard.fragment = 0; pDevice->wstats.discard.retries = (unsigned long)pDevice->scStatistic.dwTsrErr; pDevice->wstats.discard.misc = 0; pDevice->wstats.miss.beacon = 0; return &pDevice->wstats; } /*------------------------------------------------------------------*/ static int iwctl_commit(struct net_device *dev, struct iw_request_info *info, void *wrq, char *extra) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWCOMMIT \n"); return 0; } /* * Wireless Handler : get protocol name */ int iwctl_giwname(struct net_device *dev, struct 
iw_request_info *info, char *wrq, char *extra) { strcpy(wrq, "802.11-a/b/g"); return 0; } /* * Wireless Handler : set scan */ int iwctl_siwscan(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); struct iw_scan_req *req = (struct iw_scan_req *)extra; unsigned char abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1]; PWLAN_IE_SSID pItemSSID=NULL; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWSCAN \n"); if(pDevice->byReAssocCount > 0) { //reject scan when re-associating! //send scan event to wpa_Supplicant union iwreq_data wrqu; PRINT_K("wireless_send_event--->SIOCGIWSCAN(scan done)\n"); memset(&wrqu, 0, sizeof(wrqu)); wireless_send_event(pDevice->dev, SIOCGIWSCAN, &wrqu, NULL); return 0; } spin_lock_irq(&pDevice->lock); BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass); //mike add: active scan OR passive scan OR desire_ssid scan if(wrq->length == sizeof(struct iw_scan_req)) { if (wrq->flags & IW_SCAN_THIS_ESSID) { //desire_ssid scan memset(abyScanSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1); pItemSSID = (PWLAN_IE_SSID)abyScanSSID; pItemSSID->byElementID = WLAN_EID_SSID; memcpy(pItemSSID->abySSID, req->essid, (int)req->essid_len); if (pItemSSID->abySSID[req->essid_len - 1] == '\0') { if(req->essid_len>0) pItemSSID->len = req->essid_len - 1; } else pItemSSID->len = req->essid_len; pMgmt->eScanType = WMAC_SCAN_PASSIVE; PRINT_K("SIOCSIWSCAN:[desired_ssid=%s,len=%d]\n",((PWLAN_IE_SSID)abyScanSSID)->abySSID, ((PWLAN_IE_SSID)abyScanSSID)->len); bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, abyScanSSID); spin_unlock_irq(&pDevice->lock); return 0; } else if(req->scan_type == IW_SCAN_TYPE_PASSIVE) { //passive scan pMgmt->eScanType = WMAC_SCAN_PASSIVE; } } else { //active scan pMgmt->eScanType = WMAC_SCAN_ACTIVE; } pMgmt->eScanType = WMAC_SCAN_PASSIVE; //printk("SIOCSIWSCAN:WLAN_CMD_BSSID_SCAN\n"); bScheduleCommand((void *) pDevice, 
WLAN_CMD_BSSID_SCAN, NULL); spin_unlock_irq(&pDevice->lock); return 0; } /* * Wireless Handler : get scan results */ int iwctl_giwscan(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { int ii, jj, kk; PSDevice pDevice = (PSDevice)netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); PKnownBSS pBSS; PWLAN_IE_SSID pItemSSID; PWLAN_IE_SUPP_RATES pSuppRates, pExtSuppRates; char *current_ev = extra; char *end_buf = extra + IW_SCAN_MAX_DATA; char *current_val = NULL; struct iw_event iwe; long ldBm; char buf[MAX_WPA_IE_LEN * 2 + 30]; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWSCAN \n"); if (pMgmt->eScanState == WMAC_IS_SCANNING) { // In scanning.. return -EAGAIN; } pBSS = &(pMgmt->sBSSList[0]); for (ii = 0, jj = 0; jj < MAX_BSS_NUM ; jj++) { if (current_ev >= end_buf) break; pBSS = &(pMgmt->sBSSList[jj]); if (pBSS->bActive) { //ADD mac address memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWAP; iwe.u.ap_addr.sa_family = ARPHRD_ETHER; memcpy(iwe.u.ap_addr.sa_data, pBSS->abyBSSID, WLAN_BSSID_LEN); current_ev = iwe_stream_add_event(info,current_ev,end_buf, &iwe, IW_EV_ADDR_LEN); //ADD ssid memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWESSID; pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID; iwe.u.data.length = pItemSSID->len; iwe.u.data.flags = 1; current_ev = iwe_stream_add_point(info,current_ev,end_buf, &iwe, pItemSSID->abySSID); //ADD mode memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWMODE; if (WLAN_GET_CAP_INFO_ESS(pBSS->wCapInfo)) { iwe.u.mode = IW_MODE_INFRA; } else { iwe.u.mode = IW_MODE_ADHOC; } iwe.len = IW_EV_UINT_LEN; current_ev = iwe_stream_add_event(info,current_ev, end_buf, &iwe, IW_EV_UINT_LEN); //ADD frequency pSuppRates = (PWLAN_IE_SUPP_RATES)pBSS->abySuppRates; pExtSuppRates = (PWLAN_IE_SUPP_RATES)pBSS->abyExtSuppRates; memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWFREQ; iwe.u.freq.m = pBSS->uChannel; iwe.u.freq.e = 0; iwe.u.freq.i = 0; current_ev = iwe_stream_add_event(info,current_ev,end_buf, &iwe, 
IW_EV_FREQ_LEN); //2008-0409-04, <Add> by Einsn Liu { int f = (int)pBSS->uChannel - 1; if(f < 0)f = 0; iwe.u.freq.m = frequency_list[f] * 100000; iwe.u.freq.e = 1; } current_ev = iwe_stream_add_event(info,current_ev,end_buf, &iwe, IW_EV_FREQ_LEN); //ADD quality memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVQUAL; RFvRSSITodBm(pDevice, (unsigned char)(pBSS->uRSSI), &ldBm); iwe.u.qual.level = ldBm; iwe.u.qual.noise = 0; //2008-0409-01, <Add> by Einsn Liu if(-ldBm<50){ iwe.u.qual.qual = 100; }else if(-ldBm > 90) { iwe.u.qual.qual = 0; }else { iwe.u.qual.qual=(40-(-ldBm-50))*100/40; } iwe.u.qual.updated=7; // iwe.u.qual.qual = 0; current_ev = iwe_stream_add_event(info,current_ev, end_buf, &iwe, IW_EV_QUAL_LEN); memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWENCODE; iwe.u.data.length = 0; if (WLAN_GET_CAP_INFO_PRIVACY(pBSS->wCapInfo)) { iwe.u.data.flags =IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; }else { iwe.u.data.flags = IW_ENCODE_DISABLED; } current_ev = iwe_stream_add_point(info,current_ev,end_buf, &iwe, pItemSSID->abySSID); memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWRATE; iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; current_val = current_ev + IW_EV_LCP_LEN; for (kk = 0 ; kk < 12 ; kk++) { if (pSuppRates->abyRates[kk] == 0) break; // Bit rate given in 500 kb/s units (+ 0x80) iwe.u.bitrate.value = ((pSuppRates->abyRates[kk] & 0x7f) * 500000); current_val = iwe_stream_add_value(info,current_ev, current_val, end_buf, &iwe, IW_EV_PARAM_LEN); } for (kk = 0 ; kk < 8 ; kk++) { if (pExtSuppRates->abyRates[kk] == 0) break; // Bit rate given in 500 kb/s units (+ 0x80) iwe.u.bitrate.value = ((pExtSuppRates->abyRates[kk] & 0x7f) * 500000); current_val = iwe_stream_add_value(info,current_ev, current_val, end_buf, &iwe, IW_EV_PARAM_LEN); } if((current_val - current_ev) > IW_EV_LCP_LEN) current_ev = current_val; memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVCUSTOM; sprintf(buf, "bcn_int=%d", pBSS->wBeaconInterval); iwe.u.data.length = strlen(buf); current_ev = 
iwe_stream_add_point(info,current_ev, end_buf, &iwe, buf); if ((pBSS->wWPALen > 0) && (pBSS->wWPALen <= MAX_WPA_IE_LEN)) { memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVGENIE; iwe.u.data.length = pBSS->wWPALen; current_ev = iwe_stream_add_point(info,current_ev, end_buf, &iwe, pBSS->byWPAIE); } if ((pBSS->wRSNLen > 0) && (pBSS->wRSNLen <= MAX_WPA_IE_LEN)) { memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVGENIE; iwe.u.data.length = pBSS->wRSNLen; current_ev = iwe_stream_add_point(info,current_ev, end_buf, &iwe, pBSS->byRSNIE); } } }// for wrq->length = current_ev - extra; return 0; } /* * Wireless Handler : set frequence or channel */ int iwctl_siwfreq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *wrq, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); int rc = 0; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWFREQ \n"); // If setting by frequency, convert to a channel if((wrq->e == 1) && (wrq->m >= (int) 2.412e8) && (wrq->m <= (int) 2.487e8)) { int f = wrq->m / 100000; int c = 0; while((c < 14) && (f != frequency_list[c])) c++; wrq->e = 0; wrq->m = c + 1; } // Setting by channel number if((wrq->m > 14) || (wrq->e > 0)) rc = -EOPNOTSUPP; else { int channel = wrq->m; if((channel < 1) || (channel > 14)) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: New channel value of %d is invalid!\n", dev->name, wrq->m); rc = -EINVAL; } else { // Yes ! We can set it !!! 
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " Set to channel = %d\n", channel); pDevice->uChannel = channel; //2007-0207-04,<Add> by EinsnLiu //Make change effect at once pDevice->bCommit = true; } } return rc; } /* * Wireless Handler : get frequence or channel */ int iwctl_giwfreq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *wrq, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWFREQ \n"); #ifdef WEXT_USECHANNELS wrq->m = (int)pMgmt->uCurrChannel; wrq->e = 0; #else { int f = (int)pMgmt->uCurrChannel - 1; if(f < 0) f = 0; wrq->m = frequency_list[f] * 100000; wrq->e = 1; } #endif return 0; } /* * Wireless Handler : set operation mode */ int iwctl_siwmode(struct net_device *dev, struct iw_request_info *info, __u32 *wmode, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); int rc = 0; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWMODE \n"); if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP && pDevice->bEnableHostapd) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Can't set operation mode, hostapd is running \n"); return rc; } switch(*wmode) { case IW_MODE_ADHOC: if (pMgmt->eConfigMode != WMAC_CONFIG_IBSS_STA) { pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA; if (pDevice->flags & DEVICE_FLAGS_OPENED) { pDevice->bCommit = true; } } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set mode to ad-hoc \n"); break; case IW_MODE_AUTO: case IW_MODE_INFRA: if (pMgmt->eConfigMode != WMAC_CONFIG_ESS_STA) { pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA; if (pDevice->flags & DEVICE_FLAGS_OPENED) { pDevice->bCommit = true; } } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set mode to infrastructure \n"); break; case IW_MODE_MASTER: pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA; rc = -EOPNOTSUPP; break; if (pMgmt->eConfigMode != WMAC_CONFIG_AP) { pMgmt->eConfigMode = WMAC_CONFIG_AP; if (pDevice->flags & DEVICE_FLAGS_OPENED) { pDevice->bCommit = true; } } 
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set mode to Access Point \n"); break; case IW_MODE_REPEAT: pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA; rc = -EOPNOTSUPP; break; default: rc = -EINVAL; } return rc; } /* * Wireless Handler : get operation mode */ int iwctl_giwmode(struct net_device *dev, struct iw_request_info *info, __u32 *wmode, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWMODE \n"); // If not managed, assume it's ad-hoc switch (pMgmt->eConfigMode) { case WMAC_CONFIG_ESS_STA: *wmode = IW_MODE_INFRA; break; case WMAC_CONFIG_IBSS_STA: *wmode = IW_MODE_ADHOC; break; case WMAC_CONFIG_AUTO: *wmode = IW_MODE_INFRA; break; case WMAC_CONFIG_AP: *wmode = IW_MODE_MASTER; break; default: *wmode = IW_MODE_ADHOC; } return 0; } /* * Wireless Handler : get capability range */ int iwctl_giwrange(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { struct iw_range *range = (struct iw_range *) extra; int i,k; unsigned char abySupportedRates[13]= {0x02, 0x04, 0x0B, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x90}; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWRANGE \n"); if (wrq->pointer) { wrq->length = sizeof(struct iw_range); memset(range, 0, sizeof(struct iw_range)); range->min_nwid = 0x0000; range->max_nwid = 0x0000; range->num_channels = 14; // Should be based on cap_rid.country to give only // what the current card support k = 0; for(i = 0; i < 14; i++) { range->freq[k].i = i + 1; // List index range->freq[k].m = frequency_list[i] * 100000; range->freq[k++].e = 1; // Values in table in MHz -> * 10^5 * 10 } range->num_frequency = k; // Hum... 
Should put the right values there #ifdef Calcu_LinkQual range->max_qual.qual = 100; #else range->max_qual.qual = 255; #endif range->max_qual.level = 0; range->max_qual.noise = 0; range->sensitivity = 255; for(i = 0 ; i < 13 ; i++) { range->bitrate[i] = abySupportedRates[i] * 500000; if(range->bitrate[i] == 0) break; } range->num_bitrates = i; // Set an indication of the max TCP throughput // in bit/s that we can expect using this interface. // May be use for QoS stuff... Jean II if(i > 2) range->throughput = 5 * 1000 * 1000; else range->throughput = 1.5 * 1000 * 1000; range->min_rts = 0; range->max_rts = 2312; range->min_frag = 256; range->max_frag = 2312; // the encoding capabilities range->num_encoding_sizes = 3; // 64(40) bits WEP range->encoding_size[0] = 5; // 128(104) bits WEP range->encoding_size[1] = 13; // 256 bits for WPA-PSK range->encoding_size[2] = 32; // 4 keys are allowed range->max_encoding_tokens = 4; range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; range->min_pmp = 0; range->max_pmp = 1000000;// 1 secs range->min_pmt = 0; range->max_pmt = 1000000;// 1 secs range->pmp_flags = IW_POWER_PERIOD; range->pmt_flags = IW_POWER_TIMEOUT; range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_ALL_R; // Transmit Power - values are in mW range->txpower[0] = 100; range->num_txpower = 1; range->txpower_capa = IW_TXPOW_MWATT; range->we_version_source = SUPPORTED_WIRELESS_EXT; range->we_version_compiled = WIRELESS_EXT; range->retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME; range->retry_flags = IW_RETRY_LIMIT; range->r_time_flags = IW_RETRY_LIFETIME; range->min_retry = 1; range->max_retry = 65535; range->min_r_time = 1024; range->max_r_time = 65535 * 1024; // Experimental measurements - boundary 11/5.5 Mb/s // Note : with or without the (local->rssi), results // are somewhat different. 
- Jean II range->avg_qual.qual = 6; range->avg_qual.level = 176; // -80 dBm range->avg_qual.noise = 0; } return 0; } /* * Wireless Handler : set ap mac address */ int iwctl_siwap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *wrq, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); int rc = 0; unsigned char ZeroBSSID[WLAN_BSSID_LEN]={0x00,0x00,0x00,0x00,0x00,0x00}; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWAP \n"); if (pMgmt->eScanState == WMAC_IS_SCANNING) { // In scanning.. printk("SIOCSIWAP(??)-->In scanning...\n"); // return -EAGAIN; } if (wrq->sa_family != ARPHRD_ETHER) rc = -EINVAL; else { memcpy(pMgmt->abyDesireBSSID, wrq->sa_data, 6); //2008-0409-05, <Add> by Einsn Liu if((pDevice->bLinkPass == true) && (memcmp(pMgmt->abyDesireBSSID, pMgmt->abyCurrBSSID, 6)== 0)){ return rc; } //mike :add if ((is_broadcast_ether_addr(pMgmt->abyDesireBSSID)) || (memcmp(pMgmt->abyDesireBSSID, ZeroBSSID, 6) == 0)){ PRINT_K("SIOCSIWAP:invalid desired BSSID return!\n"); return rc; } //mike add: if desired AP is hidden ssid(there are two same BSSID in list), // then ignore,because you don't known which one to be connect with?? { unsigned int ii , uSameBssidNum=0; for (ii = 0; ii < MAX_BSS_NUM; ii++) { if (pMgmt->sBSSList[ii].bActive && !compare_ether_addr(pMgmt->sBSSList[ii].abyBSSID, pMgmt->abyDesireBSSID)) { uSameBssidNum++; } } if(uSameBssidNum >= 2) { //hit: desired AP is in hidden ssid mode!!! 
PRINT_K("SIOCSIWAP:ignore for desired AP in hidden mode\n"); return rc; } } if (pDevice->flags & DEVICE_FLAGS_OPENED) { pDevice->bCommit = true; } } return rc; } /* * Wireless Handler : get ap mac address */ int iwctl_giwap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *wrq, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWAP \n"); memcpy(wrq->sa_data, pMgmt->abyCurrBSSID, 6); //2008-0410,<Modify> by Einsn Liu if ((pDevice->bLinkPass == false) && (pMgmt->eCurrMode != WMAC_MODE_ESS_AP)) memset(wrq->sa_data, 0, 6); if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) { memcpy(wrq->sa_data, pMgmt->abyCurrBSSID, 6); } wrq->sa_family = ARPHRD_ETHER; return 0; } /* * Wireless Handler : get ap list */ int iwctl_giwaplist(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { int ii,jj, rc = 0; struct sockaddr sock[IW_MAX_AP]; struct iw_quality qual[IW_MAX_AP]; PSDevice pDevice = (PSDevice)netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWAPLIST \n"); // Only super-user can see AP list if (!capable(CAP_NET_ADMIN)) { rc = -EPERM; return rc; } if (wrq->pointer) { PKnownBSS pBSS = &(pMgmt->sBSSList[0]); for (ii = 0, jj= 0; ii < MAX_BSS_NUM; ii++) { pBSS = &(pMgmt->sBSSList[ii]); if (!pBSS->bActive) continue; if ( jj >= IW_MAX_AP) break; memcpy(sock[jj].sa_data, pBSS->abyBSSID, 6); sock[jj].sa_family = ARPHRD_ETHER; qual[jj].level = pBSS->uRSSI; qual[jj].qual = qual[jj].noise = 0; qual[jj].updated = 2; jj++; } wrq->flags = 1; // Should be define'd wrq->length = jj; memcpy(extra, sock, sizeof(struct sockaddr)*jj); memcpy(extra + sizeof(struct sockaddr)*jj, qual, sizeof(struct iw_quality)*jj); } return rc; } /* * Wireless Handler : set essid */ int iwctl_siwessid(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { PSDevice pDevice = 
(PSDevice)netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); PWLAN_IE_SSID pItemSSID; //2008-0409-05, <Add> by Einsn Liu unsigned char len; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWESSID \n"); pDevice->fWPA_Authened = false; if (pMgmt->eScanState == WMAC_IS_SCANNING) { // In scanning.. printk("SIOCSIWESSID(??)-->In scanning...\n"); // return -EAGAIN; } // Check if we asked for `any' if(wrq->flags == 0) { // Just send an empty SSID list memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1); memset(pMgmt->abyDesireBSSID, 0xFF,6); PRINT_K("set essid to 'any' \n"); #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT //Unknown desired AP,so here need not associate?? //if(pDevice->bWPASuppWextEnabled == true) { return 0; // } #endif } else { // Set the SSID memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1); pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID; pItemSSID->byElementID = WLAN_EID_SSID; memcpy(pItemSSID->abySSID, extra, wrq->length); if (pItemSSID->abySSID[wrq->length - 1] == '\0') { if(wrq->length>0) pItemSSID->len = wrq->length - 1; } else pItemSSID->len = wrq->length; printk("set essid to %s \n",pItemSSID->abySSID); //2008-0409-05, <Add> by Einsn Liu len=(pItemSSID->len > ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len)?pItemSSID->len:((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len; if((pDevice->bLinkPass == true) && (memcmp(pItemSSID->abySSID,((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->abySSID,len)==0)) return 0; //mike:need clear desiredBSSID if(pItemSSID->len==0) { memset(pMgmt->abyDesireBSSID, 0xFF,6); return 0; } #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT //Wext wil order another command of siwap to link with desired AP, //so here need not associate?? 
if(pDevice->bWPASuppWextEnabled == true) { /*******search if in hidden ssid mode ****/ { PKnownBSS pCurr = NULL; unsigned char abyTmpDesireSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1]; unsigned int ii , uSameBssidNum=0; memcpy(abyTmpDesireSSID,pMgmt->abyDesireSSID,sizeof(abyTmpDesireSSID)); pCurr = BSSpSearchBSSList(pDevice, NULL, abyTmpDesireSSID, pMgmt->eConfigPHYMode ); if (pCurr == NULL){ PRINT_K("SIOCSIWESSID:hidden ssid site survey before associate.......\n"); vResetCommandTimer((void *) pDevice); pMgmt->eScanType = WMAC_SCAN_ACTIVE; bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID); bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, pMgmt->abyDesireSSID); } else { //mike:to find out if that desired SSID is a hidden-ssid AP , // by means of judging if there are two same BSSID exist in list ? for (ii = 0; ii < MAX_BSS_NUM; ii++) { if (pMgmt->sBSSList[ii].bActive && !compare_ether_addr(pMgmt->sBSSList[ii].abyBSSID, pCurr->abyBSSID)) { uSameBssidNum++; } } if(uSameBssidNum >= 2) { //hit: desired AP is in hidden ssid mode!!! printk("SIOCSIWESSID:hidden ssid directly associate.......\n"); vResetCommandTimer((void *) pDevice); pMgmt->eScanType = WMAC_SCAN_PASSIVE; //this scan type,you'll submit scan result! 
bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID); bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, pMgmt->abyDesireSSID); } } } return 0; } #endif DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set essid = %s \n", pItemSSID->abySSID); } if (pDevice->flags & DEVICE_FLAGS_OPENED) { pDevice->bCommit = true; } return 0; } /* * Wireless Handler : get essid */ int iwctl_giwessid(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); PWLAN_IE_SSID pItemSSID; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWESSID \n"); // Note : if wrq->u.data.flags != 0, we should // get the relevant SSID from the SSID list... // Get the current SSID pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID; //pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID; memcpy(extra, pItemSSID->abySSID , pItemSSID->len); extra[pItemSSID->len] = '\0'; wrq->length = pItemSSID->len + 1; //2008-0409-03, <Add> by Einsn Liu wrq->length = pItemSSID->len; wrq->flags = 1; // active return 0; } /* * Wireless Handler : set data rate */ int iwctl_siwrate(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); int rc = 0; u8 brate = 0; int i; unsigned char abySupportedRates[13]= {0x02, 0x04, 0x0B, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x90}; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWRATE \n"); if (!(pDevice->flags & DEVICE_FLAGS_OPENED)) { rc = -EINVAL; return rc; } // First : get a valid bit rate value // Which type of value if((wrq->value < 13) && (wrq->value >= 0)) { // Setting by rate index // Find value in the magic rate table brate = wrq->value; } else { // Setting by frequency value u8 normvalue = (u8) (wrq->value/500000); // Check if rate is valid for(i = 0 ; i < 13 ; i++) { if(normvalue == abySupportedRates[i]) { brate = i; break; } } } // -1 designed the max rate (mostly auto 
mode) if(wrq->value == -1) { // Get the highest available rate for(i = 0 ; i < 13 ; i++) { if(abySupportedRates[i] == 0) break; } if(i != 0) brate = i - 1; } // Check that it is valid // brate is index of abySupportedRates[] if(brate > 13 ) { rc = -EINVAL; return rc; } // Now, check if we want a fixed or auto value if(wrq->fixed != 0) { // Fixed mode // One rate, fixed printk("Rate Fix\n"); pDevice->bFixRate = true; if ((pDevice->byBBType == BB_TYPE_11B)&& (brate > 3)) { pDevice->uConnectionRate = 3; } else { pDevice->uConnectionRate = brate; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Fixed to Rate %d \n", pDevice->uConnectionRate); } } else { pDevice->bFixRate = false; pDevice->uConnectionRate = 13; printk("auto rate:connection_rate is 13\n"); } return rc; } /* * Wireless Handler : get data rate */ int iwctl_giwrate(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); //2007-0118-05,<Mark> by EinsnLiu //Mark the unnecessary sentences. // PSMgmtObject pMgmt = &(pDevice->sMgmtObj); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWRATE \n"); { unsigned char abySupportedRates[13]= {0x02, 0x04, 0x0B, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x90}; int brate = 0; //2008-5-8 <modify> by chester if(pDevice->bLinkPass){ if(pDevice->bFixRate == true){ if (pDevice->uConnectionRate < 13) { brate = abySupportedRates[pDevice->uConnectionRate]; }else { if (pDevice->byBBType == BB_TYPE_11B) brate = 0x16; if (pDevice->byBBType == BB_TYPE_11G) brate = 0x6C; if (pDevice->byBBType == BB_TYPE_11A) brate = 0x6C; } } else { brate = abySupportedRates[TxRate_iwconfig]; } } else brate =0; //2007-0118-05,<Mark> by EinsnLiu //Mark the unnecessary sentences. 
/* if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) { if (pDevice->byBBType == BB_TYPE_11B) brate = 0x16; if (pDevice->byBBType == BB_TYPE_11G) brate = 0x6C; if (pDevice->byBBType == BB_TYPE_11A) brate = 0x6C; } */ // if (pDevice->uConnectionRate == 13) // brate = abySupportedRates[pDevice->wCurrentRate]; wrq->value = brate * 500000; // If more than one rate, set auto if (pDevice->bFixRate == true) wrq->fixed = true; } return 0; } /* * Wireless Handler : set rts threshold */ int iwctl_siwrts(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); int rc = 0; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWRTS \n"); { int rthr = wrq->value; if(wrq->disabled) rthr = 2312; if((rthr < 0) || (rthr > 2312)) { rc = -EINVAL; }else { pDevice->wRTSThreshold = rthr; } } return 0; } /* * Wireless Handler : get rts */ int iwctl_giwrts(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWRTS \n"); wrq->value = pDevice->wRTSThreshold; wrq->disabled = (wrq->value >= 2312); wrq->fixed = 1; return 0; } /* * Wireless Handler : set fragment threshold */ int iwctl_siwfrag(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); int rc = 0; int fthr = wrq->value; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWFRAG \n"); if (wrq->disabled) fthr = 2312; if((fthr < 256) || (fthr > 2312)) { rc = -EINVAL; }else { fthr &= ~0x1; // Get an even value pDevice->wFragmentationThreshold = (u16)fthr; } return rc; } /* * Wireless Handler : get fragment threshold */ int iwctl_giwfrag(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWFRAG \n"); wrq->value = pDevice->wFragmentationThreshold; 
wrq->disabled = (wrq->value >= 2312); wrq->fixed = 1; return 0; } /* * Wireless Handler : set retry threshold */ int iwctl_siwretry(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); int rc = 0; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWRETRY \n"); if (wrq->disabled) { rc = -EINVAL; return rc; } if (wrq->flags & IW_RETRY_LIMIT) { if(wrq->flags & IW_RETRY_MAX) pDevice->byLongRetryLimit = wrq->value; else if (wrq->flags & IW_RETRY_MIN) pDevice->byShortRetryLimit = wrq->value; else { // No modifier : set both pDevice->byShortRetryLimit = wrq->value; pDevice->byLongRetryLimit = wrq->value; } } if (wrq->flags & IW_RETRY_LIFETIME) { pDevice->wMaxTransmitMSDULifetime = wrq->value; } return rc; } /* * Wireless Handler : get retry threshold */ int iwctl_giwretry(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWRETRY \n"); wrq->disabled = 0; // Can't be disabled // Note : by default, display the min retry number if((wrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) { wrq->flags = IW_RETRY_LIFETIME; wrq->value = (int)pDevice->wMaxTransmitMSDULifetime; //ms } else if((wrq->flags & IW_RETRY_MAX)) { wrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX; wrq->value = (int)pDevice->byLongRetryLimit; } else { wrq->flags = IW_RETRY_LIMIT; wrq->value = (int)pDevice->byShortRetryLimit; if((int)pDevice->byShortRetryLimit != (int)pDevice->byLongRetryLimit) wrq->flags |= IW_RETRY_MIN; } return 0; } /* * Wireless Handler : set encode mode */ int iwctl_siwencode(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); unsigned long dwKeyIndex = (unsigned long)(wrq->flags & IW_ENCODE_INDEX); int ii,uu, rc = 0; int index = (wrq->flags & IW_ENCODE_INDEX); 
//2007-0207-07,<Modify> by EinsnLiu //There are some problems when using iwconfig encode/key command to set the WEP key. //I almost rewrite this function. //now it support:(assume the wireless interface's name is eth0) //iwconfig eth0 key [1] 1122334455 open /*set key stirng to index 1,and driver using key index is set to 1*/ //iwconfig eth0 key [3] /*set driver using key index to 3,the key string no change */ //iwconfig eth0 key 1122334455 /*set key string to driver using index*/ //iwconfig eth0 key restricted /*enable share key*/ PSKeyTable pkeytab; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWENCODE \n"); if((wrq->flags & IW_ENCODE_DISABLED)==0){ //Not disable encryption if (dwKeyIndex > WLAN_WEP_NKEYS) { rc = -EINVAL; return rc; } if(dwKeyIndex<1&&((wrq->flags&IW_ENCODE_NOKEY)==0)){//set default key if(pDevice->byKeyIndex<WLAN_WEP_NKEYS){ dwKeyIndex=pDevice->byKeyIndex; } else dwKeyIndex=0; }else dwKeyIndex--; // Check the size of the key if (wrq->length > WLAN_WEP232_KEYLEN) { rc = -EINVAL; return rc; } if(wrq->length>0){//have key if (wrq->length == WLAN_WEP232_KEYLEN) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Set 232 bit wep key\n"); } else if (wrq->length == WLAN_WEP104_KEYLEN) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Set 104 bit wep key\n"); } else if (wrq->length == WLAN_WEP40_KEYLEN) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Set 40 bit wep key, index= %d\n", (int)dwKeyIndex); }else {//no support length rc = -EINVAL; return rc; } memset(pDevice->abyKey, 0, WLAN_WEP232_KEYLEN); memcpy(pDevice->abyKey, extra, wrq->length); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"abyKey: "); for (ii = 0; ii < wrq->length; ii++) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%02x ", pDevice->abyKey[ii]); } if (pDevice->flags & DEVICE_FLAGS_OPENED) { spin_lock_irq(&pDevice->lock); KeybSetDefaultKey(&(pDevice->sKey), (unsigned long)(dwKeyIndex | (1 << 31)), wrq->length, NULL, pDevice->abyKey, KEY_CTL_WEP, pDevice->PortOffset, pDevice->byLocalID ); spin_unlock_irq(&pDevice->lock); } pDevice->byKeyIndex = 
(unsigned char)dwKeyIndex; pDevice->uKeyLength = wrq->length; pDevice->bTransmitKey = true; pDevice->bEncryptionEnable = true; pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled; }else if(index>0){ //when the length is 0 the request only changes the default transmit key index //check the new key has a non zero lenget if(pDevice->bEncryptionEnable==false) { rc = -EINVAL; return rc; } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Just set Default key Index:\n"); pkeytab=&(pDevice->sKey.KeyTable[MAX_KEY_TABLE-1]); if(pkeytab->GroupKey[(unsigned char)dwKeyIndex].uKeyLength==0){ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Default key len is 0\n"); rc = -EINVAL; return rc; } pDevice->byKeyIndex =(unsigned char)dwKeyIndex; pkeytab->dwGTKeyIndex =dwKeyIndex | (1 << 31); pkeytab->GroupKey[(unsigned char)dwKeyIndex].dwKeyIndex=dwKeyIndex | (1 << 31); } }else {//disable the key DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable WEP function\n"); if(pDevice->bEncryptionEnable==false) return 0; pMgmt->bShareKeyAlgorithm = false; pDevice->bEncryptionEnable = false; pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled; if (pDevice->flags & DEVICE_FLAGS_OPENED) { spin_lock_irq(&pDevice->lock); for(uu=0;uu<MAX_KEY_TABLE;uu++) MACvDisableKeyEntry(pDevice->PortOffset, uu); spin_unlock_irq(&pDevice->lock); } } //End Modify,Einsn /* DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWENCODE \n"); // Check the size of the key if (wrq->length > WLAN_WEP232_KEYLEN) { rc = -EINVAL; return rc; } if (dwKeyIndex > WLAN_WEP_NKEYS) { rc = -EINVAL; return rc; } if (dwKeyIndex > 0) dwKeyIndex--; // Send the key to the card if (wrq->length > 0) { if (wrq->length == WLAN_WEP232_KEYLEN) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Set 232 bit wep key\n"); } else if (wrq->length == WLAN_WEP104_KEYLEN) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Set 104 bit wep key\n"); } else if (wrq->length == WLAN_WEP40_KEYLEN) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Set 40 bit wep key, index= %d\n", (int)dwKeyIndex); } memset(pDevice->abyKey, 0, 
WLAN_WEP232_KEYLEN); memcpy(pDevice->abyKey, extra, wrq->length); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"abyKey: "); for (ii = 0; ii < wrq->length; ii++) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%02x ", pDevice->abyKey[ii]); } if (pDevice->flags & DEVICE_FLAGS_OPENED) { spin_lock_irq(&pDevice->lock); KeybSetDefaultKey(&(pDevice->sKey), (unsigned long)(pDevice->byKeyIndex | (1 << 31)), pDevice->uKeyLength, NULL, pDevice->abyKey, KEY_CTL_WEP, pDevice->PortOffset, pDevice->byLocalID ); spin_unlock_irq(&pDevice->lock); } pDevice->byKeyIndex = (unsigned char)dwKeyIndex; pDevice->uKeyLength = wrq->length; pDevice->bTransmitKey = true; pDevice->bEncryptionEnable = true; pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled; // Do we want to just set the transmit key index ? if ( index < 4 ) { pDevice->byKeyIndex = index; } else if(!(wrq->flags & IW_ENCODE_MODE)) { rc = -EINVAL; return rc; } } // Read the flags if(wrq->flags & IW_ENCODE_DISABLED){ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable WEP function\n"); pMgmt->bShareKeyAlgorithm = false; pDevice->bEncryptionEnable = false; pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled; if (pDevice->flags & DEVICE_FLAGS_OPENED) { spin_lock_irq(&pDevice->lock); for(uu=0;uu<MAX_KEY_TABLE;uu++) MACvDisableKeyEntry(pDevice->PortOffset, uu); spin_unlock_irq(&pDevice->lock); } } */ if(wrq->flags & IW_ENCODE_RESTRICTED) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable WEP & ShareKey System\n"); pMgmt->bShareKeyAlgorithm = true; } if(wrq->flags & IW_ENCODE_OPEN) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable WEP & Open System\n"); pMgmt->bShareKeyAlgorithm = false; } return rc; } /* * Wireless Handler : get encode mode */ /* int iwctl_giwencode(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); int rc = 0; char abyKey[WLAN_WEP232_KEYLEN]; unsigned int index = (unsigned int)(wrq->flags & IW_ENCODE_INDEX); 
PSKeyItem pKey = NULL; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWENCODE\n"); //2007-0207-06,<Add> by EinsnLiu //the key index in iwconfig is 1-4 when our driver is 0-3 //so it can't be used directly. //if the index is 0,we should used the index set by driver. if (index > WLAN_WEP_NKEYS) { rc = -EINVAL; return rc; } if(index<1){//set default key if(pDevice->byKeyIndex<WLAN_WEP_NKEYS){ index=pDevice->byKeyIndex; } else index=0; }else index--; //End Add,Einsn memset(abyKey, 0, sizeof(abyKey)); // Check encryption mode wrq->flags = IW_ENCODE_NOKEY; // Is WEP enabled ??? if (pDevice->bEncryptionEnable) wrq->flags |= IW_ENCODE_ENABLED; else wrq->flags |= IW_ENCODE_DISABLED; if (pMgmt->bShareKeyAlgorithm) wrq->flags |= IW_ENCODE_RESTRICTED; else wrq->flags |= IW_ENCODE_OPEN; if (KeybGetKey(&(pDevice->sKey), pDevice->abyBroadcastAddr, (unsigned char)index , &pKey)){ wrq->length = pKey->uKeyLength; memcpy(abyKey, pKey->abyKey, pKey->uKeyLength); //2007-0207-06,<Modify> by EinsnLiu //only get key success need to copy data //index should +1. //there is not necessary to return -EINVAL when get key failed //if return -EINVAL,the encryption item can't be display by the command "iwconfig". 
wrq->flags |= index+1; memcpy(extra, abyKey, WLAN_WEP232_KEYLEN); } //else { // rc = -EINVAL; // return rc; // } //End Modify,Einsn return 0; } */ //2008-0409-06, <Add> by Einsn Liu int iwctl_giwencode(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); char abyKey[WLAN_WEP232_KEYLEN]; unsigned int index = (unsigned int)(wrq->flags & IW_ENCODE_INDEX); PSKeyItem pKey = NULL; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWENCODE\n"); if (index > WLAN_WEP_NKEYS) { return -EINVAL; } if(index<1){//get default key if(pDevice->byKeyIndex<WLAN_WEP_NKEYS){ index=pDevice->byKeyIndex; } else index=0; }else index--; memset(abyKey, 0, WLAN_WEP232_KEYLEN); // Check encryption mode wrq->flags = IW_ENCODE_NOKEY; // Is WEP enabled ??? if (pDevice->bEncryptionEnable) wrq->flags |= IW_ENCODE_ENABLED; else wrq->flags |= IW_ENCODE_DISABLED; if (pMgmt->bShareKeyAlgorithm) wrq->flags |= IW_ENCODE_RESTRICTED; else wrq->flags |= IW_ENCODE_OPEN; wrq->length=0; if((index==0)&&(pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled|| pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled)){//get wpa pairwise key if (KeybGetKey(&(pDevice->sKey),pMgmt->abyCurrBSSID, 0xffffffff, &pKey)){ wrq->length = pKey->uKeyLength; memcpy(abyKey, pKey->abyKey, pKey->uKeyLength); memcpy(extra, abyKey, WLAN_WEP232_KEYLEN); } }else if (KeybGetKey(&(pDevice->sKey), pDevice->abyBroadcastAddr, (unsigned char)index , &pKey)){ wrq->length = pKey->uKeyLength; memcpy(abyKey, pKey->abyKey, pKey->uKeyLength); memcpy(extra, abyKey, WLAN_WEP232_KEYLEN); } wrq->flags |= index+1; return 0; } /* * Wireless Handler : set power mode */ int iwctl_siwpower(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); int rc = 0; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWPOWER 
\n"); if (!(pDevice->flags & DEVICE_FLAGS_OPENED)) { rc = -EINVAL; return rc; } if (wrq->disabled) { pDevice->ePSMode = WMAC_POWER_CAM; PSvDisablePowerSaving(pDevice); return rc; } if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) { pDevice->ePSMode = WMAC_POWER_FAST; PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval); } else if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_PERIOD) { pDevice->ePSMode = WMAC_POWER_FAST; PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval); } switch (wrq->flags & IW_POWER_MODE) { case IW_POWER_UNICAST_R: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWPOWER: IW_POWER_UNICAST_R \n"); rc = -EINVAL; break; case IW_POWER_ALL_R: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWPOWER: IW_POWER_ALL_R \n"); rc = -EINVAL; case IW_POWER_ON: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWPOWER: IW_POWER_ON \n"); break; default: rc = -EINVAL; } return rc; } /* * Wireless Handler : get power mode */ int iwctl_giwpower(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); int mode = pDevice->ePSMode; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWPOWER \n"); wrq->disabled = (mode == WMAC_POWER_CAM); if (wrq->disabled) return 0; if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) { wrq->value = (int)((pMgmt->wListenInterval * pMgmt->wCurrBeaconPeriod) << 10); wrq->flags = IW_POWER_TIMEOUT; } else { wrq->value = (int)((pMgmt->wListenInterval * pMgmt->wCurrBeaconPeriod) << 10); wrq->flags = IW_POWER_PERIOD; } wrq->flags |= IW_POWER_ALL_R; return 0; } /* * Wireless Handler : get Sensitivity */ int iwctl_giwsens(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); long ldBm; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWSENS \n"); if (pDevice->bLinkPass == true) { RFvRSSITodBm(pDevice, (unsigned char)(pDevice->uCurrRSSI), &ldBm); 
wrq->value = ldBm; } else { wrq->value = 0; }; wrq->disabled = (wrq->value == 0); wrq->fixed = 1; return 0; } //2008-0409-07, <Add> by Einsn Liu #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT int iwctl_siwauth(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); int ret=0; static int wpa_version=0; //must be static to save the last value,einsn liu static int pairwise=0; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWAUTH \n"); switch (wrq->flags & IW_AUTH_INDEX) { case IW_AUTH_WPA_VERSION: wpa_version = wrq->value; if(wrq->value == IW_AUTH_WPA_VERSION_DISABLED) { PRINT_K("iwctl_siwauth:set WPADEV to disable at 1??????\n"); //pDevice->bWPADevEnable = false; } else if(wrq->value == IW_AUTH_WPA_VERSION_WPA) { PRINT_K("iwctl_siwauth:set WPADEV to WPA1******\n"); } else { PRINT_K("iwctl_siwauth:set WPADEV to WPA2******\n"); } //pDevice->bWPASuppWextEnabled =true; break; case IW_AUTH_CIPHER_PAIRWISE: pairwise = wrq->value; if(pairwise == IW_AUTH_CIPHER_CCMP){ pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled; }else if(pairwise == IW_AUTH_CIPHER_TKIP){ pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled; }else if(pairwise == IW_AUTH_CIPHER_WEP40||pairwise == IW_AUTH_CIPHER_WEP104){ pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled; }else if(pairwise == IW_AUTH_CIPHER_NONE){ //do nothing,einsn liu }else pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled; break; case IW_AUTH_CIPHER_GROUP: if(wpa_version == IW_AUTH_WPA_VERSION_DISABLED) break; if(pairwise == IW_AUTH_CIPHER_NONE){ if(wrq->value == IW_AUTH_CIPHER_CCMP){ pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled; }else { pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled; } } break; case IW_AUTH_KEY_MGMT: if(wpa_version == IW_AUTH_WPA_VERSION_WPA2){ if(wrq->value == IW_AUTH_KEY_MGMT_PSK) pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK; else pMgmt->eAuthenMode = 
WMAC_AUTH_WPA2; }else if(wpa_version == IW_AUTH_WPA_VERSION_WPA){ if(wrq->value == 0){ pMgmt->eAuthenMode = WMAC_AUTH_WPANONE; }else if(wrq->value == IW_AUTH_KEY_MGMT_PSK) pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK; else pMgmt->eAuthenMode = WMAC_AUTH_WPA; } break; case IW_AUTH_TKIP_COUNTERMEASURES: break; /* FIXME */ case IW_AUTH_DROP_UNENCRYPTED: break; case IW_AUTH_80211_AUTH_ALG: if(wrq->value==IW_AUTH_ALG_OPEN_SYSTEM){ pMgmt->bShareKeyAlgorithm=false; }else if(wrq->value==IW_AUTH_ALG_SHARED_KEY){ pMgmt->bShareKeyAlgorithm=true; } break; case IW_AUTH_WPA_ENABLED: //pDevice->bWPADevEnable = !! wrq->value; break; case IW_AUTH_RX_UNENCRYPTED_EAPOL: break; case IW_AUTH_ROAMING_CONTROL: ret = -EOPNOTSUPP; break; case IW_AUTH_PRIVACY_INVOKED: pDevice->bEncryptionEnable = !!wrq->value; if(pDevice->bEncryptionEnable == false){ wpa_version = 0; pairwise = 0; pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled; pMgmt->bShareKeyAlgorithm = false; pMgmt->eAuthenMode = false; //pDevice->bWPADevEnable = false; } break; default: ret = -EOPNOTSUPP; break; } /* DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_version = %d\n",wpa_version); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pairwise = %d\n",pairwise); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->eEncryptionStatus = %d\n",pDevice->eEncryptionStatus); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pMgmt->eAuthenMode = %d\n",pMgmt->eAuthenMode); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pMgmt->bShareKeyAlgorithm = %s\n",pMgmt->bShareKeyAlgorithm?"true":"false"); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->bEncryptionEnable = %s\n",pDevice->bEncryptionEnable?"true":"false"); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->bWPADevEnable = %s\n",pDevice->bWPADevEnable?"true":"false"); */ return ret; } int iwctl_giwauth(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { return -EOPNOTSUPP; } int iwctl_siwgenie(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { PSDevice pDevice = 
(PSDevice)netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); int ret=0; if(wrq->length){ if ((wrq->length < 2) || (extra[1]+2 != wrq->length)) { ret = -EINVAL; goto out; } if(wrq->length > MAX_WPA_IE_LEN){ ret = -ENOMEM; goto out; } memset(pMgmt->abyWPAIE, 0, MAX_WPA_IE_LEN); if(copy_from_user(pMgmt->abyWPAIE, extra, wrq->length)){ ret = -EFAULT; goto out; } pMgmt->wWPAIELen = wrq->length; }else { memset(pMgmt->abyWPAIE, 0, MAX_WPA_IE_LEN); pMgmt->wWPAIELen = 0; } out://not completely ...not necessary in wpa_supplicant 0.5.8 return ret; } int iwctl_giwgenie(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); int ret=0; int space = wrq->length; wrq->length = 0; if(pMgmt->wWPAIELen > 0){ wrq->length = pMgmt->wWPAIELen; if(pMgmt->wWPAIELen <= space){ if(copy_to_user(extra, pMgmt->abyWPAIE, pMgmt->wWPAIELen)){ ret = -EFAULT; } }else ret = -E2BIG; } return ret; } int iwctl_siwencodeext(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); struct iw_encode_ext *ext = (struct iw_encode_ext*)extra; struct viawget_wpa_param *param=NULL; //original member wpa_alg alg_name; u8 addr[6]; int key_idx, set_tx=0; u8 seq[IW_ENCODE_SEQ_MAX_SIZE]; u8 key[64]; size_t seq_len=0,key_len=0; // // int ii; u8 *buf; size_t blen; u8 key_array[64]; int ret=0; PRINT_K("SIOCSIWENCODEEXT...... 
\n"); blen = sizeof(*param); buf = kmalloc((int)blen, (int)GFP_KERNEL); if (buf == NULL) return -ENOMEM; memset(buf, 0, blen); param = (struct viawget_wpa_param *) buf; //recover alg_name switch (ext->alg) { case IW_ENCODE_ALG_NONE: alg_name = WPA_ALG_NONE; break; case IW_ENCODE_ALG_WEP: alg_name = WPA_ALG_WEP; break; case IW_ENCODE_ALG_TKIP: alg_name = WPA_ALG_TKIP; break; case IW_ENCODE_ALG_CCMP: alg_name = WPA_ALG_CCMP; break; default: PRINT_K("Unknown alg = %d\n",ext->alg); ret= -ENOMEM; goto error; } //recover addr memcpy(addr, ext->addr.sa_data, ETH_ALEN); //recover key_idx key_idx = (wrq->flags&IW_ENCODE_INDEX) - 1; //recover set_tx if(ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) set_tx = 1; //recover seq,seq_len if(ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) { seq_len=IW_ENCODE_SEQ_MAX_SIZE; memcpy(seq, ext->rx_seq, seq_len); } //recover key,key_len if(ext->key_len) { key_len=ext->key_len; memcpy(key, &ext->key[0], key_len); } memset(key_array, 0, 64); if ( key_len > 0) { memcpy(key_array, key, key_len); if (key_len == 32) { // notice ! 
the oder memcpy(&key_array[16], &key[24], 8); memcpy(&key_array[24], &key[16], 8); } } /**************Translate iw_encode_ext to viawget_wpa_param****************/ memcpy(param->addr, addr, ETH_ALEN); param->u.wpa_key.alg_name = (int)alg_name; param->u.wpa_key.set_tx = set_tx; param->u.wpa_key.key_index = key_idx; param->u.wpa_key.key_len = key_len; param->u.wpa_key.key = (u8 *)key_array; param->u.wpa_key.seq = (u8 *)seq; param->u.wpa_key.seq_len = seq_len; #if 0 printk("param->u.wpa_key.alg_name =%d\n",param->u.wpa_key.alg_name); printk(KERN_DEBUG "param->addr=%pM\n", param->addr); printk("param->u.wpa_key.set_tx =%d\n",param->u.wpa_key.set_tx); printk("param->u.wpa_key.key_index =%d\n",param->u.wpa_key.key_index); printk("param->u.wpa_key.key_len =%d\n",param->u.wpa_key.key_len); printk("param->u.wpa_key.key ="); for(ii=0;ii<param->u.wpa_key.key_len;ii++) printk("%02x:",param->u.wpa_key.key[ii]); printk("\n"); printk("param->u.wpa_key.seq_len =%d\n",param->u.wpa_key.seq_len); printk("param->u.wpa_key.seq ="); for(ii=0;ii<param->u.wpa_key.seq_len;ii++) printk("%02x:",param->u.wpa_key.seq[ii]); printk("\n"); printk("...........\n"); #endif //****set if current action is Network Manager count?? //****this method is so foolish,but there is no other way??? 
if(param->u.wpa_key.alg_name == WPA_ALG_NONE) { if(param->u.wpa_key.key_index ==0) { pDevice->bwextcount++; } if((pDevice->bwextcount == 1)&&(param->u.wpa_key.key_index ==1)) { pDevice->bwextcount++; } if((pDevice->bwextcount ==2)&&(param->u.wpa_key.key_index ==2)) { pDevice->bwextcount++; } if((pDevice->bwextcount ==3)&&(param->u.wpa_key.key_index ==3)) { pDevice->bwextcount++; } } if( pDevice->bwextcount == 4) { printk("SIOCSIWENCODEEXT:Enable WPA WEXT SUPPORT!!!!!\n"); pDevice->bwextcount=0; pDevice->bWPASuppWextEnabled = true; } //****** spin_lock_irq(&pDevice->lock); ret = wpa_set_keys(pDevice, param, true); spin_unlock_irq(&pDevice->lock); error: kfree(param); return ret; } int iwctl_giwencodeext(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { return -EOPNOTSUPP; } int iwctl_siwmlme(struct net_device *dev, struct iw_request_info * info, struct iw_point *wrq, char *extra) { PSDevice pDevice = (PSDevice)netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); struct iw_mlme *mlme = (struct iw_mlme *)extra; //u16 reason = cpu_to_le16(mlme->reason_code); int ret = 0; if(memcmp(pMgmt->abyCurrBSSID, mlme->addr.sa_data, ETH_ALEN)){ ret = -EINVAL; return ret; } switch(mlme->cmd){ case IW_MLME_DEAUTH: //this command seems to be not complete,please test it --einsnliu //bScheduleCommand((void *) pDevice, WLAN_CMD_DEAUTH, (unsigned char *)&reason); break; case IW_MLME_DISASSOC: if(pDevice->bLinkPass == true){ printk("iwctl_siwmlme--->send DISASSOCIATE\n"); //clear related flags memset(pMgmt->abyDesireBSSID, 0xFF,6); KeyvInitTable(&pDevice->sKey, pDevice->PortOffset); bScheduleCommand((void *)pDevice, WLAN_CMD_DISASSOCIATE, NULL); } break; default: ret = -EOPNOTSUPP; } return ret; } #endif /*------------------------------------------------------------------*/ /* * Structures to export the Wireless Handlers */ /* static const iw_handler iwctl_handler[] = { (iw_handler) iwctl_commit, // SIOCSIWCOMMIT (iw_handler) iwctl_giwname, 
// SIOCGIWNAME (iw_handler) NULL, // SIOCSIWNWID (iw_handler) NULL, // SIOCGIWNWID (iw_handler) iwctl_siwfreq, // SIOCSIWFREQ (iw_handler) iwctl_giwfreq, // SIOCGIWFREQ (iw_handler) iwctl_siwmode, // SIOCSIWMODE (iw_handler) iwctl_giwmode, // SIOCGIWMODE (iw_handler) NULL, // SIOCSIWSENS (iw_handler) iwctl_giwsens, // SIOCGIWSENS (iw_handler) NULL, // SIOCSIWRANGE (iw_handler) iwctl_giwrange, // SIOCGIWRANGE (iw_handler) NULL, // SIOCSIWPRIV (iw_handler) NULL, // SIOCGIWPRIV (iw_handler) NULL, // SIOCSIWSTATS (iw_handler) NULL, // SIOCGIWSTATS (iw_handler) NULL, // SIOCSIWSPY (iw_handler) NULL, // SIOCGIWSPY (iw_handler) NULL, // -- hole -- (iw_handler) NULL, // -- hole -- (iw_handler) iwctl_siwap, // SIOCSIWAP (iw_handler) iwctl_giwap, // SIOCGIWAP (iw_handler) NULL, // -- hole -- 0x16 (iw_handler) iwctl_giwaplist, // SIOCGIWAPLIST (iw_handler) iwctl_siwscan, // SIOCSIWSCAN (iw_handler) iwctl_giwscan, // SIOCGIWSCAN (iw_handler) iwctl_siwessid, // SIOCSIWESSID (iw_handler) iwctl_giwessid, // SIOCGIWESSID (iw_handler) NULL, // SIOCSIWNICKN (iw_handler) NULL, // SIOCGIWNICKN (iw_handler) NULL, // -- hole -- (iw_handler) NULL, // -- hole -- (iw_handler) iwctl_siwrate, // SIOCSIWRATE 0x20 (iw_handler) iwctl_giwrate, // SIOCGIWRATE (iw_handler) iwctl_siwrts, // SIOCSIWRTS (iw_handler) iwctl_giwrts, // SIOCGIWRTS (iw_handler) iwctl_siwfrag, // SIOCSIWFRAG (iw_handler) iwctl_giwfrag, // SIOCGIWFRAG (iw_handler) NULL, // SIOCSIWTXPOW (iw_handler) NULL, // SIOCGIWTXPOW (iw_handler) iwctl_siwretry, // SIOCSIWRETRY (iw_handler) iwctl_giwretry, // SIOCGIWRETRY (iw_handler) iwctl_siwencode, // SIOCSIWENCODE (iw_handler) iwctl_giwencode, // SIOCGIWENCODE (iw_handler) iwctl_siwpower, // SIOCSIWPOWER (iw_handler) iwctl_giwpower, // SIOCGIWPOWER (iw_handler) NULL, // -- hole -- (iw_handler) NULL, // -- hole -- (iw_handler) iwctl_siwgenie, // SIOCSIWGENIE (iw_handler) iwctl_giwgenie, // SIOCGIWGENIE (iw_handler) iwctl_siwauth, // SIOCSIWAUTH (iw_handler) iwctl_giwauth, // 
SIOCGIWAUTH (iw_handler) iwctl_siwencodeext, // SIOCSIWENCODEEXT (iw_handler) iwctl_giwencodeext, // SIOCGIWENCODEEXT (iw_handler) NULL, // SIOCSIWPMKSA (iw_handler) NULL, // -- hole -- }; */ static const iw_handler iwctl_handler[] = { (iw_handler) iwctl_commit, // SIOCSIWCOMMIT (iw_handler) NULL, // SIOCGIWNAME (iw_handler) NULL, // SIOCSIWNWID (iw_handler) NULL, // SIOCGIWNWID (iw_handler) NULL, // SIOCSIWFREQ (iw_handler) NULL, // SIOCGIWFREQ (iw_handler) NULL, // SIOCSIWMODE (iw_handler) NULL, // SIOCGIWMODE (iw_handler) NULL, // SIOCSIWSENS (iw_handler) NULL, // SIOCGIWSENS (iw_handler) NULL, // SIOCSIWRANGE (iw_handler) iwctl_giwrange, // SIOCGIWRANGE (iw_handler) NULL, // SIOCSIWPRIV (iw_handler) NULL, // SIOCGIWPRIV (iw_handler) NULL, // SIOCSIWSTATS (iw_handler) NULL, // SIOCGIWSTATS (iw_handler) NULL, // SIOCSIWSPY (iw_handler) NULL, // SIOCGIWSPY (iw_handler) NULL, // -- hole -- (iw_handler) NULL, // -- hole -- (iw_handler) NULL, // SIOCSIWAP (iw_handler) NULL, // SIOCGIWAP (iw_handler) NULL, // -- hole -- 0x16 (iw_handler) NULL, // SIOCGIWAPLIST (iw_handler) iwctl_siwscan, // SIOCSIWSCAN (iw_handler) iwctl_giwscan, // SIOCGIWSCAN (iw_handler) NULL, // SIOCSIWESSID (iw_handler) NULL, // SIOCGIWESSID (iw_handler) NULL, // SIOCSIWNICKN (iw_handler) NULL, // SIOCGIWNICKN (iw_handler) NULL, // -- hole -- (iw_handler) NULL, // -- hole -- (iw_handler) NULL, // SIOCSIWRATE 0x20 (iw_handler) NULL, // SIOCGIWRATE (iw_handler) NULL, // SIOCSIWRTS (iw_handler) NULL, // SIOCGIWRTS (iw_handler) NULL, // SIOCSIWFRAG (iw_handler) NULL, // SIOCGIWFRAG (iw_handler) NULL, // SIOCSIWTXPOW (iw_handler) NULL, // SIOCGIWTXPOW (iw_handler) NULL, // SIOCSIWRETRY (iw_handler) NULL, // SIOCGIWRETRY (iw_handler) NULL, // SIOCSIWENCODE (iw_handler) NULL, // SIOCGIWENCODE (iw_handler) NULL, // SIOCSIWPOWER (iw_handler) NULL, // SIOCGIWPOWER //2008-0409-07, <Add> by Einsn Liu (iw_handler) NULL, // -- hole -- (iw_handler) NULL, // -- hole -- (iw_handler) NULL, // SIOCSIWGENIE 
/* Private (driver-specific) ioctl handlers: none are implemented. */
static const iw_handler iwctl_private_handler[] = {
	NULL,				// SIOCIWFIRSTPRIV
};

/* Description of the single private "set" command (char arg, max 1024). */
struct iw_priv_args iwctl_private_args[] = {
	{ IOCTL_CMD_SET, IW_PRIV_TYPE_CHAR | 1024, 0, "set"},
};

/*
 * Wireless-extensions handler table registered with the net stack.
 * The private handler/args tables above are deliberately not hooked up
 * (their entries are commented out below and the counts forced to 0).
 */
const struct iw_handler_def iwctl_handler_def = {
	.get_wireless_stats = &iwctl_get_wireless_stats,
	.num_standard	= sizeof(iwctl_handler)/sizeof(iw_handler),
//	.num_private	= sizeof(iwctl_private_handler)/sizeof(iw_handler),
//	.num_private_args = sizeof(iwctl_private_args)/sizeof(struct iw_priv_args),
	.num_private	= 0,
	.num_private_args = 0,
	.standard	= (iw_handler *) iwctl_handler,
//	.private	= (iw_handler *) iwctl_private_handler,
//	.private_args	= (struct iw_priv_args *)iwctl_private_args,
	.private	= NULL,
	.private_args	= NULL,
};
gpl-2.0
denseye73/mykernel
drivers/staging/tidspbridge/core/tiomap_io.c
4941
11615
/* * tiomap_io.c * * DSP-BIOS Bridge driver support functions for TI OMAP processors. * * Implementation for the io read/write routines. * * Copyright (C) 2005-2006 Texas Instruments, Inc. * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #include <plat/dsp.h> /* ----------------------------------- DSP/BIOS Bridge */ #include <dspbridge/dbdefs.h> /* ----------------------------------- Platform Manager */ #include <dspbridge/dev.h> #include <dspbridge/drv.h> /* ----------------------------------- OS Adaptation Layer */ #include <dspbridge/wdt.h> /* ----------------------------------- specific to this file */ #include "_tiomap.h" #include "_tiomap_pwr.h" #include "tiomap_io.h" static u32 ul_ext_base; static u32 ul_ext_end; static u32 shm0_end; static u32 ul_dyn_ext_base; static u32 ul_trace_sec_beg; static u32 ul_trace_sec_end; static u32 ul_shm_base_virt; bool symbols_reloaded = true; /* * ======== read_ext_dsp_data ======== * Copies DSP external memory buffers to the host side buffers. 
/*
 * read_ext_dsp_data - copy DSP external memory into a host buffer.
 *
 * Resolves the external-memory window (EXTBASE..EXTEND, or the trace
 * section when @dsp_addr falls inside it) via symbol lookup, maps it to a
 * host virtual address, then memcpy()s @ul_num_bytes into @host_buff.
 * Symbol addresses are cached in file-scope globals across calls.
 *
 * Returns 0 on success, -EPERM (or a dev_get_symbol() error) on failure.
 */
int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
		      u8 *host_buff, u32 dsp_addr,
		      u32 ul_num_bytes, u32 mem_type)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 offset;
	u32 ul_tlb_base_virt = 0;
	u32 ul_shm_offset_virt = 0;
	u32 dw_ext_prog_virt_mem;
	u32 dw_base_addr = dev_context->dsp_ext_base_addr;
	bool trace_read = false;

	/* Lazily resolve the shared-memory base symbol (cached global). */
	if (!ul_shm_base_virt) {
		status = dev_get_symbol(dev_context->dev_obj, SHMBASENAME,
					&ul_shm_base_virt);
	}

	/* Check if it is a read of Trace section */
	if (!status && !ul_trace_sec_beg) {
		status = dev_get_symbol(dev_context->dev_obj,
					DSP_TRACESEC_BEG, &ul_trace_sec_beg);
	}

	if (!status && !ul_trace_sec_end) {
		status = dev_get_symbol(dev_context->dev_obj,
					DSP_TRACESEC_END, &ul_trace_sec_end);
	}

	if (!status) {
		if ((dsp_addr <= ul_trace_sec_end) &&
		    (dsp_addr >= ul_trace_sec_beg))
			trace_read = true;
	}

	/* If reading from TRACE, force remap/unmap */
	if (trace_read && dw_base_addr) {
		dw_base_addr = 0;
		dev_context->dsp_ext_base_addr = 0;
	}

	/* No cached mapping: (re)compute the window and host base address. */
	if (!dw_base_addr) {
		/* Initialize ul_ext_base and ul_ext_end */
		ul_ext_base = 0;
		ul_ext_end = 0;

		/* Get DYNEXT_BEG, EXT_BEG and EXT_END. */
		if (!status && !ul_dyn_ext_base) {
			status = dev_get_symbol(dev_context->dev_obj,
						DYNEXTBASE, &ul_dyn_ext_base);
		}

		if (!status) {
			status = dev_get_symbol(dev_context->dev_obj,
						EXTBASE, &ul_ext_base);
		}

		if (!status) {
			status = dev_get_symbol(dev_context->dev_obj,
						EXTEND, &ul_ext_end);
		}

		/* Trace buffer is right after the shm SEG0,
		 * so set the base address to SHMBASE */
		if (trace_read) {
			ul_ext_base = ul_shm_base_virt;
			ul_ext_end = ul_trace_sec_end;
		}

		/* Sanity check: window must not be inverted. */
		if (ul_ext_end < ul_ext_base)
			status = -EPERM;

		if (!status) {
			/* TLB entry 0 maps the shm segment; derive the host
			 * virtual address of the external program region
			 * from its GPP-side mapping. */
			ul_tlb_base_virt =
			    dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE;
			dw_ext_prog_virt_mem =
			    dev_context->atlb_entry[0].gpp_va;

			if (!trace_read) {
				ul_shm_offset_virt =
				    ul_shm_base_virt - ul_tlb_base_virt;
				ul_shm_offset_virt +=
				    PG_ALIGN_HIGH(ul_ext_end -
						  ul_dyn_ext_base + 1,
						  HW_PAGE_SIZE64KB);
				dw_ext_prog_virt_mem -= ul_shm_offset_virt;
				dw_ext_prog_virt_mem +=
				    (ul_ext_base - ul_dyn_ext_base);
				dev_context->dsp_ext_base_addr =
				    dw_ext_prog_virt_mem;

				/*
				 * This dsp_ext_base_addr will get cleared
				 * only when the board is stopped.
				 */
				if (!dev_context->dsp_ext_base_addr)
					status = -EPERM;
			}

			dw_base_addr = dw_ext_prog_virt_mem;
		}
	}

	if (!dw_base_addr || !ul_ext_base || !ul_ext_end)
		status = -EPERM;

	/* NOTE(review): offset is computed even when status != 0; harmless
	 * because it is only used under !status, but worth confirming. */
	offset = dsp_addr - ul_ext_base;

	if (!status)
		memcpy(host_buff, (u8 *) dw_base_addr + offset, ul_num_bytes);

	return status;
}
/*
 * write_dsp_data - copy a host buffer into DSP internal/external memory.
 *
 * Translates @dsp_addr into one of three linear host windows (MEM1/2/3,
 * resource indices 2/3/4) and memcpy()s @ul_num_bytes into it.  When
 * @ul_num_bytes is 0, the translated host address is written back into
 * *@host_buff instead of copying (address-query mode).
 *
 * Returns 0 on success, -EPERM if resources are missing or @dsp_addr does
 * not fall inside any window.
 */
int write_dsp_data(struct bridge_dev_context *dev_context, u8 *host_buff,
		   u32 dsp_addr, u32 ul_num_bytes, u32 mem_type)
{
	u32 offset;
	u32 dw_base_addr = dev_context->dsp_base_addr;
	struct cfg_hostres *resources = dev_context->resources;
	int status = 0;
	u32 base1, base2, base3;

	/* Window boundaries, all relative to OMAP_DSP_MEM1_BASE. */
	base1 = OMAP_DSP_MEM1_SIZE;
	base2 = OMAP_DSP_MEM2_BASE - OMAP_DSP_MEM1_BASE;
	base3 = OMAP_DSP_MEM3_BASE - OMAP_DSP_MEM1_BASE;

	if (!resources)
		return -EPERM;

	offset = dsp_addr - dev_context->dsp_start_add;
	if (offset < base1) {
		dw_base_addr = MEM_LINEAR_ADDRESS(resources->mem_base[2],
						  resources->mem_length[2]);
	/* NOTE(review): offset == base1 falls through to -EPERM below;
	 * presumably the gap between MEM1 end and MEM2 base is unmapped,
	 * but confirm '>' vs '>=' is intentional. */
	} else if (offset > base1 && offset < base2 + OMAP_DSP_MEM2_SIZE) {
		dw_base_addr = MEM_LINEAR_ADDRESS(resources->mem_base[3],
						  resources->mem_length[3]);
		offset = offset - base2;
	} else if (offset >= base2 + OMAP_DSP_MEM2_SIZE &&
		   offset < base3 + OMAP_DSP_MEM3_SIZE) {
		dw_base_addr = MEM_LINEAR_ADDRESS(resources->mem_base[4],
						  resources->mem_length[4]);
		offset = offset - base3;
	} else {
		return -EPERM;
	}
	if (ul_num_bytes)
		memcpy((u8 *) (dw_base_addr + offset), host_buff,
		       ul_num_bytes);
	else
		/* Zero-length call: report the translated address instead. */
		*((u32 *) host_buff) = dw_base_addr + offset;

	return status;
}
* */ int write_ext_dsp_data(struct bridge_dev_context *dev_context, u8 *host_buff, u32 dsp_addr, u32 ul_num_bytes, u32 mem_type, bool dynamic_load) { u32 dw_base_addr = dev_context->dsp_ext_base_addr; u32 dw_offset = 0; u8 temp_byte1, temp_byte2; u8 remain_byte[4]; s32 i; int ret = 0; u32 dw_ext_prog_virt_mem; u32 ul_tlb_base_virt = 0; u32 ul_shm_offset_virt = 0; struct cfg_hostres *host_res = dev_context->resources; bool trace_load = false; temp_byte1 = 0x0; temp_byte2 = 0x0; if (symbols_reloaded) { /* Check if it is a load to Trace section */ ret = dev_get_symbol(dev_context->dev_obj, DSP_TRACESEC_BEG, &ul_trace_sec_beg); if (!ret) ret = dev_get_symbol(dev_context->dev_obj, DSP_TRACESEC_END, &ul_trace_sec_end); } if (!ret) { if ((dsp_addr <= ul_trace_sec_end) && (dsp_addr >= ul_trace_sec_beg)) trace_load = true; } /* If dynamic, force remap/unmap */ if ((dynamic_load || trace_load) && dw_base_addr) { dw_base_addr = 0; MEM_UNMAP_LINEAR_ADDRESS((void *) dev_context->dsp_ext_base_addr); dev_context->dsp_ext_base_addr = 0x0; } if (!dw_base_addr) { if (symbols_reloaded) /* Get SHM_BEG EXT_BEG and EXT_END. */ ret = dev_get_symbol(dev_context->dev_obj, SHMBASENAME, &ul_shm_base_virt); if (dynamic_load) { if (!ret) { if (symbols_reloaded) ret = dev_get_symbol (dev_context->dev_obj, DYNEXTBASE, &ul_ext_base); } if (!ret) { /* DR OMAPS00013235 : DLModules array may be * in EXTMEM. It is expected that DYNEXTMEM and * EXTMEM are contiguous, so checking for the * upper bound at EXTEND should be Ok. 
*/ if (symbols_reloaded) ret = dev_get_symbol (dev_context->dev_obj, EXTEND, &ul_ext_end); } } else { if (symbols_reloaded) { if (!ret) ret = dev_get_symbol (dev_context->dev_obj, EXTBASE, &ul_ext_base); if (!ret) ret = dev_get_symbol (dev_context->dev_obj, EXTEND, &ul_ext_end); } } /* Trace buffer it right after the shm SEG0, so set the * base address to SHMBASE */ if (trace_load) ul_ext_base = ul_shm_base_virt; if (ul_ext_end < ul_ext_base) ret = -EPERM; if (!ret) { ul_tlb_base_virt = dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE; if (symbols_reloaded) { ret = dev_get_symbol (dev_context->dev_obj, DSP_TRACESEC_END, &shm0_end); if (!ret) { ret = dev_get_symbol (dev_context->dev_obj, DYNEXTBASE, &ul_dyn_ext_base); } } ul_shm_offset_virt = ul_shm_base_virt - ul_tlb_base_virt; if (trace_load) { dw_ext_prog_virt_mem = dev_context->atlb_entry[0].gpp_va; } else { dw_ext_prog_virt_mem = host_res->mem_base[1]; dw_ext_prog_virt_mem += (ul_ext_base - ul_dyn_ext_base); } dev_context->dsp_ext_base_addr = (u32) MEM_LINEAR_ADDRESS((void *) dw_ext_prog_virt_mem, ul_ext_end - ul_ext_base); dw_base_addr += dev_context->dsp_ext_base_addr; /* This dsp_ext_base_addr will get cleared only when * the board is stopped. 
/*
 * sm_interrupt_dsp - send a mailbox message to the DSP, waking it first.
 *
 * If the board is in (DSP) hibernation, restores peripheral clocks, the
 * watchdog, the IVA2 DPLL configuration and the mailbox context, and
 * touches the MMU SYSCONFIG register to trigger a short wakeup before the
 * message is sent.  In retention, only the peripheral clocks are restarted.
 *
 * NOTE(review): always returns 0 even when omap_mbox_msg_send() fails
 * (the failure is logged and 'status' set, but not returned) — confirm
 * callers do not rely on the error.
 */
int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val)
{
#ifdef CONFIG_TIDSPBRIDGE_DVFS
	u32 opplevel = 0;
#endif
	struct omap_dsp_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;
	struct cfg_hostres *resources = dev_context->resources;
	int status = 0;
	u32 temp;

	/* Nothing to signal without a mailbox. */
	if (!dev_context->mbox)
		return 0;

	if (!resources)
		return -EPERM;

	if (dev_context->brd_state == BRD_DSP_HIBERNATION ||
	    dev_context->brd_state == BRD_HIBERNATION) {
#ifdef CONFIG_TIDSPBRIDGE_DVFS
		/* Bump the OPP above the minimum before waking the DSP. */
		if (pdata->dsp_get_opp)
			opplevel = (*pdata->dsp_get_opp) ();
		if (opplevel == VDD1_OPP1) {
			if (pdata->dsp_set_min_opp)
				(*pdata->dsp_set_min_opp) (VDD1_OPP2);
		}
#endif
		/* Restart the peripheral clocks */
		dsp_clock_enable_all(dev_context->dsp_per_clks);
		dsp_wdt_enable(true);

		/*
		 * 2:0 AUTO_IVA2_DPLL - Enabling IVA2 DPLL auto control
		 *     in CM_AUTOIDLE_PLL_IVA2 register
		 */
		(*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
				OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);

		/*
		 * 7:4 IVA2_DPLL_FREQSEL - IVA2 internal frq set to
		 *     0.75 MHz - 1.0 MHz
		 * 2:0 EN_IVA2_DPLL - Enable IVA2 DPLL in lock mode
		 */
		(*pdata->dsp_cm_rmw_bits)(OMAP3430_IVA2_DPLL_FREQSEL_MASK |
				OMAP3430_EN_IVA2_DPLL_MASK,
				0x3 << OMAP3430_IVA2_DPLL_FREQSEL_SHIFT |
				0x7 << OMAP3430_EN_IVA2_DPLL_SHIFT,
				OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL);

		/* Restore mailbox settings */
		omap_mbox_restore_ctx(dev_context->mbox);

		/* Access MMU SYS CONFIG register to generate a short wakeup */
		temp = readl(resources->dmmu_base + 0x10);

		dev_context->brd_state = BRD_RUNNING;
	} else if (dev_context->brd_state == BRD_RETENTION) {
		/* Restart the peripheral clocks */
		dsp_clock_enable_all(dev_context->dsp_per_clks);
	}

	status = omap_mbox_msg_send(dev_context->mbox, mb_val);

	if (status) {
		pr_err("omap_mbox_msg_send Fail and status = %d\n", status);
		status = -EPERM;
	}

	return 0;
}
gpl-2.0
Sparhawk76/android_kernel_samsung_afyonltev1
drivers/xen/sys-hypervisor.c
4941
9370
/*
 * type_show - sysfs "type" attribute: identify the hypervisor.
 * Always emits the fixed string "xen"; @attr is unused.
 * Returns the number of bytes written to @buffer.
 */
static ssize_t type_show(struct hyp_sysfs_attr *attr, char *buffer)
{
	ssize_t len;

	len = sprintf(buffer, "xen\n");
	return len;
}
/*
 * uuid_show - sysfs "uuid" attribute: this domain's UUID from xenstore.
 *
 * Reads the domain's "vm" path, then the "uuid" key under it.  Requires
 * xenstored to be up; returns -EBUSY before that, or the xenbus_read()
 * error.  On success returns the number of bytes written to @buffer.
 */
static ssize_t uuid_show(struct hyp_sysfs_attr *attr, char *buffer)
{
	char *vm, *val;
	int ret;
	/* Flag set elsewhere once xenstored is ready to serve requests. */
	extern int xenstored_ready;

	if (!xenstored_ready)
		return -EBUSY;

	/* "vm" resolves to the xenstore path of this domain's record. */
	vm = xenbus_read(XBT_NIL, "vm", "", NULL);
	if (IS_ERR(vm))
		return PTR_ERR(vm);
	val = xenbus_read(XBT_NIL, vm, "uuid", NULL);
	kfree(vm);
	if (IS_ERR(val))
		return PTR_ERR(val);
	ret = sprintf(buffer, "%s\n", val);
	kfree(val);
	return ret;
}
static ssize_t compile_date_show(struct hyp_sysfs_attr *attr, char *buffer) { int ret = -ENOMEM; struct xen_compile_info *info; info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL); if (info) { ret = HYPERVISOR_xen_version(XENVER_compile_info, info); if (!ret) ret = sprintf(buffer, "%s\n", info->compile_date); kfree(info); } return ret; } HYPERVISOR_ATTR_RO(compile_date); static struct attribute *xen_compile_attrs[] = { &compiler_attr.attr, &compiled_by_attr.attr, &compile_date_attr.attr, NULL }; static const struct attribute_group xen_compilation_group = { .name = "compilation", .attrs = xen_compile_attrs, }; static int __init xen_compilation_init(void) { return sysfs_create_group(hypervisor_kobj, &xen_compilation_group); } static void xen_compilation_destroy(void) { sysfs_remove_group(hypervisor_kobj, &xen_compilation_group); } /* xen properties info */ static ssize_t capabilities_show(struct hyp_sysfs_attr *attr, char *buffer) { int ret = -ENOMEM; char *caps; caps = kmalloc(XEN_CAPABILITIES_INFO_LEN, GFP_KERNEL); if (caps) { ret = HYPERVISOR_xen_version(XENVER_capabilities, caps); if (!ret) ret = sprintf(buffer, "%s\n", caps); kfree(caps); } return ret; } HYPERVISOR_ATTR_RO(capabilities); static ssize_t changeset_show(struct hyp_sysfs_attr *attr, char *buffer) { int ret = -ENOMEM; char *cset; cset = kmalloc(XEN_CHANGESET_INFO_LEN, GFP_KERNEL); if (cset) { ret = HYPERVISOR_xen_version(XENVER_changeset, cset); if (!ret) ret = sprintf(buffer, "%s\n", cset); kfree(cset); } return ret; } HYPERVISOR_ATTR_RO(changeset); static ssize_t virtual_start_show(struct hyp_sysfs_attr *attr, char *buffer) { int ret = -ENOMEM; struct xen_platform_parameters *parms; parms = kmalloc(sizeof(struct xen_platform_parameters), GFP_KERNEL); if (parms) { ret = HYPERVISOR_xen_version(XENVER_platform_parameters, parms); if (!ret) ret = sprintf(buffer, "%lx\n", parms->virt_start); kfree(parms); } return ret; } HYPERVISOR_ATTR_RO(virtual_start); static ssize_t pagesize_show(struct 
/*
 * hyper_sysfs_init - register all /sys/hypervisor attribute groups.
 *
 * Registers type, version, compilation, uuid and properties entries in
 * order; on any failure the previously registered groups are torn down
 * in reverse order via the goto-unwind chain below.  Returns 0 on
 * success, -ENODEV outside a Xen domain, or the first registration error.
 */
static int __init hyper_sysfs_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	ret = xen_sysfs_type_init();
	if (ret)
		goto out;
	ret = xen_sysfs_version_init();
	if (ret)
		goto version_out;
	ret = xen_compilation_init();
	if (ret)
		goto comp_out;
	ret = xen_sysfs_uuid_init();
	if (ret)
		goto uuid_out;
	ret = xen_properties_init();
	if (ret)
		goto prop_out;

	/* All groups registered; skip the unwind labels. */
	goto out;

prop_out:
	xen_sysfs_uuid_destroy();
uuid_out:
	xen_compilation_destroy();
comp_out:
	xen_sysfs_version_destroy();
version_out:
	xen_sysfs_type_destroy();
out:
	return ret;
}
/*
 * hyp_sysfs_show - generic sysfs show dispatcher for hypervisor attrs.
 * Recovers the wrapping hyp_sysfs_attr via container_of() and forwards
 * to its ->show() callback; returns 0 if no callback is set.
 */
static ssize_t hyp_sysfs_show(struct kobject *kobj,
			      struct attribute *attr,
			      char *buffer)
{
	struct hyp_sysfs_attr *hyp_attr;
	hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr);
	if (hyp_attr->show)
		return hyp_attr->show(hyp_attr, buffer);
	return 0;
}

/*
 * hyp_sysfs_store - generic sysfs store dispatcher for hypervisor attrs.
 * Mirror of hyp_sysfs_show() for the write path; returns 0 (i.e. zero
 * bytes consumed) if the attribute has no ->store() callback.
 */
static ssize_t hyp_sysfs_store(struct kobject *kobj,
			       struct attribute *attr,
			       const char *buffer,
			       size_t len)
{
	struct hyp_sysfs_attr *hyp_attr;
	hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr);
	if (hyp_attr->store)
		return hyp_attr->store(hyp_attr, buffer, len);
	return 0;
}
gpl-2.0
XXMrHyde/android_kernel_lge_hammerhead
drivers/rtc/rtc-s35390a.c
4941
7580
/* * Seiko Instruments S-35390A RTC Driver * * Copyright (c) 2007 Byron Bradley * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/rtc.h> #include <linux/i2c.h> #include <linux/bitrev.h> #include <linux/bcd.h> #include <linux/slab.h> #define S35390A_CMD_STATUS1 0 #define S35390A_CMD_STATUS2 1 #define S35390A_CMD_TIME1 2 #define S35390A_BYTE_YEAR 0 #define S35390A_BYTE_MONTH 1 #define S35390A_BYTE_DAY 2 #define S35390A_BYTE_WDAY 3 #define S35390A_BYTE_HOURS 4 #define S35390A_BYTE_MINS 5 #define S35390A_BYTE_SECS 6 #define S35390A_FLAG_POC 0x01 #define S35390A_FLAG_BLD 0x02 #define S35390A_FLAG_24H 0x40 #define S35390A_FLAG_RESET 0x80 #define S35390A_FLAG_TEST 0x01 static const struct i2c_device_id s35390a_id[] = { { "s35390a", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, s35390a_id); struct s35390a { struct i2c_client *client[8]; struct rtc_device *rtc; int twentyfourhour; }; static int s35390a_set_reg(struct s35390a *s35390a, int reg, char *buf, int len) { struct i2c_client *client = s35390a->client[reg]; struct i2c_msg msg[] = { { client->addr, 0, len, buf }, }; if ((i2c_transfer(client->adapter, msg, 1)) != 1) return -EIO; return 0; } static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len) { struct i2c_client *client = s35390a->client[reg]; struct i2c_msg msg[] = { { client->addr, I2C_M_RD, len, buf }, }; if ((i2c_transfer(client->adapter, msg, 1)) != 1) return -EIO; return 0; } static int s35390a_reset(struct s35390a *s35390a) { char buf[1]; if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)) < 0) return -EIO; if (!(buf[0] & (S35390A_FLAG_POC | S35390A_FLAG_BLD))) return 0; buf[0] |= (S35390A_FLAG_RESET | S35390A_FLAG_24H); buf[0] &= 0xf0; return s35390a_set_reg(s35390a, 
S35390A_CMD_STATUS1, buf, sizeof(buf)); } static int s35390a_disable_test_mode(struct s35390a *s35390a) { char buf[1]; if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS2, buf, sizeof(buf)) < 0) return -EIO; if (!(buf[0] & S35390A_FLAG_TEST)) return 0; buf[0] &= ~S35390A_FLAG_TEST; return s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, buf, sizeof(buf)); } static char s35390a_hr2reg(struct s35390a *s35390a, int hour) { if (s35390a->twentyfourhour) return bin2bcd(hour); if (hour < 12) return bin2bcd(hour); return 0x40 | bin2bcd(hour - 12); } static int s35390a_reg2hr(struct s35390a *s35390a, char reg) { unsigned hour; if (s35390a->twentyfourhour) return bcd2bin(reg & 0x3f); hour = bcd2bin(reg & 0x3f); if (reg & 0x40) hour += 12; return hour; } static int s35390a_set_datetime(struct i2c_client *client, struct rtc_time *tm) { struct s35390a *s35390a = i2c_get_clientdata(client); int i, err; char buf[7]; dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d mday=%d, " "mon=%d, year=%d, wday=%d\n", __func__, tm->tm_sec, tm->tm_min, tm->tm_hour, tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); buf[S35390A_BYTE_YEAR] = bin2bcd(tm->tm_year - 100); buf[S35390A_BYTE_MONTH] = bin2bcd(tm->tm_mon + 1); buf[S35390A_BYTE_DAY] = bin2bcd(tm->tm_mday); buf[S35390A_BYTE_WDAY] = bin2bcd(tm->tm_wday); buf[S35390A_BYTE_HOURS] = s35390a_hr2reg(s35390a, tm->tm_hour); buf[S35390A_BYTE_MINS] = bin2bcd(tm->tm_min); buf[S35390A_BYTE_SECS] = bin2bcd(tm->tm_sec); /* This chip expects the bits of each byte to be in reverse order */ for (i = 0; i < 7; ++i) buf[i] = bitrev8(buf[i]); err = s35390a_set_reg(s35390a, S35390A_CMD_TIME1, buf, sizeof(buf)); return err; } static int s35390a_get_datetime(struct i2c_client *client, struct rtc_time *tm) { struct s35390a *s35390a = i2c_get_clientdata(client); char buf[7]; int i, err; err = s35390a_get_reg(s35390a, S35390A_CMD_TIME1, buf, sizeof(buf)); if (err < 0) return err; /* This chip returns the bits of each byte in reverse order */ for (i = 0; i < 
/* RTC-class thin wrapper: resolve the i2c client and read the time. */
static int s35390a_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	return s35390a_get_datetime(to_i2c_client(dev), tm);
}

/* RTC-class thin wrapper: resolve the i2c client and set the time. */
static int s35390a_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	return s35390a_set_datetime(to_i2c_client(dev), tm);
}

/* Operations exported to the RTC core (read/set time only; no alarms). */
static const struct rtc_class_ops s35390a_rtc_ops = {
	.read_time	= s35390a_rtc_read_time,
	.set_time	= s35390a_rtc_set_time,
};
/*
 * s35390a_remove - driver teardown: release dummy clients and the RTC.
 *
 * client[0] is the probed device itself and is torn down by the i2c
 * core, so only the dummy clients at indices 1..7 are unregistered here.
 * Always returns 0.
 */
static int s35390a_remove(struct i2c_client *client)
{
	unsigned int i;
	struct s35390a *s35390a = i2c_get_clientdata(client);
	for (i = 1; i < 8; ++i)
		if (s35390a->client[i])
			i2c_unregister_device(s35390a->client[i]);

	rtc_device_unregister(s35390a->rtc);
	kfree(s35390a);

	return 0;
}
gpl-2.0
s9yobena/linux
drivers/clocksource/dw_apb_timer.c
7501
11521
/* Read a 32-bit APB timer register at byte offset @offs. */
static unsigned long apbt_readl(struct dw_apb_timer *timer,
				unsigned long offs)
{
	return readl(timer->base + offs);
}

/* Write a 32-bit APB timer register at byte offset @offs. */
static void apbt_writel(struct dw_apb_timer *timer, unsigned long val,
			unsigned long offs)
{
	writel(val, timer->base + offs);
}

/*
 * apbt_disable_int - mask this timer's interrupt.
 * Setting APBTMR_CONTROL_INT masks the interrupt (apbt_enable_int()
 * clears the same bit to unmask it).
 */
static void apbt_disable_int(struct dw_apb_timer *timer)
{
	unsigned long ctrl = apbt_readl(timer, APBTMR_N_CONTROL);

	ctrl |= APBTMR_CONTROL_INT;
	apbt_writel(timer, ctrl, APBTMR_N_CONTROL);
}
*/ void dw_apb_clockevent_pause(struct dw_apb_clock_event_device *dw_ced) { disable_irq(dw_ced->timer.irq); apbt_disable_int(&dw_ced->timer); } static void apbt_eoi(struct dw_apb_timer *timer) { apbt_readl(timer, APBTMR_N_EOI); } static irqreturn_t dw_apb_clockevent_irq(int irq, void *data) { struct clock_event_device *evt = data; struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt); if (!evt->event_handler) { pr_info("Spurious APBT timer interrupt %d", irq); return IRQ_NONE; } if (dw_ced->eoi) dw_ced->eoi(&dw_ced->timer); evt->event_handler(evt); return IRQ_HANDLED; } static void apbt_enable_int(struct dw_apb_timer *timer) { unsigned long ctrl = apbt_readl(timer, APBTMR_N_CONTROL); /* clear pending intr */ apbt_readl(timer, APBTMR_N_EOI); ctrl &= ~APBTMR_CONTROL_INT; apbt_writel(timer, ctrl, APBTMR_N_CONTROL); } static void apbt_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { unsigned long ctrl; unsigned long period; struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt); pr_debug("%s CPU %d mode=%d\n", __func__, first_cpu(*evt->cpumask), mode); switch (mode) { case CLOCK_EVT_MODE_PERIODIC: period = DIV_ROUND_UP(dw_ced->timer.freq, HZ); ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL); ctrl |= APBTMR_CONTROL_MODE_PERIODIC; apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); /* * DW APB p. 46, have to disable timer before load counter, * may cause sync problem. 
*/ ctrl &= ~APBTMR_CONTROL_ENABLE; apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); udelay(1); pr_debug("Setting clock period %lu for HZ %d\n", period, HZ); apbt_writel(&dw_ced->timer, period, APBTMR_N_LOAD_COUNT); ctrl |= APBTMR_CONTROL_ENABLE; apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); break; case CLOCK_EVT_MODE_ONESHOT: ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL); /* * set free running mode, this mode will let timer reload max * timeout which will give time (3min on 25MHz clock) to rearm * the next event, therefore emulate the one-shot mode. */ ctrl &= ~APBTMR_CONTROL_ENABLE; ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC; apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); /* write again to set free running mode */ apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); /* * DW APB p. 46, load counter with all 1s before starting free * running mode. */ apbt_writel(&dw_ced->timer, ~0, APBTMR_N_LOAD_COUNT); ctrl &= ~APBTMR_CONTROL_INT; ctrl |= APBTMR_CONTROL_ENABLE; apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); break; case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_SHUTDOWN: ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL); ctrl &= ~APBTMR_CONTROL_ENABLE; apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); break; case CLOCK_EVT_MODE_RESUME: apbt_enable_int(&dw_ced->timer); break; } } static int apbt_next_event(unsigned long delta, struct clock_event_device *evt) { unsigned long ctrl; struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt); /* Disable timer */ ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL); ctrl &= ~APBTMR_CONTROL_ENABLE; apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); /* write new count */ apbt_writel(&dw_ced->timer, delta, APBTMR_N_LOAD_COUNT); ctrl |= APBTMR_CONTROL_ENABLE; apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); return 0; } /** * dw_apb_clockevent_init() - use an APB timer as a clock_event_device * * @cpu: The CPU the events will be targeted at. 
* @name: The name used for the timer and the IRQ for it. * @rating: The rating to give the timer. * @base: I/O base for the timer registers. * @irq: The interrupt number to use for the timer. * @freq: The frequency that the timer counts at. * * This creates a clock_event_device for using with the generic clock layer * but does not start and register it. This should be done with * dw_apb_clockevent_register() as the next step. If this is the first time * it has been called for a timer then the IRQ will be requested, if not it * just be enabled to allow CPU hotplug to avoid repeatedly requesting and * releasing the IRQ. */ struct dw_apb_clock_event_device * dw_apb_clockevent_init(int cpu, const char *name, unsigned rating, void __iomem *base, int irq, unsigned long freq) { struct dw_apb_clock_event_device *dw_ced = kzalloc(sizeof(*dw_ced), GFP_KERNEL); int err; if (!dw_ced) return NULL; dw_ced->timer.base = base; dw_ced->timer.irq = irq; dw_ced->timer.freq = freq; clockevents_calc_mult_shift(&dw_ced->ced, freq, APBT_MIN_PERIOD); dw_ced->ced.max_delta_ns = clockevent_delta2ns(0x7fffffff, &dw_ced->ced); dw_ced->ced.min_delta_ns = clockevent_delta2ns(5000, &dw_ced->ced); dw_ced->ced.cpumask = cpumask_of(cpu); dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; dw_ced->ced.set_mode = apbt_set_mode; dw_ced->ced.set_next_event = apbt_next_event; dw_ced->ced.irq = dw_ced->timer.irq; dw_ced->ced.rating = rating; dw_ced->ced.name = name; dw_ced->irqaction.name = dw_ced->ced.name; dw_ced->irqaction.handler = dw_apb_clockevent_irq; dw_ced->irqaction.dev_id = &dw_ced->ced; dw_ced->irqaction.irq = irq; dw_ced->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING | IRQF_DISABLED; dw_ced->eoi = apbt_eoi; err = setup_irq(irq, &dw_ced->irqaction); if (err) { pr_err("failed to request timer irq\n"); kfree(dw_ced); dw_ced = NULL; } return dw_ced; } /** * dw_apb_clockevent_resume() - resume a clock that has been paused. 
* * @dw_ced: The APB clock to resume. */ void dw_apb_clockevent_resume(struct dw_apb_clock_event_device *dw_ced) { enable_irq(dw_ced->timer.irq); } /** * dw_apb_clockevent_stop() - stop the clock_event_device and release the IRQ. * * @dw_ced: The APB clock to stop generating the events. */ void dw_apb_clockevent_stop(struct dw_apb_clock_event_device *dw_ced) { free_irq(dw_ced->timer.irq, &dw_ced->ced); } /** * dw_apb_clockevent_register() - register the clock with the generic layer * * @dw_ced: The APB clock to register as a clock_event_device. */ void dw_apb_clockevent_register(struct dw_apb_clock_event_device *dw_ced) { apbt_writel(&dw_ced->timer, 0, APBTMR_N_CONTROL); clockevents_register_device(&dw_ced->ced); apbt_enable_int(&dw_ced->timer); } /** * dw_apb_clocksource_start() - start the clocksource counting. * * @dw_cs: The clocksource to start. * * This is used to start the clocksource before registration and can be used * to enable calibration of timers. */ void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs) { /* * start count down from 0xffff_ffff. this is done by toggling the * enable bit then load initial load count to ~0. 
*/ unsigned long ctrl = apbt_readl(&dw_cs->timer, APBTMR_N_CONTROL); ctrl &= ~APBTMR_CONTROL_ENABLE; apbt_writel(&dw_cs->timer, ctrl, APBTMR_N_CONTROL); apbt_writel(&dw_cs->timer, ~0, APBTMR_N_LOAD_COUNT); /* enable, mask interrupt */ ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC; ctrl |= (APBTMR_CONTROL_ENABLE | APBTMR_CONTROL_INT); apbt_writel(&dw_cs->timer, ctrl, APBTMR_N_CONTROL); /* read it once to get cached counter value initialized */ dw_apb_clocksource_read(dw_cs); } static cycle_t __apbt_read_clocksource(struct clocksource *cs) { unsigned long current_count; struct dw_apb_clocksource *dw_cs = clocksource_to_dw_apb_clocksource(cs); current_count = apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE); return (cycle_t)~current_count; } static void apbt_restart_clocksource(struct clocksource *cs) { struct dw_apb_clocksource *dw_cs = clocksource_to_dw_apb_clocksource(cs); dw_apb_clocksource_start(dw_cs); } /** * dw_apb_clocksource_init() - use an APB timer as a clocksource. * * @rating: The rating to give the clocksource. * @name: The name for the clocksource. * @base: The I/O base for the timer registers. * @freq: The frequency that the timer counts at. * * This creates a clocksource using an APB timer but does not yet register it * with the clocksource system. This should be done with * dw_apb_clocksource_register() as the next step. */ struct dw_apb_clocksource * dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base, unsigned long freq) { struct dw_apb_clocksource *dw_cs = kzalloc(sizeof(*dw_cs), GFP_KERNEL); if (!dw_cs) return NULL; dw_cs->timer.base = base; dw_cs->timer.freq = freq; dw_cs->cs.name = name; dw_cs->cs.rating = rating; dw_cs->cs.read = __apbt_read_clocksource; dw_cs->cs.mask = CLOCKSOURCE_MASK(32); dw_cs->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS; dw_cs->cs.resume = apbt_restart_clocksource; return dw_cs; } /** * dw_apb_clocksource_register() - register the APB clocksource. * * @dw_cs: The clocksource to register. 
*/ void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs) { clocksource_register_hz(&dw_cs->cs, dw_cs->timer.freq); } /** * dw_apb_clocksource_read() - read the current value of a clocksource. * * @dw_cs: The clocksource to read. */ cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs) { return (cycle_t)~apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE); } /** * dw_apb_clocksource_unregister() - unregister and free a clocksource. * * @dw_cs: The clocksource to unregister/free. */ void dw_apb_clocksource_unregister(struct dw_apb_clocksource *dw_cs) { clocksource_unregister(&dw_cs->cs); kfree(dw_cs); }
gpl-2.0
chijure/phoenix_v1
drivers/clocksource/dw_apb_timer.c
7501
11521
/* * (C) Copyright 2009 Intel Corporation * Author: Jacob Pan (jacob.jun.pan@intel.com) * * Shared with ARM platforms, Jamie Iles, Picochip 2011 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Support for the Synopsys DesignWare APB Timers. */ #include <linux/dw_apb_timer.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/slab.h> #define APBT_MIN_PERIOD 4 #define APBT_MIN_DELTA_USEC 200 #define APBTMR_N_LOAD_COUNT 0x00 #define APBTMR_N_CURRENT_VALUE 0x04 #define APBTMR_N_CONTROL 0x08 #define APBTMR_N_EOI 0x0c #define APBTMR_N_INT_STATUS 0x10 #define APBTMRS_INT_STATUS 0xa0 #define APBTMRS_EOI 0xa4 #define APBTMRS_RAW_INT_STATUS 0xa8 #define APBTMRS_COMP_VERSION 0xac #define APBTMR_CONTROL_ENABLE (1 << 0) /* 1: periodic, 0:free running. */ #define APBTMR_CONTROL_MODE_PERIODIC (1 << 1) #define APBTMR_CONTROL_INT (1 << 2) static inline struct dw_apb_clock_event_device * ced_to_dw_apb_ced(struct clock_event_device *evt) { return container_of(evt, struct dw_apb_clock_event_device, ced); } static inline struct dw_apb_clocksource * clocksource_to_dw_apb_clocksource(struct clocksource *cs) { return container_of(cs, struct dw_apb_clocksource, cs); } static unsigned long apbt_readl(struct dw_apb_timer *timer, unsigned long offs) { return readl(timer->base + offs); } static void apbt_writel(struct dw_apb_timer *timer, unsigned long val, unsigned long offs) { writel(val, timer->base + offs); } static void apbt_disable_int(struct dw_apb_timer *timer) { unsigned long ctrl = apbt_readl(timer, APBTMR_N_CONTROL); ctrl |= APBTMR_CONTROL_INT; apbt_writel(timer, ctrl, APBTMR_N_CONTROL); } /** * dw_apb_clockevent_pause() - stop the clock_event_device from running * * @dw_ced: The APB clock to stop generating events. 
*/ void dw_apb_clockevent_pause(struct dw_apb_clock_event_device *dw_ced) { disable_irq(dw_ced->timer.irq); apbt_disable_int(&dw_ced->timer); } static void apbt_eoi(struct dw_apb_timer *timer) { apbt_readl(timer, APBTMR_N_EOI); } static irqreturn_t dw_apb_clockevent_irq(int irq, void *data) { struct clock_event_device *evt = data; struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt); if (!evt->event_handler) { pr_info("Spurious APBT timer interrupt %d", irq); return IRQ_NONE; } if (dw_ced->eoi) dw_ced->eoi(&dw_ced->timer); evt->event_handler(evt); return IRQ_HANDLED; } static void apbt_enable_int(struct dw_apb_timer *timer) { unsigned long ctrl = apbt_readl(timer, APBTMR_N_CONTROL); /* clear pending intr */ apbt_readl(timer, APBTMR_N_EOI); ctrl &= ~APBTMR_CONTROL_INT; apbt_writel(timer, ctrl, APBTMR_N_CONTROL); } static void apbt_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { unsigned long ctrl; unsigned long period; struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt); pr_debug("%s CPU %d mode=%d\n", __func__, first_cpu(*evt->cpumask), mode); switch (mode) { case CLOCK_EVT_MODE_PERIODIC: period = DIV_ROUND_UP(dw_ced->timer.freq, HZ); ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL); ctrl |= APBTMR_CONTROL_MODE_PERIODIC; apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); /* * DW APB p. 46, have to disable timer before load counter, * may cause sync problem. 
*/ ctrl &= ~APBTMR_CONTROL_ENABLE; apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); udelay(1); pr_debug("Setting clock period %lu for HZ %d\n", period, HZ); apbt_writel(&dw_ced->timer, period, APBTMR_N_LOAD_COUNT); ctrl |= APBTMR_CONTROL_ENABLE; apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); break; case CLOCK_EVT_MODE_ONESHOT: ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL); /* * set free running mode, this mode will let timer reload max * timeout which will give time (3min on 25MHz clock) to rearm * the next event, therefore emulate the one-shot mode. */ ctrl &= ~APBTMR_CONTROL_ENABLE; ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC; apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); /* write again to set free running mode */ apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); /* * DW APB p. 46, load counter with all 1s before starting free * running mode. */ apbt_writel(&dw_ced->timer, ~0, APBTMR_N_LOAD_COUNT); ctrl &= ~APBTMR_CONTROL_INT; ctrl |= APBTMR_CONTROL_ENABLE; apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); break; case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_SHUTDOWN: ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL); ctrl &= ~APBTMR_CONTROL_ENABLE; apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); break; case CLOCK_EVT_MODE_RESUME: apbt_enable_int(&dw_ced->timer); break; } } static int apbt_next_event(unsigned long delta, struct clock_event_device *evt) { unsigned long ctrl; struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt); /* Disable timer */ ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL); ctrl &= ~APBTMR_CONTROL_ENABLE; apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); /* write new count */ apbt_writel(&dw_ced->timer, delta, APBTMR_N_LOAD_COUNT); ctrl |= APBTMR_CONTROL_ENABLE; apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); return 0; } /** * dw_apb_clockevent_init() - use an APB timer as a clock_event_device * * @cpu: The CPU the events will be targeted at. 
* @name: The name used for the timer and the IRQ for it. * @rating: The rating to give the timer. * @base: I/O base for the timer registers. * @irq: The interrupt number to use for the timer. * @freq: The frequency that the timer counts at. * * This creates a clock_event_device for using with the generic clock layer * but does not start and register it. This should be done with * dw_apb_clockevent_register() as the next step. If this is the first time * it has been called for a timer then the IRQ will be requested, if not it * just be enabled to allow CPU hotplug to avoid repeatedly requesting and * releasing the IRQ. */ struct dw_apb_clock_event_device * dw_apb_clockevent_init(int cpu, const char *name, unsigned rating, void __iomem *base, int irq, unsigned long freq) { struct dw_apb_clock_event_device *dw_ced = kzalloc(sizeof(*dw_ced), GFP_KERNEL); int err; if (!dw_ced) return NULL; dw_ced->timer.base = base; dw_ced->timer.irq = irq; dw_ced->timer.freq = freq; clockevents_calc_mult_shift(&dw_ced->ced, freq, APBT_MIN_PERIOD); dw_ced->ced.max_delta_ns = clockevent_delta2ns(0x7fffffff, &dw_ced->ced); dw_ced->ced.min_delta_ns = clockevent_delta2ns(5000, &dw_ced->ced); dw_ced->ced.cpumask = cpumask_of(cpu); dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; dw_ced->ced.set_mode = apbt_set_mode; dw_ced->ced.set_next_event = apbt_next_event; dw_ced->ced.irq = dw_ced->timer.irq; dw_ced->ced.rating = rating; dw_ced->ced.name = name; dw_ced->irqaction.name = dw_ced->ced.name; dw_ced->irqaction.handler = dw_apb_clockevent_irq; dw_ced->irqaction.dev_id = &dw_ced->ced; dw_ced->irqaction.irq = irq; dw_ced->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING | IRQF_DISABLED; dw_ced->eoi = apbt_eoi; err = setup_irq(irq, &dw_ced->irqaction); if (err) { pr_err("failed to request timer irq\n"); kfree(dw_ced); dw_ced = NULL; } return dw_ced; } /** * dw_apb_clockevent_resume() - resume a clock that has been paused. 
* * @dw_ced: The APB clock to resume. */ void dw_apb_clockevent_resume(struct dw_apb_clock_event_device *dw_ced) { enable_irq(dw_ced->timer.irq); } /** * dw_apb_clockevent_stop() - stop the clock_event_device and release the IRQ. * * @dw_ced: The APB clock to stop generating the events. */ void dw_apb_clockevent_stop(struct dw_apb_clock_event_device *dw_ced) { free_irq(dw_ced->timer.irq, &dw_ced->ced); } /** * dw_apb_clockevent_register() - register the clock with the generic layer * * @dw_ced: The APB clock to register as a clock_event_device. */ void dw_apb_clockevent_register(struct dw_apb_clock_event_device *dw_ced) { apbt_writel(&dw_ced->timer, 0, APBTMR_N_CONTROL); clockevents_register_device(&dw_ced->ced); apbt_enable_int(&dw_ced->timer); } /** * dw_apb_clocksource_start() - start the clocksource counting. * * @dw_cs: The clocksource to start. * * This is used to start the clocksource before registration and can be used * to enable calibration of timers. */ void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs) { /* * start count down from 0xffff_ffff. this is done by toggling the * enable bit then load initial load count to ~0. 
*/ unsigned long ctrl = apbt_readl(&dw_cs->timer, APBTMR_N_CONTROL); ctrl &= ~APBTMR_CONTROL_ENABLE; apbt_writel(&dw_cs->timer, ctrl, APBTMR_N_CONTROL); apbt_writel(&dw_cs->timer, ~0, APBTMR_N_LOAD_COUNT); /* enable, mask interrupt */ ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC; ctrl |= (APBTMR_CONTROL_ENABLE | APBTMR_CONTROL_INT); apbt_writel(&dw_cs->timer, ctrl, APBTMR_N_CONTROL); /* read it once to get cached counter value initialized */ dw_apb_clocksource_read(dw_cs); } static cycle_t __apbt_read_clocksource(struct clocksource *cs) { unsigned long current_count; struct dw_apb_clocksource *dw_cs = clocksource_to_dw_apb_clocksource(cs); current_count = apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE); return (cycle_t)~current_count; } static void apbt_restart_clocksource(struct clocksource *cs) { struct dw_apb_clocksource *dw_cs = clocksource_to_dw_apb_clocksource(cs); dw_apb_clocksource_start(dw_cs); } /** * dw_apb_clocksource_init() - use an APB timer as a clocksource. * * @rating: The rating to give the clocksource. * @name: The name for the clocksource. * @base: The I/O base for the timer registers. * @freq: The frequency that the timer counts at. * * This creates a clocksource using an APB timer but does not yet register it * with the clocksource system. This should be done with * dw_apb_clocksource_register() as the next step. */ struct dw_apb_clocksource * dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base, unsigned long freq) { struct dw_apb_clocksource *dw_cs = kzalloc(sizeof(*dw_cs), GFP_KERNEL); if (!dw_cs) return NULL; dw_cs->timer.base = base; dw_cs->timer.freq = freq; dw_cs->cs.name = name; dw_cs->cs.rating = rating; dw_cs->cs.read = __apbt_read_clocksource; dw_cs->cs.mask = CLOCKSOURCE_MASK(32); dw_cs->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS; dw_cs->cs.resume = apbt_restart_clocksource; return dw_cs; } /** * dw_apb_clocksource_register() - register the APB clocksource. * * @dw_cs: The clocksource to register. 
*/ void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs) { clocksource_register_hz(&dw_cs->cs, dw_cs->timer.freq); } /** * dw_apb_clocksource_read() - read the current value of a clocksource. * * @dw_cs: The clocksource to read. */ cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs) { return (cycle_t)~apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE); } /** * dw_apb_clocksource_unregister() - unregister and free a clocksource. * * @dw_cs: The clocksource to unregister/free. */ void dw_apb_clocksource_unregister(struct dw_apb_clocksource *dw_cs) { clocksource_unregister(&dw_cs->cs); kfree(dw_cs); }
gpl-2.0
ASAZING/Android-Kernel-Gt-s7390l
drivers/media/video/bt8xx/bttv-risc.c
8525
25962
/* bttv-risc.c -- interfaces to other kernel modules bttv risc code handling - memory management - generation (c) 2000-2003 Gerd Knorr <kraxel@bytesex.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/vmalloc.h> #include <linux/interrupt.h> #include <asm/page.h> #include <asm/pgtable.h> #include <media/v4l2-ioctl.h> #include "bttvp.h" #define VCR_HACK_LINES 4 /* ---------------------------------------------------------- */ /* risc code generators */ int bttv_risc_packed(struct bttv *btv, struct btcx_riscmem *risc, struct scatterlist *sglist, unsigned int offset, unsigned int bpl, unsigned int padding, unsigned int skip_lines, unsigned int store_lines) { u32 instructions,line,todo; struct scatterlist *sg; __le32 *rp; int rc; /* estimate risc mem: worst case is one write per page border + one write per scan line + sync + jump (all 2 dwords). padding can cause next bpl to start close to a page border. 
First DMA region may be smaller than PAGE_SIZE */ instructions = skip_lines * 4; instructions += (1 + ((bpl + padding) * store_lines) / PAGE_SIZE + store_lines) * 8; instructions += 2 * 8; if ((rc = btcx_riscmem_alloc(btv->c.pci,risc,instructions)) < 0) return rc; /* sync instruction */ rp = risc->cpu; *(rp++) = cpu_to_le32(BT848_RISC_SYNC|BT848_FIFO_STATUS_FM1); *(rp++) = cpu_to_le32(0); while (skip_lines-- > 0) { *(rp++) = cpu_to_le32(BT848_RISC_SKIP | BT848_RISC_SOL | BT848_RISC_EOL | bpl); } /* scan lines */ sg = sglist; for (line = 0; line < store_lines; line++) { if ((btv->opt_vcr_hack) && (line >= (store_lines - VCR_HACK_LINES))) continue; while (offset && offset >= sg_dma_len(sg)) { offset -= sg_dma_len(sg); sg++; } if (bpl <= sg_dma_len(sg)-offset) { /* fits into current chunk */ *(rp++)=cpu_to_le32(BT848_RISC_WRITE|BT848_RISC_SOL| BT848_RISC_EOL|bpl); *(rp++)=cpu_to_le32(sg_dma_address(sg)+offset); offset+=bpl; } else { /* scanline needs to be splitted */ todo = bpl; *(rp++)=cpu_to_le32(BT848_RISC_WRITE|BT848_RISC_SOL| (sg_dma_len(sg)-offset)); *(rp++)=cpu_to_le32(sg_dma_address(sg)+offset); todo -= (sg_dma_len(sg)-offset); offset = 0; sg++; while (todo > sg_dma_len(sg)) { *(rp++)=cpu_to_le32(BT848_RISC_WRITE| sg_dma_len(sg)); *(rp++)=cpu_to_le32(sg_dma_address(sg)); todo -= sg_dma_len(sg); sg++; } *(rp++)=cpu_to_le32(BT848_RISC_WRITE|BT848_RISC_EOL| todo); *(rp++)=cpu_to_le32(sg_dma_address(sg)); offset += todo; } offset += padding; } /* save pointer to jmp instruction address */ risc->jmp = rp; BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size); return 0; } static int bttv_risc_planar(struct bttv *btv, struct btcx_riscmem *risc, struct scatterlist *sglist, unsigned int yoffset, unsigned int ybpl, unsigned int ypadding, unsigned int ylines, unsigned int uoffset, unsigned int voffset, unsigned int hshift, unsigned int vshift, unsigned int cpadding) { unsigned int instructions,line,todo,ylen,chroma; __le32 *rp; u32 ri; struct scatterlist 
*ysg; struct scatterlist *usg; struct scatterlist *vsg; int topfield = (0 == yoffset); int rc; /* estimate risc mem: worst case is one write per page border + one write per scan line (5 dwords) plus sync + jump (2 dwords) */ instructions = ((3 + (ybpl + ypadding) * ylines * 2) / PAGE_SIZE) + ylines; instructions += 2; if ((rc = btcx_riscmem_alloc(btv->c.pci,risc,instructions*4*5)) < 0) return rc; /* sync instruction */ rp = risc->cpu; *(rp++) = cpu_to_le32(BT848_RISC_SYNC|BT848_FIFO_STATUS_FM3); *(rp++) = cpu_to_le32(0); /* scan lines */ ysg = sglist; usg = sglist; vsg = sglist; for (line = 0; line < ylines; line++) { if ((btv->opt_vcr_hack) && (line >= (ylines - VCR_HACK_LINES))) continue; switch (vshift) { case 0: chroma = 1; break; case 1: if (topfield) chroma = ((line & 1) == 0); else chroma = ((line & 1) == 1); break; case 2: if (topfield) chroma = ((line & 3) == 0); else chroma = ((line & 3) == 2); break; default: chroma = 0; break; } for (todo = ybpl; todo > 0; todo -= ylen) { /* go to next sg entry if needed */ while (yoffset && yoffset >= sg_dma_len(ysg)) { yoffset -= sg_dma_len(ysg); ysg++; } while (uoffset && uoffset >= sg_dma_len(usg)) { uoffset -= sg_dma_len(usg); usg++; } while (voffset && voffset >= sg_dma_len(vsg)) { voffset -= sg_dma_len(vsg); vsg++; } /* calculate max number of bytes we can write */ ylen = todo; if (yoffset + ylen > sg_dma_len(ysg)) ylen = sg_dma_len(ysg) - yoffset; if (chroma) { if (uoffset + (ylen>>hshift) > sg_dma_len(usg)) ylen = (sg_dma_len(usg) - uoffset) << hshift; if (voffset + (ylen>>hshift) > sg_dma_len(vsg)) ylen = (sg_dma_len(vsg) - voffset) << hshift; ri = BT848_RISC_WRITE123; } else { ri = BT848_RISC_WRITE1S23; } if (ybpl == todo) ri |= BT848_RISC_SOL; if (ylen == todo) ri |= BT848_RISC_EOL; /* write risc instruction */ *(rp++)=cpu_to_le32(ri | ylen); *(rp++)=cpu_to_le32(((ylen >> hshift) << 16) | (ylen >> hshift)); *(rp++)=cpu_to_le32(sg_dma_address(ysg)+yoffset); yoffset += ylen; if (chroma) { 
*(rp++)=cpu_to_le32(sg_dma_address(usg)+uoffset); uoffset += ylen >> hshift; *(rp++)=cpu_to_le32(sg_dma_address(vsg)+voffset); voffset += ylen >> hshift; } } yoffset += ypadding; if (chroma) { uoffset += cpadding; voffset += cpadding; } } /* save pointer to jmp instruction address */ risc->jmp = rp; BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size); return 0; } static int bttv_risc_overlay(struct bttv *btv, struct btcx_riscmem *risc, const struct bttv_format *fmt, struct bttv_overlay *ov, int skip_even, int skip_odd) { int dwords, rc, line, maxy, start, end; unsigned skip, nskips; struct btcx_skiplist *skips; __le32 *rp; u32 ri,ra; u32 addr; /* skip list for window clipping */ if (NULL == (skips = kmalloc(sizeof(*skips) * ov->nclips,GFP_KERNEL))) return -ENOMEM; /* estimate risc mem: worst case is (1.5*clip+1) * lines instructions + sync + jump (all 2 dwords) */ dwords = (3 * ov->nclips + 2) * ((skip_even || skip_odd) ? (ov->w.height+1)>>1 : ov->w.height); dwords += 4; if ((rc = btcx_riscmem_alloc(btv->c.pci,risc,dwords*4)) < 0) { kfree(skips); return rc; } /* sync instruction */ rp = risc->cpu; *(rp++) = cpu_to_le32(BT848_RISC_SYNC|BT848_FIFO_STATUS_FM1); *(rp++) = cpu_to_le32(0); addr = (unsigned long)btv->fbuf.base; addr += btv->fbuf.fmt.bytesperline * ov->w.top; addr += (fmt->depth >> 3) * ov->w.left; /* scan lines */ for (maxy = -1, line = 0; line < ov->w.height; line++, addr += btv->fbuf.fmt.bytesperline) { if ((btv->opt_vcr_hack) && (line >= (ov->w.height - VCR_HACK_LINES))) continue; if ((line%2) == 0 && skip_even) continue; if ((line%2) == 1 && skip_odd) continue; /* calculate clipping */ if (line > maxy) btcx_calc_skips(line, ov->w.width, &maxy, skips, &nskips, ov->clips, ov->nclips); /* write out risc code */ for (start = 0, skip = 0; start < ov->w.width; start = end) { if (skip >= nskips) { ri = BT848_RISC_WRITE; end = ov->w.width; } else if (start < skips[skip].start) { ri = BT848_RISC_WRITE; end = skips[skip].start; } else { ri = 
BT848_RISC_SKIP; end = skips[skip].end; skip++; } if (BT848_RISC_WRITE == ri) ra = addr + (fmt->depth>>3)*start; else ra = 0; if (0 == start) ri |= BT848_RISC_SOL; if (ov->w.width == end) ri |= BT848_RISC_EOL; ri |= (fmt->depth>>3) * (end-start); *(rp++)=cpu_to_le32(ri); if (0 != ra) *(rp++)=cpu_to_le32(ra); } } /* save pointer to jmp instruction address */ risc->jmp = rp; BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size); kfree(skips); return 0; } /* ---------------------------------------------------------- */ static void bttv_calc_geo_old(struct bttv *btv, struct bttv_geometry *geo, int width, int height, int interleaved, const struct bttv_tvnorm *tvnorm) { u32 xsf, sr; int vdelay; int swidth = tvnorm->swidth; int totalwidth = tvnorm->totalwidth; int scaledtwidth = tvnorm->scaledtwidth; if (btv->input == btv->dig) { swidth = 720; totalwidth = 858; scaledtwidth = 858; } vdelay = tvnorm->vdelay; xsf = (width*scaledtwidth)/swidth; geo->hscale = ((totalwidth*4096UL)/xsf-4096); geo->hdelay = tvnorm->hdelayx1; geo->hdelay = (geo->hdelay*width)/swidth; geo->hdelay &= 0x3fe; sr = ((tvnorm->sheight >> (interleaved?0:1))*512)/height - 512; geo->vscale = (0x10000UL-sr) & 0x1fff; geo->crop = ((width>>8)&0x03) | ((geo->hdelay>>6)&0x0c) | ((tvnorm->sheight>>4)&0x30) | ((vdelay>>2)&0xc0); geo->vscale |= interleaved ? (BT848_VSCALE_INT<<8) : 0; geo->vdelay = vdelay; geo->width = width; geo->sheight = tvnorm->sheight; geo->vtotal = tvnorm->vtotal; if (btv->opt_combfilter) { geo->vtc = (width < 193) ? 2 : ((width < 385) ? 1 : 0); geo->comb = (width < 769) ? 
1 : 0; } else { geo->vtc = 0; geo->comb = 0; } } static void bttv_calc_geo (struct bttv * btv, struct bttv_geometry * geo, unsigned int width, unsigned int height, int both_fields, const struct bttv_tvnorm * tvnorm, const struct v4l2_rect * crop) { unsigned int c_width; unsigned int c_height; u32 sr; if ((crop->left == tvnorm->cropcap.defrect.left && crop->top == tvnorm->cropcap.defrect.top && crop->width == tvnorm->cropcap.defrect.width && crop->height == tvnorm->cropcap.defrect.height && width <= tvnorm->swidth /* see PAL-Nc et al */) || btv->input == btv->dig) { bttv_calc_geo_old(btv, geo, width, height, both_fields, tvnorm); return; } /* For bug compatibility the image size checks permit scale factors > 16. See bttv_crop_calc_limits(). */ c_width = min((unsigned int) crop->width, width * 16); c_height = min((unsigned int) crop->height, height * 16); geo->width = width; geo->hscale = (c_width * 4096U + (width >> 1)) / width - 4096; /* Even to store Cb first, odd for Cr. */ geo->hdelay = ((crop->left * width + c_width) / c_width) & ~1; geo->sheight = c_height; geo->vdelay = crop->top - tvnorm->cropcap.bounds.top + MIN_VDELAY; sr = c_height >> !both_fields; sr = (sr * 512U + (height >> 1)) / height - 512; geo->vscale = (0x10000UL - sr) & 0x1fff; geo->vscale |= both_fields ? (BT848_VSCALE_INT << 8) : 0; geo->vtotal = tvnorm->vtotal; geo->crop = (((geo->width >> 8) & 0x03) | ((geo->hdelay >> 6) & 0x0c) | ((geo->sheight >> 4) & 0x30) | ((geo->vdelay >> 2) & 0xc0)); if (btv->opt_combfilter) { geo->vtc = (width < 193) ? 2 : ((width < 385) ? 1 : 0); geo->comb = (width < 769) ? 1 : 0; } else { geo->vtc = 0; geo->comb = 0; } } static void bttv_apply_geo(struct bttv *btv, struct bttv_geometry *geo, int odd) { int off = odd ? 
0x80 : 0x00; if (geo->comb) btor(BT848_VSCALE_COMB, BT848_E_VSCALE_HI+off); else btand(~BT848_VSCALE_COMB, BT848_E_VSCALE_HI+off); btwrite(geo->vtc, BT848_E_VTC+off); btwrite(geo->hscale >> 8, BT848_E_HSCALE_HI+off); btwrite(geo->hscale & 0xff, BT848_E_HSCALE_LO+off); btaor((geo->vscale>>8), 0xe0, BT848_E_VSCALE_HI+off); btwrite(geo->vscale & 0xff, BT848_E_VSCALE_LO+off); btwrite(geo->width & 0xff, BT848_E_HACTIVE_LO+off); btwrite(geo->hdelay & 0xff, BT848_E_HDELAY_LO+off); btwrite(geo->sheight & 0xff, BT848_E_VACTIVE_LO+off); btwrite(geo->vdelay & 0xff, BT848_E_VDELAY_LO+off); btwrite(geo->crop, BT848_E_CROP+off); btwrite(geo->vtotal>>8, BT848_VTOTAL_HI); btwrite(geo->vtotal & 0xff, BT848_VTOTAL_LO); } /* ---------------------------------------------------------- */ /* risc group / risc main loop / dma management */ void bttv_set_dma(struct bttv *btv, int override) { unsigned long cmd; int capctl; btv->cap_ctl = 0; if (NULL != btv->curr.top) btv->cap_ctl |= 0x02; if (NULL != btv->curr.bottom) btv->cap_ctl |= 0x01; if (NULL != btv->cvbi) btv->cap_ctl |= 0x0c; capctl = 0; capctl |= (btv->cap_ctl & 0x03) ? 0x03 : 0x00; /* capture */ capctl |= (btv->cap_ctl & 0x0c) ? 0x0c : 0x00; /* vbi data */ capctl |= override; d2printk("%d: capctl=%x lirq=%d top=%08llx/%08llx even=%08llx/%08llx\n", btv->c.nr,capctl,btv->loop_irq, btv->cvbi ? (unsigned long long)btv->cvbi->top.dma : 0, btv->curr.top ? (unsigned long long)btv->curr.top->top.dma : 0, btv->cvbi ? (unsigned long long)btv->cvbi->bottom.dma : 0, btv->curr.bottom ? 
(unsigned long long)btv->curr.bottom->bottom.dma : 0); cmd = BT848_RISC_JUMP; if (btv->loop_irq) { cmd |= BT848_RISC_IRQ; cmd |= (btv->loop_irq & 0x0f) << 16; cmd |= (~btv->loop_irq & 0x0f) << 20; } if (btv->curr.frame_irq || btv->loop_irq || btv->cvbi) { mod_timer(&btv->timeout, jiffies+BTTV_TIMEOUT); } else { del_timer(&btv->timeout); } btv->main.cpu[RISC_SLOT_LOOP] = cpu_to_le32(cmd); btaor(capctl, ~0x0f, BT848_CAP_CTL); if (capctl) { if (btv->dma_on) return; btwrite(btv->main.dma, BT848_RISC_STRT_ADD); btor(3, BT848_GPIO_DMA_CTL); btv->dma_on = 1; } else { if (!btv->dma_on) return; btand(~3, BT848_GPIO_DMA_CTL); btv->dma_on = 0; } return; } int bttv_risc_init_main(struct bttv *btv) { int rc; if ((rc = btcx_riscmem_alloc(btv->c.pci,&btv->main,PAGE_SIZE)) < 0) return rc; dprintk("%d: risc main @ %08llx\n", btv->c.nr, (unsigned long long)btv->main.dma); btv->main.cpu[0] = cpu_to_le32(BT848_RISC_SYNC | BT848_RISC_RESYNC | BT848_FIFO_STATUS_VRE); btv->main.cpu[1] = cpu_to_le32(0); btv->main.cpu[2] = cpu_to_le32(BT848_RISC_JUMP); btv->main.cpu[3] = cpu_to_le32(btv->main.dma + (4<<2)); /* top field */ btv->main.cpu[4] = cpu_to_le32(BT848_RISC_JUMP); btv->main.cpu[5] = cpu_to_le32(btv->main.dma + (6<<2)); btv->main.cpu[6] = cpu_to_le32(BT848_RISC_JUMP); btv->main.cpu[7] = cpu_to_le32(btv->main.dma + (8<<2)); btv->main.cpu[8] = cpu_to_le32(BT848_RISC_SYNC | BT848_RISC_RESYNC | BT848_FIFO_STATUS_VRO); btv->main.cpu[9] = cpu_to_le32(0); /* bottom field */ btv->main.cpu[10] = cpu_to_le32(BT848_RISC_JUMP); btv->main.cpu[11] = cpu_to_le32(btv->main.dma + (12<<2)); btv->main.cpu[12] = cpu_to_le32(BT848_RISC_JUMP); btv->main.cpu[13] = cpu_to_le32(btv->main.dma + (14<<2)); /* jump back to top field */ btv->main.cpu[14] = cpu_to_le32(BT848_RISC_JUMP); btv->main.cpu[15] = cpu_to_le32(btv->main.dma + (0<<2)); return 0; } int bttv_risc_hook(struct bttv *btv, int slot, struct btcx_riscmem *risc, int irqflags) { unsigned long cmd; unsigned long next = btv->main.dma + ((slot+2) << 2); 
if (NULL == risc) { d2printk("%d: risc=%p slot[%d]=NULL\n", btv->c.nr, risc, slot); btv->main.cpu[slot+1] = cpu_to_le32(next); } else { d2printk("%d: risc=%p slot[%d]=%08llx irq=%d\n", btv->c.nr, risc, slot, (unsigned long long)risc->dma, irqflags); cmd = BT848_RISC_JUMP; if (irqflags) { cmd |= BT848_RISC_IRQ; cmd |= (irqflags & 0x0f) << 16; cmd |= (~irqflags & 0x0f) << 20; } risc->jmp[0] = cpu_to_le32(cmd); risc->jmp[1] = cpu_to_le32(next); btv->main.cpu[slot+1] = cpu_to_le32(risc->dma); } return 0; } void bttv_dma_free(struct videobuf_queue *q,struct bttv *btv, struct bttv_buffer *buf) { struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb); BUG_ON(in_interrupt()); videobuf_waiton(q, &buf->vb, 0, 0); videobuf_dma_unmap(q->dev, dma); videobuf_dma_free(dma); btcx_riscmem_free(btv->c.pci,&buf->bottom); btcx_riscmem_free(btv->c.pci,&buf->top); buf->vb.state = VIDEOBUF_NEEDS_INIT; } int bttv_buffer_activate_vbi(struct bttv *btv, struct bttv_buffer *vbi) { struct btcx_riscmem *top; struct btcx_riscmem *bottom; int top_irq_flags; int bottom_irq_flags; top = NULL; bottom = NULL; top_irq_flags = 0; bottom_irq_flags = 0; if (vbi) { unsigned int crop, vdelay; vbi->vb.state = VIDEOBUF_ACTIVE; list_del(&vbi->vb.queue); /* VDELAY is start of video, end of VBI capturing. 
*/ crop = btread(BT848_E_CROP); vdelay = btread(BT848_E_VDELAY_LO) + ((crop & 0xc0) << 2); if (vbi->geo.vdelay > vdelay) { vdelay = vbi->geo.vdelay & 0xfe; crop = (crop & 0x3f) | ((vbi->geo.vdelay >> 2) & 0xc0); btwrite(vdelay, BT848_E_VDELAY_LO); btwrite(crop, BT848_E_CROP); btwrite(vdelay, BT848_O_VDELAY_LO); btwrite(crop, BT848_O_CROP); } if (vbi->vbi_count[0] > 0) { top = &vbi->top; top_irq_flags = 4; } if (vbi->vbi_count[1] > 0) { top_irq_flags = 0; bottom = &vbi->bottom; bottom_irq_flags = 4; } } bttv_risc_hook(btv, RISC_SLOT_O_VBI, top, top_irq_flags); bttv_risc_hook(btv, RISC_SLOT_E_VBI, bottom, bottom_irq_flags); return 0; } int bttv_buffer_activate_video(struct bttv *btv, struct bttv_buffer_set *set) { /* video capture */ if (NULL != set->top && NULL != set->bottom) { if (set->top == set->bottom) { set->top->vb.state = VIDEOBUF_ACTIVE; if (set->top->vb.queue.next) list_del(&set->top->vb.queue); } else { set->top->vb.state = VIDEOBUF_ACTIVE; set->bottom->vb.state = VIDEOBUF_ACTIVE; if (set->top->vb.queue.next) list_del(&set->top->vb.queue); if (set->bottom->vb.queue.next) list_del(&set->bottom->vb.queue); } bttv_apply_geo(btv, &set->top->geo, 1); bttv_apply_geo(btv, &set->bottom->geo,0); bttv_risc_hook(btv, RISC_SLOT_O_FIELD, &set->top->top, set->top_irq); bttv_risc_hook(btv, RISC_SLOT_E_FIELD, &set->bottom->bottom, set->frame_irq); btaor((set->top->btformat & 0xf0) | (set->bottom->btformat & 0x0f), ~0xff, BT848_COLOR_FMT); btaor((set->top->btswap & 0x0a) | (set->bottom->btswap & 0x05), ~0x0f, BT848_COLOR_CTL); } else if (NULL != set->top) { set->top->vb.state = VIDEOBUF_ACTIVE; if (set->top->vb.queue.next) list_del(&set->top->vb.queue); bttv_apply_geo(btv, &set->top->geo,1); bttv_apply_geo(btv, &set->top->geo,0); bttv_risc_hook(btv, RISC_SLOT_O_FIELD, &set->top->top, set->frame_irq); bttv_risc_hook(btv, RISC_SLOT_E_FIELD, NULL, 0); btaor(set->top->btformat & 0xff, ~0xff, BT848_COLOR_FMT); btaor(set->top->btswap & 0x0f, ~0x0f, BT848_COLOR_CTL); } else if 
(NULL != set->bottom) { set->bottom->vb.state = VIDEOBUF_ACTIVE; if (set->bottom->vb.queue.next) list_del(&set->bottom->vb.queue); bttv_apply_geo(btv, &set->bottom->geo,1); bttv_apply_geo(btv, &set->bottom->geo,0); bttv_risc_hook(btv, RISC_SLOT_O_FIELD, NULL, 0); bttv_risc_hook(btv, RISC_SLOT_E_FIELD, &set->bottom->bottom, set->frame_irq); btaor(set->bottom->btformat & 0xff, ~0xff, BT848_COLOR_FMT); btaor(set->bottom->btswap & 0x0f, ~0x0f, BT848_COLOR_CTL); } else { bttv_risc_hook(btv, RISC_SLOT_O_FIELD, NULL, 0); bttv_risc_hook(btv, RISC_SLOT_E_FIELD, NULL, 0); } return 0; } /* ---------------------------------------------------------- */ /* calculate geometry, build risc code */ int bttv_buffer_risc(struct bttv *btv, struct bttv_buffer *buf) { const struct bttv_tvnorm *tvnorm = bttv_tvnorms + buf->tvnorm; struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb); dprintk("%d: buffer field: %s format: %s size: %dx%d\n", btv->c.nr, v4l2_field_names[buf->vb.field], buf->fmt->name, buf->vb.width, buf->vb.height); /* packed pixel modes */ if (buf->fmt->flags & FORMAT_FLAGS_PACKED) { int bpl = (buf->fmt->depth >> 3) * buf->vb.width; int bpf = bpl * (buf->vb.height >> 1); bttv_calc_geo(btv,&buf->geo,buf->vb.width,buf->vb.height, V4L2_FIELD_HAS_BOTH(buf->vb.field), tvnorm,&buf->crop); switch (buf->vb.field) { case V4L2_FIELD_TOP: bttv_risc_packed(btv,&buf->top,dma->sglist, /* offset */ 0,bpl, /* padding */ 0,/* skip_lines */ 0, buf->vb.height); break; case V4L2_FIELD_BOTTOM: bttv_risc_packed(btv,&buf->bottom,dma->sglist, 0,bpl,0,0,buf->vb.height); break; case V4L2_FIELD_INTERLACED: bttv_risc_packed(btv,&buf->top,dma->sglist, 0,bpl,bpl,0,buf->vb.height >> 1); bttv_risc_packed(btv,&buf->bottom,dma->sglist, bpl,bpl,bpl,0,buf->vb.height >> 1); break; case V4L2_FIELD_SEQ_TB: bttv_risc_packed(btv,&buf->top,dma->sglist, 0,bpl,0,0,buf->vb.height >> 1); bttv_risc_packed(btv,&buf->bottom,dma->sglist, bpf,bpl,0,0,buf->vb.height >> 1); break; default: BUG(); } } /* planar modes */ if 
(buf->fmt->flags & FORMAT_FLAGS_PLANAR) { int uoffset, voffset; int ypadding, cpadding, lines; /* calculate chroma offsets */ uoffset = buf->vb.width * buf->vb.height; voffset = buf->vb.width * buf->vb.height; if (buf->fmt->flags & FORMAT_FLAGS_CrCb) { /* Y-Cr-Cb plane order */ uoffset >>= buf->fmt->hshift; uoffset >>= buf->fmt->vshift; uoffset += voffset; } else { /* Y-Cb-Cr plane order */ voffset >>= buf->fmt->hshift; voffset >>= buf->fmt->vshift; voffset += uoffset; } switch (buf->vb.field) { case V4L2_FIELD_TOP: bttv_calc_geo(btv,&buf->geo,buf->vb.width, buf->vb.height,/* both_fields */ 0, tvnorm,&buf->crop); bttv_risc_planar(btv, &buf->top, dma->sglist, 0,buf->vb.width,0,buf->vb.height, uoffset,voffset,buf->fmt->hshift, buf->fmt->vshift,0); break; case V4L2_FIELD_BOTTOM: bttv_calc_geo(btv,&buf->geo,buf->vb.width, buf->vb.height,0, tvnorm,&buf->crop); bttv_risc_planar(btv, &buf->bottom, dma->sglist, 0,buf->vb.width,0,buf->vb.height, uoffset,voffset,buf->fmt->hshift, buf->fmt->vshift,0); break; case V4L2_FIELD_INTERLACED: bttv_calc_geo(btv,&buf->geo,buf->vb.width, buf->vb.height,1, tvnorm,&buf->crop); lines = buf->vb.height >> 1; ypadding = buf->vb.width; cpadding = buf->vb.width >> buf->fmt->hshift; bttv_risc_planar(btv,&buf->top, dma->sglist, 0,buf->vb.width,ypadding,lines, uoffset,voffset, buf->fmt->hshift, buf->fmt->vshift, cpadding); bttv_risc_planar(btv,&buf->bottom, dma->sglist, ypadding,buf->vb.width,ypadding,lines, uoffset+cpadding, voffset+cpadding, buf->fmt->hshift, buf->fmt->vshift, cpadding); break; case V4L2_FIELD_SEQ_TB: bttv_calc_geo(btv,&buf->geo,buf->vb.width, buf->vb.height,1, tvnorm,&buf->crop); lines = buf->vb.height >> 1; ypadding = buf->vb.width; cpadding = buf->vb.width >> buf->fmt->hshift; bttv_risc_planar(btv,&buf->top, dma->sglist, 0,buf->vb.width,0,lines, uoffset >> 1, voffset >> 1, buf->fmt->hshift, buf->fmt->vshift, 0); bttv_risc_planar(btv,&buf->bottom, dma->sglist, lines * ypadding,buf->vb.width,0,lines, lines * ypadding + 
(uoffset >> 1), lines * ypadding + (voffset >> 1), buf->fmt->hshift, buf->fmt->vshift, 0); break; default: BUG(); } } /* raw data */ if (buf->fmt->flags & FORMAT_FLAGS_RAW) { /* build risc code */ buf->vb.field = V4L2_FIELD_SEQ_TB; bttv_calc_geo(btv,&buf->geo,tvnorm->swidth,tvnorm->sheight, 1,tvnorm,&buf->crop); bttv_risc_packed(btv, &buf->top, dma->sglist, /* offset */ 0, RAW_BPL, /* padding */ 0, /* skip_lines */ 0, RAW_LINES); bttv_risc_packed(btv, &buf->bottom, dma->sglist, buf->vb.size/2 , RAW_BPL, 0, 0, RAW_LINES); } /* copy format info */ buf->btformat = buf->fmt->btformat; buf->btswap = buf->fmt->btswap; return 0; } /* ---------------------------------------------------------- */ /* calculate geometry, build risc code */ int bttv_overlay_risc(struct bttv *btv, struct bttv_overlay *ov, const struct bttv_format *fmt, struct bttv_buffer *buf) { /* check interleave, bottom+top fields */ dprintk("%d: overlay fields: %s format: %s size: %dx%d\n", btv->c.nr, v4l2_field_names[buf->vb.field], fmt->name, ov->w.width, ov->w.height); /* calculate geometry */ bttv_calc_geo(btv,&buf->geo,ov->w.width,ov->w.height, V4L2_FIELD_HAS_BOTH(ov->field), &bttv_tvnorms[ov->tvnorm],&buf->crop); /* build risc code */ switch (ov->field) { case V4L2_FIELD_TOP: bttv_risc_overlay(btv, &buf->top, fmt, ov, 0, 0); break; case V4L2_FIELD_BOTTOM: bttv_risc_overlay(btv, &buf->bottom, fmt, ov, 0, 0); break; case V4L2_FIELD_INTERLACED: bttv_risc_overlay(btv, &buf->top, fmt, ov, 0, 1); bttv_risc_overlay(btv, &buf->bottom, fmt, ov, 1, 0); break; default: BUG(); } /* copy format info */ buf->btformat = fmt->btformat; buf->btswap = fmt->btswap; buf->vb.field = ov->field; return 0; } /* * Local variables: * c-basic-offset: 8 * End: */
gpl-2.0
lehmanju/kernel_lenovo_lifetab_e10312
lib/bch.c
12877
36404
/* * Generic binary BCH encoding/decoding library * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Copyright © 2011 Parrot S.A. * * Author: Ivan Djelic <ivan.djelic@parrot.com> * * Description: * * This library provides runtime configurable encoding/decoding of binary * Bose-Chaudhuri-Hocquenghem (BCH) codes. * * Call init_bch to get a pointer to a newly allocated bch_control structure for * the given m (Galois field order), t (error correction capability) and * (optional) primitive polynomial parameters. * * Call encode_bch to compute and store ecc parity bytes to a given buffer. * Call decode_bch to detect and locate errors in received data. * * On systems supporting hw BCH features, intermediate results may be provided * to decode_bch in order to skip certain steps. See decode_bch() documentation * for details. * * Option CONFIG_BCH_CONST_PARAMS can be used to force fixed values of * parameters m and t; thus allowing extra compiler optimizations and providing * better (up to 2x) encoding performance. Using this option makes sense when * (m,t) are fixed and known in advance, e.g. when using BCH error correction * on a particular NAND flash device. * * Algorithmic details: * * Encoding is performed by processing 32 input bits in parallel, using 4 * remainder lookup tables. * * The final stage of decoding involves the following internal steps: * a. Syndrome computation * b. 
Error locator polynomial computation using Berlekamp-Massey algorithm * c. Error locator root finding (by far the most expensive step) * * In this implementation, step c is not performed using the usual Chien search. * Instead, an alternative approach described in [1] is used. It consists in * factoring the error locator polynomial using the Berlekamp Trace algorithm * (BTA) down to a certain degree (4), after which ad hoc low-degree polynomial * solving techniques [2] are used. The resulting algorithm, called BTZ, yields * much better performance than Chien search for usual (m,t) values (typically * m >= 13, t < 32, see [1]). * * [1] B. Biswas, V. Herbert. Efficient root finding of polynomials over fields * of characteristic 2, in: Western European Workshop on Research in Cryptology * - WEWoRC 2009, Graz, Austria, LNCS, Springer, July 2009, to appear. * [2] [Zin96] V.A. Zinoviev. On the solution of equations of degree 10 over * finite fields GF(2^q). In Rapport de recherche INRIA no 2829, 1996. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/bitops.h> #include <asm/byteorder.h> #include <linux/bch.h> #if defined(CONFIG_BCH_CONST_PARAMS) #define GF_M(_p) (CONFIG_BCH_CONST_M) #define GF_T(_p) (CONFIG_BCH_CONST_T) #define GF_N(_p) ((1 << (CONFIG_BCH_CONST_M))-1) #else #define GF_M(_p) ((_p)->m) #define GF_T(_p) ((_p)->t) #define GF_N(_p) ((_p)->n) #endif #define BCH_ECC_WORDS(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 32) #define BCH_ECC_BYTES(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 8) #ifndef dbg #define dbg(_fmt, args...) 
do {} while (0) #endif /* * represent a polynomial over GF(2^m) */ struct gf_poly { unsigned int deg; /* polynomial degree */ unsigned int c[0]; /* polynomial terms */ }; /* given its degree, compute a polynomial size in bytes */ #define GF_POLY_SZ(_d) (sizeof(struct gf_poly)+((_d)+1)*sizeof(unsigned int)) /* polynomial of degree 1 */ struct gf_poly_deg1 { struct gf_poly poly; unsigned int c[2]; }; /* * same as encode_bch(), but process input data one byte at a time */ static void encode_bch_unaligned(struct bch_control *bch, const unsigned char *data, unsigned int len, uint32_t *ecc) { int i; const uint32_t *p; const int l = BCH_ECC_WORDS(bch)-1; while (len--) { p = bch->mod8_tab + (l+1)*(((ecc[0] >> 24)^(*data++)) & 0xff); for (i = 0; i < l; i++) ecc[i] = ((ecc[i] << 8)|(ecc[i+1] >> 24))^(*p++); ecc[l] = (ecc[l] << 8)^(*p); } } /* * convert ecc bytes to aligned, zero-padded 32-bit ecc words */ static void load_ecc8(struct bch_control *bch, uint32_t *dst, const uint8_t *src) { uint8_t pad[4] = {0, 0, 0, 0}; unsigned int i, nwords = BCH_ECC_WORDS(bch)-1; for (i = 0; i < nwords; i++, src += 4) dst[i] = (src[0] << 24)|(src[1] << 16)|(src[2] << 8)|src[3]; memcpy(pad, src, BCH_ECC_BYTES(bch)-4*nwords); dst[nwords] = (pad[0] << 24)|(pad[1] << 16)|(pad[2] << 8)|pad[3]; } /* * convert 32-bit ecc words to ecc bytes */ static void store_ecc8(struct bch_control *bch, uint8_t *dst, const uint32_t *src) { uint8_t pad[4]; unsigned int i, nwords = BCH_ECC_WORDS(bch)-1; for (i = 0; i < nwords; i++) { *dst++ = (src[i] >> 24); *dst++ = (src[i] >> 16) & 0xff; *dst++ = (src[i] >> 8) & 0xff; *dst++ = (src[i] >> 0) & 0xff; } pad[0] = (src[nwords] >> 24); pad[1] = (src[nwords] >> 16) & 0xff; pad[2] = (src[nwords] >> 8) & 0xff; pad[3] = (src[nwords] >> 0) & 0xff; memcpy(dst, pad, BCH_ECC_BYTES(bch)-4*nwords); } /** * encode_bch - calculate BCH ecc parity of data * @bch: BCH control structure * @data: data to encode * @len: data length in bytes * @ecc: ecc parity data, must be initialized 
by caller * * The @ecc parity array is used both as input and output parameter, in order to * allow incremental computations. It should be of the size indicated by member * @ecc_bytes of @bch, and should be initialized to 0 before the first call. * * The exact number of computed ecc parity bits is given by member @ecc_bits of * @bch; it may be less than m*t for large values of t. */ void encode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, uint8_t *ecc) { const unsigned int l = BCH_ECC_WORDS(bch)-1; unsigned int i, mlen; unsigned long m; uint32_t w, r[l+1]; const uint32_t * const tab0 = bch->mod8_tab; const uint32_t * const tab1 = tab0 + 256*(l+1); const uint32_t * const tab2 = tab1 + 256*(l+1); const uint32_t * const tab3 = tab2 + 256*(l+1); const uint32_t *pdata, *p0, *p1, *p2, *p3; if (ecc) { /* load ecc parity bytes into internal 32-bit buffer */ load_ecc8(bch, bch->ecc_buf, ecc); } else { memset(bch->ecc_buf, 0, sizeof(r)); } /* process first unaligned data bytes */ m = ((unsigned long)data) & 3; if (m) { mlen = (len < (4-m)) ? len : 4-m; encode_bch_unaligned(bch, data, mlen, bch->ecc_buf); data += mlen; len -= mlen; } /* process 32-bit aligned data words */ pdata = (uint32_t *)data; mlen = len/4; data += 4*mlen; len -= 4*mlen; memcpy(r, bch->ecc_buf, sizeof(r)); /* * split each 32-bit word into 4 polynomials of weight 8 as follows: * * 31 ...24 23 ...16 15 ... 8 7 ... 
0 * xxxxxxxx yyyyyyyy zzzzzzzz tttttttt * tttttttt mod g = r0 (precomputed) * zzzzzzzz 00000000 mod g = r1 (precomputed) * yyyyyyyy 00000000 00000000 mod g = r2 (precomputed) * xxxxxxxx 00000000 00000000 00000000 mod g = r3 (precomputed) * xxxxxxxx yyyyyyyy zzzzzzzz tttttttt mod g = r0^r1^r2^r3 */ while (mlen--) { /* input data is read in big-endian format */ w = r[0]^cpu_to_be32(*pdata++); p0 = tab0 + (l+1)*((w >> 0) & 0xff); p1 = tab1 + (l+1)*((w >> 8) & 0xff); p2 = tab2 + (l+1)*((w >> 16) & 0xff); p3 = tab3 + (l+1)*((w >> 24) & 0xff); for (i = 0; i < l; i++) r[i] = r[i+1]^p0[i]^p1[i]^p2[i]^p3[i]; r[l] = p0[l]^p1[l]^p2[l]^p3[l]; } memcpy(bch->ecc_buf, r, sizeof(r)); /* process last unaligned bytes */ if (len) encode_bch_unaligned(bch, data, len, bch->ecc_buf); /* store ecc parity bytes into original parity buffer */ if (ecc) store_ecc8(bch, ecc, bch->ecc_buf); } EXPORT_SYMBOL_GPL(encode_bch); static inline int modulo(struct bch_control *bch, unsigned int v) { const unsigned int n = GF_N(bch); while (v >= n) { v -= n; v = (v & n) + (v >> GF_M(bch)); } return v; } /* * shorter and faster modulo function, only works when v < 2N. */ static inline int mod_s(struct bch_control *bch, unsigned int v) { const unsigned int n = GF_N(bch); return (v < n) ? v : v-n; } static inline int deg(unsigned int poly) { /* polynomial degree is the most-significant bit index */ return fls(poly)-1; } static inline int parity(unsigned int x) { /* * public domain code snippet, lifted from * http://www-graphics.stanford.edu/~seander/bithacks.html */ x ^= x >> 1; x ^= x >> 2; x = (x & 0x11111111U) * 0x11111111U; return (x >> 28) & 1; } /* Galois field basic operations: multiply, divide, inverse, etc. */ static inline unsigned int gf_mul(struct bch_control *bch, unsigned int a, unsigned int b) { return (a && b) ? bch->a_pow_tab[mod_s(bch, bch->a_log_tab[a]+ bch->a_log_tab[b])] : 0; } static inline unsigned int gf_sqr(struct bch_control *bch, unsigned int a) { return a ? 
bch->a_pow_tab[mod_s(bch, 2*bch->a_log_tab[a])] : 0; } static inline unsigned int gf_div(struct bch_control *bch, unsigned int a, unsigned int b) { return a ? bch->a_pow_tab[mod_s(bch, bch->a_log_tab[a]+ GF_N(bch)-bch->a_log_tab[b])] : 0; } static inline unsigned int gf_inv(struct bch_control *bch, unsigned int a) { return bch->a_pow_tab[GF_N(bch)-bch->a_log_tab[a]]; } static inline unsigned int a_pow(struct bch_control *bch, int i) { return bch->a_pow_tab[modulo(bch, i)]; } static inline int a_log(struct bch_control *bch, unsigned int x) { return bch->a_log_tab[x]; } static inline int a_ilog(struct bch_control *bch, unsigned int x) { return mod_s(bch, GF_N(bch)-bch->a_log_tab[x]); } /* * compute 2t syndromes of ecc polynomial, i.e. ecc(a^j) for j=1..2t */ static void compute_syndromes(struct bch_control *bch, uint32_t *ecc, unsigned int *syn) { int i, j, s; unsigned int m; uint32_t poly; const int t = GF_T(bch); s = bch->ecc_bits; /* make sure extra bits in last ecc word are cleared */ m = ((unsigned int)s) & 31; if (m) ecc[s/32] &= ~((1u << (32-m))-1); memset(syn, 0, 2*t*sizeof(*syn)); /* compute v(a^j) for j=1 .. 
2t-1 */ do { poly = *ecc++; s -= 32; while (poly) { i = deg(poly); for (j = 0; j < 2*t; j += 2) syn[j] ^= a_pow(bch, (j+1)*(i+s)); poly ^= (1 << i); } } while (s > 0); /* v(a^(2j)) = v(a^j)^2 */ for (j = 0; j < t; j++) syn[2*j+1] = gf_sqr(bch, syn[j]); } static void gf_poly_copy(struct gf_poly *dst, struct gf_poly *src) { memcpy(dst, src, GF_POLY_SZ(src->deg)); } static int compute_error_locator_polynomial(struct bch_control *bch, const unsigned int *syn) { const unsigned int t = GF_T(bch); const unsigned int n = GF_N(bch); unsigned int i, j, tmp, l, pd = 1, d = syn[0]; struct gf_poly *elp = bch->elp; struct gf_poly *pelp = bch->poly_2t[0]; struct gf_poly *elp_copy = bch->poly_2t[1]; int k, pp = -1; memset(pelp, 0, GF_POLY_SZ(2*t)); memset(elp, 0, GF_POLY_SZ(2*t)); pelp->deg = 0; pelp->c[0] = 1; elp->deg = 0; elp->c[0] = 1; /* use simplified binary Berlekamp-Massey algorithm */ for (i = 0; (i < t) && (elp->deg <= t); i++) { if (d) { k = 2*i-pp; gf_poly_copy(elp_copy, elp); /* e[i+1](X) = e[i](X)+di*dp^-1*X^2(i-p)*e[p](X) */ tmp = a_log(bch, d)+n-a_log(bch, pd); for (j = 0; j <= pelp->deg; j++) { if (pelp->c[j]) { l = a_log(bch, pelp->c[j]); elp->c[j+k] ^= a_pow(bch, tmp+l); } } /* compute l[i+1] = max(l[i]->c[l[p]+2*(i-p]) */ tmp = pelp->deg+k; if (tmp > elp->deg) { elp->deg = tmp; gf_poly_copy(pelp, elp_copy); pd = d; pp = 2*i; } } /* di+1 = S(2i+3)+elp[i+1].1*S(2i+2)+...+elp[i+1].lS(2i+3-l) */ if (i < t-1) { d = syn[2*i+2]; for (j = 1; j <= elp->deg; j++) d ^= gf_mul(bch, elp->c[j], syn[2*i+2-j]); } } dbg("elp=%s\n", gf_poly_str(elp)); return (elp->deg > t) ? 
-1 : (int)elp->deg; } /* * solve a m x m linear system in GF(2) with an expected number of solutions, * and return the number of found solutions */ static int solve_linear_system(struct bch_control *bch, unsigned int *rows, unsigned int *sol, int nsol) { const int m = GF_M(bch); unsigned int tmp, mask; int rem, c, r, p, k, param[m]; k = 0; mask = 1 << m; /* Gaussian elimination */ for (c = 0; c < m; c++) { rem = 0; p = c-k; /* find suitable row for elimination */ for (r = p; r < m; r++) { if (rows[r] & mask) { if (r != p) { tmp = rows[r]; rows[r] = rows[p]; rows[p] = tmp; } rem = r+1; break; } } if (rem) { /* perform elimination on remaining rows */ tmp = rows[p]; for (r = rem; r < m; r++) { if (rows[r] & mask) rows[r] ^= tmp; } } else { /* elimination not needed, store defective row index */ param[k++] = c; } mask >>= 1; } /* rewrite system, inserting fake parameter rows */ if (k > 0) { p = k; for (r = m-1; r >= 0; r--) { if ((r > m-1-k) && rows[r]) /* system has no solution */ return 0; rows[r] = (p && (r == param[p-1])) ? p--, 1u << (m-r) : rows[r-p]; } } if (nsol != (1 << k)) /* unexpected number of solutions */ return 0; for (p = 0; p < nsol; p++) { /* set parameters for p-th solution */ for (c = 0; c < k; c++) rows[param[c]] = (rows[param[c]] & ~1)|((p >> c) & 1); /* compute unique solution */ tmp = 0; for (r = m-1; r >= 0; r--) { mask = rows[r] & (tmp|1); tmp |= parity(mask) << (m-r); } sol[p] = tmp >> 1; } return nsol; } /* * this function builds and solves a linear system for finding roots of a degree * 4 affine monic polynomial X^4+aX^2+bX+c over GF(2^m). */ static int find_affine4_roots(struct bch_control *bch, unsigned int a, unsigned int b, unsigned int c, unsigned int *roots) { int i, j, k; const int m = GF_M(bch); unsigned int mask = 0xff, t, rows[16] = {0,}; j = a_log(bch, b); k = a_log(bch, a); rows[0] = c; /* buid linear system to solve X^4+aX^2+bX+c = 0 */ for (i = 0; i < m; i++) { rows[i+1] = bch->a_pow_tab[4*i]^ (a ? 
bch->a_pow_tab[mod_s(bch, k)] : 0)^ (b ? bch->a_pow_tab[mod_s(bch, j)] : 0); j++; k += 2; } /* * transpose 16x16 matrix before passing it to linear solver * warning: this code assumes m < 16 */ for (j = 8; j != 0; j >>= 1, mask ^= (mask << j)) { for (k = 0; k < 16; k = (k+j+1) & ~j) { t = ((rows[k] >> j)^rows[k+j]) & mask; rows[k] ^= (t << j); rows[k+j] ^= t; } } return solve_linear_system(bch, rows, roots, 4); } /* * compute root r of a degree 1 polynomial over GF(2^m) (returned as log(1/r)) */ static int find_poly_deg1_roots(struct bch_control *bch, struct gf_poly *poly, unsigned int *roots) { int n = 0; if (poly->c[0]) /* poly[X] = bX+c with c!=0, root=c/b */ roots[n++] = mod_s(bch, GF_N(bch)-bch->a_log_tab[poly->c[0]]+ bch->a_log_tab[poly->c[1]]); return n; } /* * compute roots of a degree 2 polynomial over GF(2^m) */ static int find_poly_deg2_roots(struct bch_control *bch, struct gf_poly *poly, unsigned int *roots) { int n = 0, i, l0, l1, l2; unsigned int u, v, r; if (poly->c[0] && poly->c[1]) { l0 = bch->a_log_tab[poly->c[0]]; l1 = bch->a_log_tab[poly->c[1]]; l2 = bch->a_log_tab[poly->c[2]]; /* using z=a/bX, transform aX^2+bX+c into z^2+z+u (u=ac/b^2) */ u = a_pow(bch, l0+l2+2*(GF_N(bch)-l1)); /* * let u = sum(li.a^i) i=0..m-1; then compute r = sum(li.xi): * r^2+r = sum(li.(xi^2+xi)) = sum(li.(a^i+Tr(a^i).a^k)) = * u + sum(li.Tr(a^i).a^k) = u+a^k.Tr(sum(li.a^i)) = u+a^k.Tr(u) * i.e. 
r and r+1 are roots iff Tr(u)=0 */ r = 0; v = u; while (v) { i = deg(v); r ^= bch->xi_tab[i]; v ^= (1 << i); } /* verify root */ if ((gf_sqr(bch, r)^r) == u) { /* reverse z=a/bX transformation and compute log(1/r) */ roots[n++] = modulo(bch, 2*GF_N(bch)-l1- bch->a_log_tab[r]+l2); roots[n++] = modulo(bch, 2*GF_N(bch)-l1- bch->a_log_tab[r^1]+l2); } } return n; } /* * compute roots of a degree 3 polynomial over GF(2^m) */ static int find_poly_deg3_roots(struct bch_control *bch, struct gf_poly *poly, unsigned int *roots) { int i, n = 0; unsigned int a, b, c, a2, b2, c2, e3, tmp[4]; if (poly->c[0]) { /* transform polynomial into monic X^3 + a2X^2 + b2X + c2 */ e3 = poly->c[3]; c2 = gf_div(bch, poly->c[0], e3); b2 = gf_div(bch, poly->c[1], e3); a2 = gf_div(bch, poly->c[2], e3); /* (X+a2)(X^3+a2X^2+b2X+c2) = X^4+aX^2+bX+c (affine) */ c = gf_mul(bch, a2, c2); /* c = a2c2 */ b = gf_mul(bch, a2, b2)^c2; /* b = a2b2 + c2 */ a = gf_sqr(bch, a2)^b2; /* a = a2^2 + b2 */ /* find the 4 roots of this affine polynomial */ if (find_affine4_roots(bch, a, b, c, tmp) == 4) { /* remove a2 from final list of roots */ for (i = 0; i < 4; i++) { if (tmp[i] != a2) roots[n++] = a_ilog(bch, tmp[i]); } } } return n; } /* * compute roots of a degree 4 polynomial over GF(2^m) */ static int find_poly_deg4_roots(struct bch_control *bch, struct gf_poly *poly, unsigned int *roots) { int i, l, n = 0; unsigned int a, b, c, d, e = 0, f, a2, b2, c2, e4; if (poly->c[0] == 0) return 0; /* transform polynomial into monic X^4 + aX^3 + bX^2 + cX + d */ e4 = poly->c[4]; d = gf_div(bch, poly->c[0], e4); c = gf_div(bch, poly->c[1], e4); b = gf_div(bch, poly->c[2], e4); a = gf_div(bch, poly->c[3], e4); /* use Y=1/X transformation to get an affine polynomial */ if (a) { /* first, eliminate cX by using z=X+e with ae^2+c=0 */ if (c) { /* compute e such that e^2 = c/a */ f = gf_div(bch, c, a); l = a_log(bch, f); l += (l & 1) ? 
GF_N(bch) : 0; e = a_pow(bch, l/2); /* * use transformation z=X+e: * z^4+e^4 + a(z^3+ez^2+e^2z+e^3) + b(z^2+e^2) +cz+ce+d * z^4 + az^3 + (ae+b)z^2 + (ae^2+c)z+e^4+be^2+ae^3+ce+d * z^4 + az^3 + (ae+b)z^2 + e^4+be^2+d * z^4 + az^3 + b'z^2 + d' */ d = a_pow(bch, 2*l)^gf_mul(bch, b, f)^d; b = gf_mul(bch, a, e)^b; } /* now, use Y=1/X to get Y^4 + b/dY^2 + a/dY + 1/d */ if (d == 0) /* assume all roots have multiplicity 1 */ return 0; c2 = gf_inv(bch, d); b2 = gf_div(bch, a, d); a2 = gf_div(bch, b, d); } else { /* polynomial is already affine */ c2 = d; b2 = c; a2 = b; } /* find the 4 roots of this affine polynomial */ if (find_affine4_roots(bch, a2, b2, c2, roots) == 4) { for (i = 0; i < 4; i++) { /* post-process roots (reverse transformations) */ f = a ? gf_inv(bch, roots[i]) : roots[i]; roots[i] = a_ilog(bch, f^e); } n = 4; } return n; } /* * build monic, log-based representation of a polynomial */ static void gf_poly_logrep(struct bch_control *bch, const struct gf_poly *a, int *rep) { int i, d = a->deg, l = GF_N(bch)-a_log(bch, a->c[a->deg]); /* represent 0 values with -1; warning, rep[d] is not set to 1 */ for (i = 0; i < d; i++) rep[i] = a->c[i] ? 
mod_s(bch, a_log(bch, a->c[i])+l) : -1; } /* * compute polynomial Euclidean division remainder in GF(2^m)[X] */ static void gf_poly_mod(struct bch_control *bch, struct gf_poly *a, const struct gf_poly *b, int *rep) { int la, p, m; unsigned int i, j, *c = a->c; const unsigned int d = b->deg; if (a->deg < d) return; /* reuse or compute log representation of denominator */ if (!rep) { rep = bch->cache; gf_poly_logrep(bch, b, rep); } for (j = a->deg; j >= d; j--) { if (c[j]) { la = a_log(bch, c[j]); p = j-d; for (i = 0; i < d; i++, p++) { m = rep[i]; if (m >= 0) c[p] ^= bch->a_pow_tab[mod_s(bch, m+la)]; } } } a->deg = d-1; while (!c[a->deg] && a->deg) a->deg--; } /* * compute polynomial Euclidean division quotient in GF(2^m)[X] */ static void gf_poly_div(struct bch_control *bch, struct gf_poly *a, const struct gf_poly *b, struct gf_poly *q) { if (a->deg >= b->deg) { q->deg = a->deg-b->deg; /* compute a mod b (modifies a) */ gf_poly_mod(bch, a, b, NULL); /* quotient is stored in upper part of polynomial a */ memcpy(q->c, &a->c[b->deg], (1+q->deg)*sizeof(unsigned int)); } else { q->deg = 0; q->c[0] = 0; } } /* * compute polynomial GCD (Greatest Common Divisor) in GF(2^m)[X] */ static struct gf_poly *gf_poly_gcd(struct bch_control *bch, struct gf_poly *a, struct gf_poly *b) { struct gf_poly *tmp; dbg("gcd(%s,%s)=", gf_poly_str(a), gf_poly_str(b)); if (a->deg < b->deg) { tmp = b; b = a; a = tmp; } while (b->deg > 0) { gf_poly_mod(bch, a, b, NULL); tmp = b; b = a; a = tmp; } dbg("%s\n", gf_poly_str(a)); return a; } /* * Given a polynomial f and an integer k, compute Tr(a^kX) mod f * This is used in Berlekamp Trace algorithm for splitting polynomials */ static void compute_trace_bk_mod(struct bch_control *bch, int k, const struct gf_poly *f, struct gf_poly *z, struct gf_poly *out) { const int m = GF_M(bch); int i, j; /* z contains z^2j mod f */ z->deg = 1; z->c[0] = 0; z->c[1] = bch->a_pow_tab[k]; out->deg = 0; memset(out, 0, GF_POLY_SZ(f->deg)); /* compute f log 
representation only once */ gf_poly_logrep(bch, f, bch->cache); for (i = 0; i < m; i++) { /* add a^(k*2^i)(z^(2^i) mod f) and compute (z^(2^i) mod f)^2 */ for (j = z->deg; j >= 0; j--) { out->c[j] ^= z->c[j]; z->c[2*j] = gf_sqr(bch, z->c[j]); z->c[2*j+1] = 0; } if (z->deg > out->deg) out->deg = z->deg; if (i < m-1) { z->deg *= 2; /* z^(2(i+1)) mod f = (z^(2^i) mod f)^2 mod f */ gf_poly_mod(bch, z, f, bch->cache); } } while (!out->c[out->deg] && out->deg) out->deg--; dbg("Tr(a^%d.X) mod f = %s\n", k, gf_poly_str(out)); } /* * factor a polynomial using Berlekamp Trace algorithm (BTA) */ static void factor_polynomial(struct bch_control *bch, int k, struct gf_poly *f, struct gf_poly **g, struct gf_poly **h) { struct gf_poly *f2 = bch->poly_2t[0]; struct gf_poly *q = bch->poly_2t[1]; struct gf_poly *tk = bch->poly_2t[2]; struct gf_poly *z = bch->poly_2t[3]; struct gf_poly *gcd; dbg("factoring %s...\n", gf_poly_str(f)); *g = f; *h = NULL; /* tk = Tr(a^k.X) mod f */ compute_trace_bk_mod(bch, k, f, z, tk); if (tk->deg > 0) { /* compute g = gcd(f, tk) (destructive operation) */ gf_poly_copy(f2, f); gcd = gf_poly_gcd(bch, f2, tk); if (gcd->deg < f->deg) { /* compute h=f/gcd(f,tk); this will modify f and q */ gf_poly_div(bch, f, gcd, q); /* store g and h in-place (clobbering f) */ *h = &((struct gf_poly_deg1 *)f)[gcd->deg].poly; gf_poly_copy(*g, gcd); gf_poly_copy(*h, q); } } } /* * find roots of a polynomial, using BTZ algorithm; see the beginning of this * file for details */ static int find_poly_roots(struct bch_control *bch, unsigned int k, struct gf_poly *poly, unsigned int *roots) { int cnt; struct gf_poly *f1, *f2; switch (poly->deg) { /* handle low degree polynomials with ad hoc techniques */ case 1: cnt = find_poly_deg1_roots(bch, poly, roots); break; case 2: cnt = find_poly_deg2_roots(bch, poly, roots); break; case 3: cnt = find_poly_deg3_roots(bch, poly, roots); break; case 4: cnt = find_poly_deg4_roots(bch, poly, roots); break; default: /* factor polynomial using 
Berlekamp Trace Algorithm (BTA) */ cnt = 0; if (poly->deg && (k <= GF_M(bch))) { factor_polynomial(bch, k, poly, &f1, &f2); if (f1) cnt += find_poly_roots(bch, k+1, f1, roots); if (f2) cnt += find_poly_roots(bch, k+1, f2, roots+cnt); } break; } return cnt; } #if defined(USE_CHIEN_SEARCH) /* * exhaustive root search (Chien) implementation - not used, included only for * reference/comparison tests */ static int chien_search(struct bch_control *bch, unsigned int len, struct gf_poly *p, unsigned int *roots) { int m; unsigned int i, j, syn, syn0, count = 0; const unsigned int k = 8*len+bch->ecc_bits; /* use a log-based representation of polynomial */ gf_poly_logrep(bch, p, bch->cache); bch->cache[p->deg] = 0; syn0 = gf_div(bch, p->c[0], p->c[p->deg]); for (i = GF_N(bch)-k+1; i <= GF_N(bch); i++) { /* compute elp(a^i) */ for (j = 1, syn = syn0; j <= p->deg; j++) { m = bch->cache[j]; if (m >= 0) syn ^= a_pow(bch, m+j*i); } if (syn == 0) { roots[count++] = GF_N(bch)-i; if (count == p->deg) break; } } return (count == p->deg) ? 
count : 0; } #define find_poly_roots(_p, _k, _elp, _loc) chien_search(_p, len, _elp, _loc) #endif /* USE_CHIEN_SEARCH */ /** * decode_bch - decode received codeword and find bit error locations * @bch: BCH control structure * @data: received data, ignored if @calc_ecc is provided * @len: data length in bytes, must always be provided * @recv_ecc: received ecc, if NULL then assume it was XORed in @calc_ecc * @calc_ecc: calculated ecc, if NULL then calc_ecc is computed from @data * @syn: hw computed syndrome data (if NULL, syndrome is calculated) * @errloc: output array of error locations * * Returns: * The number of errors found, or -EBADMSG if decoding failed, or -EINVAL if * invalid parameters were provided * * Depending on the available hw BCH support and the need to compute @calc_ecc * separately (using encode_bch()), this function should be called with one of * the following parameter configurations - * * by providing @data and @recv_ecc only: * decode_bch(@bch, @data, @len, @recv_ecc, NULL, NULL, @errloc) * * by providing @recv_ecc and @calc_ecc: * decode_bch(@bch, NULL, @len, @recv_ecc, @calc_ecc, NULL, @errloc) * * by providing ecc = recv_ecc XOR calc_ecc: * decode_bch(@bch, NULL, @len, NULL, ecc, NULL, @errloc) * * by providing syndrome results @syn: * decode_bch(@bch, NULL, @len, NULL, NULL, @syn, @errloc) * * Once decode_bch() has successfully returned with a positive value, error * locations returned in array @errloc should be interpreted as follows - * * if (errloc[n] >= 8*len), then n-th error is located in ecc (no need for * data correction) * * if (errloc[n] < 8*len), then n-th error is located in data and can be * corrected with statement data[errloc[n]/8] ^= 1 << (errloc[n] % 8); * * Note that this function does not perform any data correction by itself, it * merely indicates error locations. 
*/ int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, const uint8_t *recv_ecc, const uint8_t *calc_ecc, const unsigned int *syn, unsigned int *errloc) { const unsigned int ecc_words = BCH_ECC_WORDS(bch); unsigned int nbits; int i, err, nroots; uint32_t sum; /* sanity check: make sure data length can be handled */ if (8*len > (bch->n-bch->ecc_bits)) return -EINVAL; /* if caller does not provide syndromes, compute them */ if (!syn) { if (!calc_ecc) { /* compute received data ecc into an internal buffer */ if (!data || !recv_ecc) return -EINVAL; encode_bch(bch, data, len, NULL); } else { /* load provided calculated ecc */ load_ecc8(bch, bch->ecc_buf, calc_ecc); } /* load received ecc or assume it was XORed in calc_ecc */ if (recv_ecc) { load_ecc8(bch, bch->ecc_buf2, recv_ecc); /* XOR received and calculated ecc */ for (i = 0, sum = 0; i < (int)ecc_words; i++) { bch->ecc_buf[i] ^= bch->ecc_buf2[i]; sum |= bch->ecc_buf[i]; } if (!sum) /* no error found */ return 0; } compute_syndromes(bch, bch->ecc_buf, bch->syn); syn = bch->syn; } err = compute_error_locator_polynomial(bch, syn); if (err > 0) { nroots = find_poly_roots(bch, 1, bch->elp, errloc); if (err != nroots) err = -1; } if (err > 0) { /* post-process raw error locations for easier correction */ nbits = (len*8)+bch->ecc_bits; for (i = 0; i < err; i++) { if (errloc[i] >= nbits) { err = -1; break; } errloc[i] = nbits-1-errloc[i]; errloc[i] = (errloc[i] & ~7)|(7-(errloc[i] & 7)); } } return (err >= 0) ? 
err : -EBADMSG; } EXPORT_SYMBOL_GPL(decode_bch); /* * generate Galois field lookup tables */ static int build_gf_tables(struct bch_control *bch, unsigned int poly) { unsigned int i, x = 1; const unsigned int k = 1 << deg(poly); /* primitive polynomial must be of degree m */ if (k != (1u << GF_M(bch))) return -1; for (i = 0; i < GF_N(bch); i++) { bch->a_pow_tab[i] = x; bch->a_log_tab[x] = i; if (i && (x == 1)) /* polynomial is not primitive (a^i=1 with 0<i<2^m-1) */ return -1; x <<= 1; if (x & k) x ^= poly; } bch->a_pow_tab[GF_N(bch)] = 1; bch->a_log_tab[0] = 0; return 0; } /* * compute generator polynomial remainder tables for fast encoding */ static void build_mod8_tables(struct bch_control *bch, const uint32_t *g) { int i, j, b, d; uint32_t data, hi, lo, *tab; const int l = BCH_ECC_WORDS(bch); const int plen = DIV_ROUND_UP(bch->ecc_bits+1, 32); const int ecclen = DIV_ROUND_UP(bch->ecc_bits, 32); memset(bch->mod8_tab, 0, 4*256*l*sizeof(*bch->mod8_tab)); for (i = 0; i < 256; i++) { /* p(X)=i is a small polynomial of weight <= 8 */ for (b = 0; b < 4; b++) { /* we want to compute (p(X).X^(8*b+deg(g))) mod g(X) */ tab = bch->mod8_tab + (b*256+i)*l; data = i << (8*b); while (data) { d = deg(data); /* subtract X^d.g(X) from p(X).X^(8*b+deg(g)) */ data ^= g[0] >> (31-d); for (j = 0; j < ecclen; j++) { hi = (d < 31) ? g[j] << (d+1) : 0; lo = (j+1 < plen) ? g[j+1] >> (31-d) : 0; tab[j] ^= hi|lo; } } } } } /* * build a base for factoring degree 2 polynomials */ static int build_deg2_base(struct bch_control *bch) { const int m = GF_M(bch); int i, j, r; unsigned int sum, x, y, remaining, ak = 0, xi[m]; /* find k s.t. 
Tr(a^k) = 1 and 0 <= k < m */ for (i = 0; i < m; i++) { for (j = 0, sum = 0; j < m; j++) sum ^= a_pow(bch, i*(1 << j)); if (sum) { ak = bch->a_pow_tab[i]; break; } } /* find xi, i=0..m-1 such that xi^2+xi = a^i+Tr(a^i).a^k */ remaining = m; memset(xi, 0, sizeof(xi)); for (x = 0; (x <= GF_N(bch)) && remaining; x++) { y = gf_sqr(bch, x)^x; for (i = 0; i < 2; i++) { r = a_log(bch, y); if (y && (r < m) && !xi[r]) { bch->xi_tab[r] = x; xi[r] = 1; remaining--; dbg("x%d = %x\n", r, x); break; } y ^= ak; } } /* should not happen but check anyway */ return remaining ? -1 : 0; } static void *bch_alloc(size_t size, int *err) { void *ptr; ptr = kmalloc(size, GFP_KERNEL); if (ptr == NULL) *err = 1; return ptr; } /* * compute generator polynomial for given (m,t) parameters. */ static uint32_t *compute_generator_polynomial(struct bch_control *bch) { const unsigned int m = GF_M(bch); const unsigned int t = GF_T(bch); int n, err = 0; unsigned int i, j, nbits, r, word, *roots; struct gf_poly *g; uint32_t *genpoly; g = bch_alloc(GF_POLY_SZ(m*t), &err); roots = bch_alloc((bch->n+1)*sizeof(*roots), &err); genpoly = bch_alloc(DIV_ROUND_UP(m*t+1, 32)*sizeof(*genpoly), &err); if (err) { kfree(genpoly); genpoly = NULL; goto finish; } /* enumerate all roots of g(X) */ memset(roots , 0, (bch->n+1)*sizeof(*roots)); for (i = 0; i < t; i++) { for (j = 0, r = 2*i+1; j < m; j++) { roots[r] = 1; r = mod_s(bch, 2*r); } } /* build generator polynomial g(X) */ g->deg = 0; g->c[0] = 1; for (i = 0; i < GF_N(bch); i++) { if (roots[i]) { /* multiply g(X) by (X+root) */ r = bch->a_pow_tab[i]; g->c[g->deg+1] = 1; for (j = g->deg; j > 0; j--) g->c[j] = gf_mul(bch, g->c[j], r)^g->c[j-1]; g->c[0] = gf_mul(bch, g->c[0], r); g->deg++; } } /* store left-justified binary representation of g(X) */ n = g->deg+1; i = 0; while (n > 0) { nbits = (n > 32) ? 
32 : n; for (j = 0, word = 0; j < nbits; j++) { if (g->c[n-1-j]) word |= 1u << (31-j); } genpoly[i++] = word; n -= nbits; } bch->ecc_bits = g->deg; finish: kfree(g); kfree(roots); return genpoly; } /** * init_bch - initialize a BCH encoder/decoder * @m: Galois field order, should be in the range 5-15 * @t: maximum error correction capability, in bits * @prim_poly: user-provided primitive polynomial (or 0 to use default) * * Returns: * a newly allocated BCH control structure if successful, NULL otherwise * * This initialization can take some time, as lookup tables are built for fast * encoding/decoding; make sure not to call this function from a time critical * path. Usually, init_bch() should be called on module/driver init and * free_bch() should be called to release memory on exit. * * You may provide your own primitive polynomial of degree @m in argument * @prim_poly, or let init_bch() use its default polynomial. * * Once init_bch() has successfully returned a pointer to a newly allocated * BCH control structure, ecc length in bytes is given by member @ecc_bytes of * the structure. 
*/ struct bch_control *init_bch(int m, int t, unsigned int prim_poly) { int err = 0; unsigned int i, words; uint32_t *genpoly; struct bch_control *bch = NULL; const int min_m = 5; const int max_m = 15; /* default primitive polynomials */ static const unsigned int prim_poly_tab[] = { 0x25, 0x43, 0x83, 0x11d, 0x211, 0x409, 0x805, 0x1053, 0x201b, 0x402b, 0x8003, }; #if defined(CONFIG_BCH_CONST_PARAMS) if ((m != (CONFIG_BCH_CONST_M)) || (t != (CONFIG_BCH_CONST_T))) { printk(KERN_ERR "bch encoder/decoder was configured to support " "parameters m=%d, t=%d only!\n", CONFIG_BCH_CONST_M, CONFIG_BCH_CONST_T); goto fail; } #endif if ((m < min_m) || (m > max_m)) /* * values of m greater than 15 are not currently supported; * supporting m > 15 would require changing table base type * (uint16_t) and a small patch in matrix transposition */ goto fail; /* sanity checks */ if ((t < 1) || (m*t >= ((1 << m)-1))) /* invalid t value */ goto fail; /* select a primitive polynomial for generating GF(2^m) */ if (prim_poly == 0) prim_poly = prim_poly_tab[m-min_m]; bch = kzalloc(sizeof(*bch), GFP_KERNEL); if (bch == NULL) goto fail; bch->m = m; bch->t = t; bch->n = (1 << m)-1; words = DIV_ROUND_UP(m*t, 32); bch->ecc_bytes = DIV_ROUND_UP(m*t, 8); bch->a_pow_tab = bch_alloc((1+bch->n)*sizeof(*bch->a_pow_tab), &err); bch->a_log_tab = bch_alloc((1+bch->n)*sizeof(*bch->a_log_tab), &err); bch->mod8_tab = bch_alloc(words*1024*sizeof(*bch->mod8_tab), &err); bch->ecc_buf = bch_alloc(words*sizeof(*bch->ecc_buf), &err); bch->ecc_buf2 = bch_alloc(words*sizeof(*bch->ecc_buf2), &err); bch->xi_tab = bch_alloc(m*sizeof(*bch->xi_tab), &err); bch->syn = bch_alloc(2*t*sizeof(*bch->syn), &err); bch->cache = bch_alloc(2*t*sizeof(*bch->cache), &err); bch->elp = bch_alloc((t+1)*sizeof(struct gf_poly_deg1), &err); for (i = 0; i < ARRAY_SIZE(bch->poly_2t); i++) bch->poly_2t[i] = bch_alloc(GF_POLY_SZ(2*t), &err); if (err) goto fail; err = build_gf_tables(bch, prim_poly); if (err) goto fail; /* use generator 
polynomial for computing encoding tables */ genpoly = compute_generator_polynomial(bch); if (genpoly == NULL) goto fail; build_mod8_tables(bch, genpoly); kfree(genpoly); err = build_deg2_base(bch); if (err) goto fail; return bch; fail: free_bch(bch); return NULL; } EXPORT_SYMBOL_GPL(init_bch); /** * free_bch - free the BCH control structure * @bch: BCH control structure to release */ void free_bch(struct bch_control *bch) { unsigned int i; if (bch) { kfree(bch->a_pow_tab); kfree(bch->a_log_tab); kfree(bch->mod8_tab); kfree(bch->ecc_buf); kfree(bch->ecc_buf2); kfree(bch->xi_tab); kfree(bch->syn); kfree(bch->cache); kfree(bch->elp); for (i = 0; i < ARRAY_SIZE(bch->poly_2t); i++) kfree(bch->poly_2t[i]); kfree(bch); } } EXPORT_SYMBOL_GPL(free_bch); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>"); MODULE_DESCRIPTION("Binary BCH encoder/decoder");
gpl-2.0
TheWolfer22/android_kernel_lge_g3
drivers/net/fddi/skfp/queue.c
13133
4083
/****************************************************************************** * * (C)Copyright 1998,1999 SysKonnect, * a business unit of Schneider & Koch & Co. Datensysteme GmbH. * * See the file "skfddi.c" for further information. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * The information in this file is provided "AS IS" without warranty. * ******************************************************************************/ /* SMT Event Queue Management */ #include "h/types.h" #include "h/fddi.h" #include "h/smc.h" #ifndef lint static const char ID_sccs[] = "@(#)queue.c 2.9 97/08/04 (C) SK " ; #endif #define PRINTF(a,b,c) /* * init event queue management */ void ev_init(struct s_smc *smc) { smc->q.ev_put = smc->q.ev_get = smc->q.ev_queue ; } /* * add event to queue */ void queue_event(struct s_smc *smc, int class, int event) { PRINTF("queue class %d event %d\n",class,event) ; smc->q.ev_put->class = class ; smc->q.ev_put->event = event ; if (++smc->q.ev_put == &smc->q.ev_queue[MAX_EVENT]) smc->q.ev_put = smc->q.ev_queue ; if (smc->q.ev_put == smc->q.ev_get) { SMT_ERR_LOG(smc,SMT_E0137, SMT_E0137_MSG) ; } } /* * timer_event is called from HW timer package. 
*/ void timer_event(struct s_smc *smc, u_long token) { PRINTF("timer event class %d token %d\n", EV_T_CLASS(token), EV_T_EVENT(token)) ; queue_event(smc,EV_T_CLASS(token),EV_T_EVENT(token)); } /* * event dispatcher * while event queue is not empty * get event from queue * send command to state machine * end */ void ev_dispatcher(struct s_smc *smc) { struct event_queue *ev ; /* pointer into queue */ int class ; ev = smc->q.ev_get ; PRINTF("dispatch get %x put %x\n",ev,smc->q.ev_put) ; while (ev != smc->q.ev_put) { PRINTF("dispatch class %d event %d\n",ev->class,ev->event) ; switch(class = ev->class) { case EVENT_ECM : /* Entity Corordination Man. */ ecm(smc,(int)ev->event) ; break ; case EVENT_CFM : /* Configuration Man. */ cfm(smc,(int)ev->event) ; break ; case EVENT_RMT : /* Ring Man. */ rmt(smc,(int)ev->event) ; break ; case EVENT_SMT : smt_event(smc,(int)ev->event) ; break ; #ifdef CONCENTRATOR case 99 : timer_test_event(smc,(int)ev->event) ; break ; #endif case EVENT_PCMA : /* PHY A */ case EVENT_PCMB : /* PHY B */ default : if (class >= EVENT_PCMA && class < EVENT_PCMA + NUMPHYS) { pcm(smc,class - EVENT_PCMA,(int)ev->event) ; break ; } SMT_PANIC(smc,SMT_E0121, SMT_E0121_MSG) ; return ; } if (++ev == &smc->q.ev_queue[MAX_EVENT]) ev = smc->q.ev_queue ; /* Renew get: it is used in queue_events to detect overruns */ smc->q.ev_get = ev; } } /* * smt_online connects to or disconnects from the ring * MUST be called to initiate connection establishment * * on 0 disconnect * on 1 connect */ u_short smt_online(struct s_smc *smc, int on) { queue_event(smc,EVENT_ECM,on ? 
EC_CONNECT : EC_DISCONNECT) ; ev_dispatcher(smc) ; return smc->mib.fddiSMTCF_State; } /* * set SMT flag to value * flag flag name * value flag value * dump current flag setting */ #ifdef CONCENTRATOR void do_smt_flag(struct s_smc *smc, char *flag, int value) { #ifdef DEBUG struct smt_debug *deb; SK_UNUSED(smc) ; #ifdef DEBUG_BRD deb = &smc->debug; #else deb = &debug; #endif if (!strcmp(flag,"smt")) deb->d_smt = value ; else if (!strcmp(flag,"smtf")) deb->d_smtf = value ; else if (!strcmp(flag,"pcm")) deb->d_pcm = value ; else if (!strcmp(flag,"rmt")) deb->d_rmt = value ; else if (!strcmp(flag,"cfm")) deb->d_cfm = value ; else if (!strcmp(flag,"ecm")) deb->d_ecm = value ; printf("smt %d\n",deb->d_smt) ; printf("smtf %d\n",deb->d_smtf) ; printf("pcm %d\n",deb->d_pcm) ; printf("rmt %d\n",deb->d_rmt) ; printf("cfm %d\n",deb->d_cfm) ; printf("ecm %d\n",deb->d_ecm) ; #endif /* DEBUG */ } #endif
gpl-2.0
FireHound-Devices/android_kernel_cyanogen_msm8916
drivers/video/atafb_iplan2p2.c
14925
6818
/* * linux/drivers/video/iplan2p2.c -- Low level frame buffer operations for * interleaved bitplanes à la Atari (2 * planes, 2 bytes interleave) * * Created 5 Apr 1997 by Geert Uytterhoeven * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/string.h> #include <linux/fb.h> #include <asm/setup.h> #include "atafb.h" #define BPL 2 #include "atafb_utils.h" void atafb_iplan2p2_copyarea(struct fb_info *info, u_long next_line, int sy, int sx, int dy, int dx, int height, int width) { /* bmove() has to distinguish two major cases: If both, source and * destination, start at even addresses or both are at odd * addresses, just the first odd and last even column (if present) * require special treatment (memmove_col()). The rest between * then can be copied by normal operations, because all adjacent * bytes are affected and are to be stored in the same order. * The pathological case is when the move should go from an odd * address to an even or vice versa. Since the bytes in the plane * words must be assembled in new order, it seems wisest to make * all movements by memmove_col(). 
*/ u8 *src, *dst; u32 *s, *d; int w, l , i, j; u_int colsize; u_int upwards = (dy < sy) || (dy == sy && dx < sx); colsize = height; if (!((sx ^ dx) & 15)) { /* odd->odd or even->even */ if (upwards) { src = (u8 *)info->screen_base + sy * next_line + (sx & ~15) / (8 / BPL); dst = (u8 *)info->screen_base + dy * next_line + (dx & ~15) / (8 / BPL); if (sx & 15) { memmove32_col(dst, src, 0xff00ff, height, next_line - BPL * 2); src += BPL * 2; dst += BPL * 2; width -= 8; } w = width >> 4; if (w) { s = (u32 *)src; d = (u32 *)dst; w *= BPL / 2; l = next_line - w * 4; for (j = height; j > 0; j--) { for (i = w; i > 0; i--) *d++ = *s++; s = (u32 *)((u8 *)s + l); d = (u32 *)((u8 *)d + l); } } if (width & 15) memmove32_col(dst + width / (8 / BPL), src + width / (8 / BPL), 0xff00ff00, height, next_line - BPL * 2); } else { src = (u8 *)info->screen_base + (sy - 1) * next_line + ((sx + width + 8) & ~15) / (8 / BPL); dst = (u8 *)info->screen_base + (dy - 1) * next_line + ((dx + width + 8) & ~15) / (8 / BPL); if ((sx + width) & 15) { src -= BPL * 2; dst -= BPL * 2; memmove32_col(dst, src, 0xff00ff00, colsize, -next_line - BPL * 2); width -= 8; } w = width >> 4; if (w) { s = (u32 *)src; d = (u32 *)dst; w *= BPL / 2; l = next_line - w * 4; for (j = height; j > 0; j--) { for (i = w; i > 0; i--) *--d = *--s; s = (u32 *)((u8 *)s - l); d = (u32 *)((u8 *)d - l); } } if (sx & 15) memmove32_col(dst - (width - 16) / (8 / BPL), src - (width - 16) / (8 / BPL), 0xff00ff, colsize, -next_line - BPL * 2); } } else { /* odd->even or even->odd */ if (upwards) { u32 *src32, *dst32; u32 pval[4], v, v1, mask; int i, j, w, f; src = (u8 *)info->screen_base + sy * next_line + (sx & ~15) / (8 / BPL); dst = (u8 *)info->screen_base + dy * next_line + (dx & ~15) / (8 / BPL); mask = 0xff00ff00; f = 0; w = width; if (sx & 15) { f = 1; w += 8; } if ((sx + width) & 15) f |= 2; w >>= 4; for (i = height; i; i--) { src32 = (u32 *)src; dst32 = (u32 *)dst; if (f & 1) { pval[0] = (*src32++ << 8) & mask; } else { pval[0] 
= dst32[0] & mask; } for (j = w; j > 0; j--) { v = *src32++; v1 = v & mask; *dst32++ = pval[0] | (v1 >> 8); pval[0] = (v ^ v1) << 8; } if (f & 2) { dst32[0] = (dst32[0] & mask) | pval[0]; } src += next_line; dst += next_line; } } else { u32 *src32, *dst32; u32 pval[4], v, v1, mask; int i, j, w, f; src = (u8 *)info->screen_base + (sy - 1) * next_line + ((sx + width + 8) & ~15) / (8 / BPL); dst = (u8 *)info->screen_base + (dy - 1) * next_line + ((dx + width + 8) & ~15) / (8 / BPL); mask = 0xff00ff; f = 0; w = width; if ((dx + width) & 15) f = 1; if (sx & 15) { f |= 2; w += 8; } w >>= 4; for (i = height; i; i--) { src32 = (u32 *)src; dst32 = (u32 *)dst; if (f & 1) { pval[0] = dst32[-1] & mask; } else { pval[0] = (*--src32 >> 8) & mask; } for (j = w; j > 0; j--) { v = *--src32; v1 = v & mask; *--dst32 = pval[0] | (v1 << 8); pval[0] = (v ^ v1) >> 8; } if (!(f & 2)) { dst32[-1] = (dst32[-1] & mask) | pval[0]; } src -= next_line; dst -= next_line; } } } } void atafb_iplan2p2_fillrect(struct fb_info *info, u_long next_line, u32 color, int sy, int sx, int height, int width) { u32 *dest; int rows, i; u32 cval[4]; dest = (u32 *)(info->screen_base + sy * next_line + (sx & ~15) / (8 / BPL)); if (sx & 15) { u8 *dest8 = (u8 *)dest + 1; expand8_col2mask(color, cval); for (i = height; i; i--) { fill8_col(dest8, cval); dest8 += next_line; } dest += BPL / 2; width -= 8; } expand16_col2mask(color, cval); rows = width >> 4; if (rows) { u32 *d = dest; u32 off = next_line - rows * BPL * 2; for (i = height; i; i--) { d = fill16_col(d, rows, cval); d = (u32 *)((long)d + off); } dest += rows * BPL / 2; width &= 15; } if (width) { u8 *dest8 = (u8 *)dest; expand8_col2mask(color, cval); for (i = height; i; i--) { fill8_col(dest8, cval); dest8 += next_line; } } } void atafb_iplan2p2_linefill(struct fb_info *info, u_long next_line, int dy, int dx, u32 width, const u8 *data, u32 bgcolor, u32 fgcolor) { u32 *dest; const u16 *data16; int rows; u32 fgm[4], bgm[4], m; dest = (u32 *)(info->screen_base 
+ dy * next_line + (dx & ~15) / (8 / BPL)); if (dx & 15) { fill8_2col((u8 *)dest + 1, fgcolor, bgcolor, *data++); dest += BPL / 2; width -= 8; } if (width >= 16) { data16 = (const u16 *)data; expand16_2col2mask(fgcolor, bgcolor, fgm, bgm); for (rows = width / 16; rows; rows--) { u16 d = *data16++; m = d | ((u32)d << 16); *dest++ = (m & fgm[0]) ^ bgm[0]; } data = (const u8 *)data16; width &= 15; } if (width) fill8_2col((u8 *)dest, fgcolor, bgcolor, *data); } #ifdef MODULE MODULE_LICENSE("GPL"); int init_module(void) { return 0; } void cleanup_module(void) { } #endif /* MODULE */ /* * Visible symbols for modules */ EXPORT_SYMBOL(atafb_iplan2p2_copyarea); EXPORT_SYMBOL(atafb_iplan2p2_fillrect); EXPORT_SYMBOL(atafb_iplan2p2_linefill);
gpl-2.0
BenRomer/unisys
drivers/acpi/acpica/rsmisc.c
78
20920
/******************************************************************************* * * Module Name: rsmisc - Miscellaneous resource descriptors * ******************************************************************************/ /* * Copyright (C) 2000 - 2016, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acresrc.h" #define _COMPONENT ACPI_RESOURCES ACPI_MODULE_NAME("rsmisc") #define INIT_RESOURCE_TYPE(i) i->resource_offset #define INIT_RESOURCE_LENGTH(i) i->aml_offset #define INIT_TABLE_LENGTH(i) i->value #define COMPARE_OPCODE(i) i->resource_offset #define COMPARE_TARGET(i) i->aml_offset #define COMPARE_VALUE(i) i->value /******************************************************************************* * * FUNCTION: acpi_rs_convert_aml_to_resource * * PARAMETERS: resource - Pointer to the resource descriptor * aml - Where the AML descriptor is returned * info - Pointer to appropriate conversion table * * RETURN: Status * * DESCRIPTION: Convert an external AML resource descriptor to the corresponding * internal resource descriptor * ******************************************************************************/ acpi_status acpi_rs_convert_aml_to_resource(struct acpi_resource *resource, union aml_resource *aml, struct acpi_rsconvert_info *info) { acpi_rs_length aml_resource_length; void *source; void *destination; char *target; u8 count; u8 flags_mode = FALSE; u16 item_count = 0; u16 temp16 = 0; ACPI_FUNCTION_TRACE(rs_convert_aml_to_resource); if (!info) { return_ACPI_STATUS(AE_BAD_PARAMETER); } if (((acpi_size) resource) & 0x3) { /* Each internal resource struct is expected to be 32-bit aligned */ ACPI_WARNING((AE_INFO, "Misaligned resource pointer (get): %p Type 0x%2.2X Length %u", resource, resource->type, 
resource->length)); } /* Extract the resource Length field (does not include header length) */ aml_resource_length = acpi_ut_get_resource_length(aml); /* * First table entry must be ACPI_RSC_INITxxx and must contain the * table length (# of table entries) */ count = INIT_TABLE_LENGTH(info); while (count) { /* * Source is the external AML byte stream buffer, * destination is the internal resource descriptor */ source = ACPI_ADD_PTR(void, aml, info->aml_offset); destination = ACPI_ADD_PTR(void, resource, info->resource_offset); switch (info->opcode) { case ACPI_RSC_INITGET: /* * Get the resource type and the initial (minimum) length */ memset(resource, 0, INIT_RESOURCE_LENGTH(info)); resource->type = INIT_RESOURCE_TYPE(info); resource->length = INIT_RESOURCE_LENGTH(info); break; case ACPI_RSC_INITSET: break; case ACPI_RSC_FLAGINIT: flags_mode = TRUE; break; case ACPI_RSC_1BITFLAG: /* * Mask and shift the flag bit */ ACPI_SET8(destination, ((ACPI_GET8(source) >> info->value) & 0x01)); break; case ACPI_RSC_2BITFLAG: /* * Mask and shift the flag bits */ ACPI_SET8(destination, ((ACPI_GET8(source) >> info->value) & 0x03)); break; case ACPI_RSC_3BITFLAG: /* * Mask and shift the flag bits */ ACPI_SET8(destination, ((ACPI_GET8(source) >> info->value) & 0x07)); break; case ACPI_RSC_COUNT: item_count = ACPI_GET8(source); ACPI_SET8(destination, item_count); resource->length = resource->length + (info->value * (item_count - 1)); break; case ACPI_RSC_COUNT16: item_count = aml_resource_length; ACPI_SET16(destination, item_count); resource->length = resource->length + (info->value * (item_count - 1)); break; case ACPI_RSC_COUNT_GPIO_PIN: target = ACPI_ADD_PTR(void, aml, info->value); item_count = ACPI_GET16(target) - ACPI_GET16(source); resource->length = resource->length + item_count; item_count = item_count / 2; ACPI_SET16(destination, item_count); break; case ACPI_RSC_COUNT_GPIO_VEN: item_count = ACPI_GET8(source); ACPI_SET8(destination, item_count); resource->length = 
resource->length + (info->value * item_count); break; case ACPI_RSC_COUNT_GPIO_RES: /* * Vendor data is optional (length/offset may both be zero) * Examine vendor data length field first */ target = ACPI_ADD_PTR(void, aml, (info->value + 2)); if (ACPI_GET16(target)) { /* Use vendor offset to get resource source length */ target = ACPI_ADD_PTR(void, aml, info->value); item_count = ACPI_GET16(target) - ACPI_GET16(source); } else { /* No vendor data to worry about */ item_count = aml->large_header.resource_length + sizeof(struct aml_resource_large_header) - ACPI_GET16(source); } resource->length = resource->length + item_count; ACPI_SET16(destination, item_count); break; case ACPI_RSC_COUNT_SERIAL_VEN: item_count = ACPI_GET16(source) - info->value; resource->length = resource->length + item_count; ACPI_SET16(destination, item_count); break; case ACPI_RSC_COUNT_SERIAL_RES: item_count = (aml_resource_length + sizeof(struct aml_resource_large_header)) - ACPI_GET16(source) - info->value; resource->length = resource->length + item_count; ACPI_SET16(destination, item_count); break; case ACPI_RSC_LENGTH: resource->length = resource->length + info->value; break; case ACPI_RSC_MOVE8: case ACPI_RSC_MOVE16: case ACPI_RSC_MOVE32: case ACPI_RSC_MOVE64: /* * Raw data move. 
Use the Info value field unless item_count has * been previously initialized via a COUNT opcode */ if (info->value) { item_count = info->value; } acpi_rs_move_data(destination, source, item_count, info->opcode); break; case ACPI_RSC_MOVE_GPIO_PIN: /* Generate and set the PIN data pointer */ target = (char *)ACPI_ADD_PTR(void, resource, (resource->length - item_count * 2)); *(u16 **)destination = ACPI_CAST_PTR(u16, target); /* Copy the PIN data */ source = ACPI_ADD_PTR(void, aml, ACPI_GET16(source)); acpi_rs_move_data(target, source, item_count, info->opcode); break; case ACPI_RSC_MOVE_GPIO_RES: /* Generate and set the resource_source string pointer */ target = (char *)ACPI_ADD_PTR(void, resource, (resource->length - item_count)); *(u8 **)destination = ACPI_CAST_PTR(u8, target); /* Copy the resource_source string */ source = ACPI_ADD_PTR(void, aml, ACPI_GET16(source)); acpi_rs_move_data(target, source, item_count, info->opcode); break; case ACPI_RSC_MOVE_SERIAL_VEN: /* Generate and set the Vendor Data pointer */ target = (char *)ACPI_ADD_PTR(void, resource, (resource->length - item_count)); *(u8 **)destination = ACPI_CAST_PTR(u8, target); /* Copy the Vendor Data */ source = ACPI_ADD_PTR(void, aml, info->value); acpi_rs_move_data(target, source, item_count, info->opcode); break; case ACPI_RSC_MOVE_SERIAL_RES: /* Generate and set the resource_source string pointer */ target = (char *)ACPI_ADD_PTR(void, resource, (resource->length - item_count)); *(u8 **)destination = ACPI_CAST_PTR(u8, target); /* Copy the resource_source string */ source = ACPI_ADD_PTR(void, aml, (ACPI_GET16(source) + info->value)); acpi_rs_move_data(target, source, item_count, info->opcode); break; case ACPI_RSC_SET8: memset(destination, info->aml_offset, info->value); break; case ACPI_RSC_DATA8: target = ACPI_ADD_PTR(char, resource, info->value); memcpy(destination, source, ACPI_GET16(target)); break; case ACPI_RSC_ADDRESS: /* * Common handler for address descriptor flags */ if 
(!acpi_rs_get_address_common(resource, aml)) { return_ACPI_STATUS (AE_AML_INVALID_RESOURCE_TYPE); } break; case ACPI_RSC_SOURCE: /* * Optional resource_source (Index and String) */ resource->length += acpi_rs_get_resource_source(aml_resource_length, info->value, destination, aml, NULL); break; case ACPI_RSC_SOURCEX: /* * Optional resource_source (Index and String). This is the more * complicated case used by the Interrupt() macro */ target = ACPI_ADD_PTR(char, resource, info->aml_offset + (item_count * 4)); resource->length += acpi_rs_get_resource_source(aml_resource_length, (acpi_rs_length) (((item_count - 1) * sizeof(u32)) + info->value), destination, aml, target); break; case ACPI_RSC_BITMASK: /* * 8-bit encoded bitmask (DMA macro) */ item_count = acpi_rs_decode_bitmask(ACPI_GET8(source), destination); if (item_count) { resource->length += (item_count - 1); } target = ACPI_ADD_PTR(char, resource, info->value); ACPI_SET8(target, item_count); break; case ACPI_RSC_BITMASK16: /* * 16-bit encoded bitmask (IRQ macro) */ ACPI_MOVE_16_TO_16(&temp16, source); item_count = acpi_rs_decode_bitmask(temp16, destination); if (item_count) { resource->length += (item_count - 1); } target = ACPI_ADD_PTR(char, resource, info->value); ACPI_SET8(target, item_count); break; case ACPI_RSC_EXIT_NE: /* * control - Exit conversion if not equal */ switch (info->resource_offset) { case ACPI_RSC_COMPARE_AML_LENGTH: if (aml_resource_length != info->value) { goto exit; } break; case ACPI_RSC_COMPARE_VALUE: if (ACPI_GET8(source) != info->value) { goto exit; } break; default: ACPI_ERROR((AE_INFO, "Invalid conversion sub-opcode")); return_ACPI_STATUS(AE_BAD_PARAMETER); } break; default: ACPI_ERROR((AE_INFO, "Invalid conversion opcode")); return_ACPI_STATUS(AE_BAD_PARAMETER); } count--; info++; } exit: if (!flags_mode) { /* Round the resource struct length up to the next boundary (32 or 64) */ resource->length = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(resource->length); } return_ACPI_STATUS(AE_OK); } 
/******************************************************************************* * * FUNCTION: acpi_rs_convert_resource_to_aml * * PARAMETERS: resource - Pointer to the resource descriptor * aml - Where the AML descriptor is returned * info - Pointer to appropriate conversion table * * RETURN: Status * * DESCRIPTION: Convert an internal resource descriptor to the corresponding * external AML resource descriptor. * ******************************************************************************/ acpi_status acpi_rs_convert_resource_to_aml(struct acpi_resource *resource, union aml_resource *aml, struct acpi_rsconvert_info *info) { void *source = NULL; void *destination; char *target; acpi_rsdesc_size aml_length = 0; u8 count; u16 temp16 = 0; u16 item_count = 0; ACPI_FUNCTION_TRACE(rs_convert_resource_to_aml); if (!info) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* * First table entry must be ACPI_RSC_INITxxx and must contain the * table length (# of table entries) */ count = INIT_TABLE_LENGTH(info); while (count) { /* * Source is the internal resource descriptor, * destination is the external AML byte stream buffer */ source = ACPI_ADD_PTR(void, resource, info->resource_offset); destination = ACPI_ADD_PTR(void, aml, info->aml_offset); switch (info->opcode) { case ACPI_RSC_INITSET: memset(aml, 0, INIT_RESOURCE_LENGTH(info)); aml_length = INIT_RESOURCE_LENGTH(info); acpi_rs_set_resource_header(INIT_RESOURCE_TYPE(info), aml_length, aml); break; case ACPI_RSC_INITGET: break; case ACPI_RSC_FLAGINIT: /* * Clear the flag byte */ ACPI_SET8(destination, 0); break; case ACPI_RSC_1BITFLAG: /* * Mask and shift the flag bit */ ACPI_SET_BIT(*ACPI_CAST8(destination), (u8) ((ACPI_GET8(source) & 0x01) << info-> value)); break; case ACPI_RSC_2BITFLAG: /* * Mask and shift the flag bits */ ACPI_SET_BIT(*ACPI_CAST8(destination), (u8) ((ACPI_GET8(source) & 0x03) << info-> value)); break; case ACPI_RSC_3BITFLAG: /* * Mask and shift the flag bits */ ACPI_SET_BIT(*ACPI_CAST8(destination), (u8) 
((ACPI_GET8(source) & 0x07) << info-> value)); break; case ACPI_RSC_COUNT: item_count = ACPI_GET8(source); ACPI_SET8(destination, item_count); aml_length = (u16) (aml_length + (info->value * (item_count - 1))); break; case ACPI_RSC_COUNT16: item_count = ACPI_GET16(source); aml_length = (u16) (aml_length + item_count); acpi_rs_set_resource_length(aml_length, aml); break; case ACPI_RSC_COUNT_GPIO_PIN: item_count = ACPI_GET16(source); ACPI_SET16(destination, aml_length); aml_length = (u16)(aml_length + item_count * 2); target = ACPI_ADD_PTR(void, aml, info->value); ACPI_SET16(target, aml_length); acpi_rs_set_resource_length(aml_length, aml); break; case ACPI_RSC_COUNT_GPIO_VEN: item_count = ACPI_GET16(source); ACPI_SET16(destination, item_count); aml_length = (u16)(aml_length + (info->value * item_count)); acpi_rs_set_resource_length(aml_length, aml); break; case ACPI_RSC_COUNT_GPIO_RES: /* Set resource source string length */ item_count = ACPI_GET16(source); ACPI_SET16(destination, aml_length); /* Compute offset for the Vendor Data */ aml_length = (u16)(aml_length + item_count); target = ACPI_ADD_PTR(void, aml, info->value); /* Set vendor offset only if there is vendor data */ if (resource->data.gpio.vendor_length) { ACPI_SET16(target, aml_length); } acpi_rs_set_resource_length(aml_length, aml); break; case ACPI_RSC_COUNT_SERIAL_VEN: item_count = ACPI_GET16(source); ACPI_SET16(destination, item_count + info->value); aml_length = (u16)(aml_length + item_count); acpi_rs_set_resource_length(aml_length, aml); break; case ACPI_RSC_COUNT_SERIAL_RES: item_count = ACPI_GET16(source); aml_length = (u16)(aml_length + item_count); acpi_rs_set_resource_length(aml_length, aml); break; case ACPI_RSC_LENGTH: acpi_rs_set_resource_length(info->value, aml); break; case ACPI_RSC_MOVE8: case ACPI_RSC_MOVE16: case ACPI_RSC_MOVE32: case ACPI_RSC_MOVE64: if (info->value) { item_count = info->value; } acpi_rs_move_data(destination, source, item_count, info->opcode); break; case 
ACPI_RSC_MOVE_GPIO_PIN: destination = (char *)ACPI_ADD_PTR(void, aml, ACPI_GET16 (destination)); source = *(u16 **)source; acpi_rs_move_data(destination, source, item_count, info->opcode); break; case ACPI_RSC_MOVE_GPIO_RES: /* Used for both resource_source string and vendor_data */ destination = (char *)ACPI_ADD_PTR(void, aml, ACPI_GET16 (destination)); source = *(u8 **)source; acpi_rs_move_data(destination, source, item_count, info->opcode); break; case ACPI_RSC_MOVE_SERIAL_VEN: destination = (char *)ACPI_ADD_PTR(void, aml, (aml_length - item_count)); source = *(u8 **)source; acpi_rs_move_data(destination, source, item_count, info->opcode); break; case ACPI_RSC_MOVE_SERIAL_RES: destination = (char *)ACPI_ADD_PTR(void, aml, (aml_length - item_count)); source = *(u8 **)source; acpi_rs_move_data(destination, source, item_count, info->opcode); break; case ACPI_RSC_ADDRESS: /* Set the Resource Type, General Flags, and Type-Specific Flags */ acpi_rs_set_address_common(aml, resource); break; case ACPI_RSC_SOURCEX: /* * Optional resource_source (Index and String) */ aml_length = acpi_rs_set_resource_source(aml, (acpi_rs_length) aml_length, source); acpi_rs_set_resource_length(aml_length, aml); break; case ACPI_RSC_SOURCE: /* * Optional resource_source (Index and String). 
This is the more * complicated case used by the Interrupt() macro */ aml_length = acpi_rs_set_resource_source(aml, info->value, source); acpi_rs_set_resource_length(aml_length, aml); break; case ACPI_RSC_BITMASK: /* * 8-bit encoded bitmask (DMA macro) */ ACPI_SET8(destination, acpi_rs_encode_bitmask(source, *ACPI_ADD_PTR(u8, resource, info-> value))); break; case ACPI_RSC_BITMASK16: /* * 16-bit encoded bitmask (IRQ macro) */ temp16 = acpi_rs_encode_bitmask(source, *ACPI_ADD_PTR(u8, resource, info->value)); ACPI_MOVE_16_TO_16(destination, &temp16); break; case ACPI_RSC_EXIT_LE: /* * control - Exit conversion if less than or equal */ if (item_count <= info->value) { goto exit; } break; case ACPI_RSC_EXIT_NE: /* * control - Exit conversion if not equal */ switch (COMPARE_OPCODE(info)) { case ACPI_RSC_COMPARE_VALUE: if (*ACPI_ADD_PTR(u8, resource, COMPARE_TARGET(info)) != COMPARE_VALUE(info)) { goto exit; } break; default: ACPI_ERROR((AE_INFO, "Invalid conversion sub-opcode")); return_ACPI_STATUS(AE_BAD_PARAMETER); } break; case ACPI_RSC_EXIT_EQ: /* * control - Exit conversion if equal */ if (*ACPI_ADD_PTR(u8, resource, COMPARE_TARGET(info)) == COMPARE_VALUE(info)) { goto exit; } break; default: ACPI_ERROR((AE_INFO, "Invalid conversion opcode")); return_ACPI_STATUS(AE_BAD_PARAMETER); } count--; info++; } exit: return_ACPI_STATUS(AE_OK); } #if 0 /* Previous resource validations */ if (aml->ext_address64.revision_ID != AML_RESOURCE_EXTENDED_ADDRESS_REVISION) { return_ACPI_STATUS(AE_SUPPORT); } if (resource->data.start_dpf.performance_robustness >= 3) { return_ACPI_STATUS(AE_AML_BAD_RESOURCE_VALUE); } if (((aml->irq.flags & 0x09) == 0x00) || ((aml->irq.flags & 0x09) == 0x09)) { /* * Only [active_high, edge_sensitive] or [active_low, level_sensitive] * polarity/trigger interrupts are allowed (ACPI spec, section * "IRQ Format"), so 0x00 and 0x09 are illegal. 
*/ ACPI_ERROR((AE_INFO, "Invalid interrupt polarity/trigger in resource list, 0x%X", aml->irq.flags)); return_ACPI_STATUS(AE_BAD_DATA); } resource->data.extended_irq.interrupt_count = temp8; if (temp8 < 1) { /* Must have at least one IRQ */ return_ACPI_STATUS(AE_AML_BAD_RESOURCE_LENGTH); } if (resource->data.dma.transfer == 0x03) { ACPI_ERROR((AE_INFO, "Invalid DMA.Transfer preference (3)")); return_ACPI_STATUS(AE_BAD_DATA); } #endif
gpl-2.0
Pingmin/linux
drivers/scsi/scsi_logging.c
78
11769
// SPDX-License-Identifier: GPL-2.0-only
/*
 * scsi_logging.c
 *
 * Copyright (C) 2014 SUSE Linux Products GmbH
 * Copyright (C) 2014 Hannes Reinecke <hare@suse.de>
 */

#include <linux/kernel.h>
#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>

/*
 * Allocate one fixed-size (128 byte) line buffer for a single log message.
 * GFP_ATOMIC because callers may run in completion/interrupt context.
 * Returns NULL on allocation failure; *len receives the buffer size.
 */
static char *scsi_log_reserve_buffer(size_t *len)
{
	*len = 128;
	return kmalloc(*len, GFP_ATOMIC);
}

/* Release a buffer obtained from scsi_log_reserve_buffer(). */
static void scsi_log_release_buffer(char *bufptr)
{
	kfree(bufptr);
}

/*
 * Name used for the "[...]" log prefix of a command: the backing gendisk's
 * name when the request has one, otherwise NULL (prefix is then omitted).
 */
static inline const char *scmd_name(const struct scsi_cmnd *scmd)
{
	return scmd->request->rq_disk ?
		scmd->request->rq_disk->disk_name : NULL;
}

/*
 * Write the common "[name] tag#N " prefix into logbuf.
 * A negative tag suppresses the tag field. Returns the number of bytes
 * written (the offset at which the message proper should continue).
 */
static size_t sdev_format_header(char *logbuf, size_t logbuf_len,
				 const char *name, int tag)
{
	size_t off = 0;

	if (name)
		off += scnprintf(logbuf + off, logbuf_len - off,
				 "[%s] ", name);

	if (WARN_ON(off >= logbuf_len))
		return off;

	if (tag >= 0)
		off += scnprintf(logbuf + off, logbuf_len - off,
				 "tag#%d ", tag);
	return off;
}

/*
 * printk-style message for a scsi_device, prefixed with "[name] ".
 * Silently drops the message if sdev is NULL or the buffer cannot be
 * allocated (best-effort logging).
 */
void sdev_prefix_printk(const char *level, const struct scsi_device *sdev,
			const char *name, const char *fmt, ...)
{
	va_list args;
	char *logbuf;
	size_t off = 0, logbuf_len;

	if (!sdev)
		return;

	logbuf = scsi_log_reserve_buffer(&logbuf_len);
	if (!logbuf)
		return;

	if (name)
		off += scnprintf(logbuf + off, logbuf_len - off,
				 "[%s] ", name);
	if (!WARN_ON(off >= logbuf_len)) {
		va_start(args, fmt);
		off += vscnprintf(logbuf + off, logbuf_len - off, fmt, args);
		va_end(args);
	}
	dev_printk(level, &sdev->sdev_gendev, "%s", logbuf);
	scsi_log_release_buffer(logbuf);
}
EXPORT_SYMBOL(sdev_prefix_printk);

/*
 * printk-style message for a scsi_cmnd, prefixed with "[disk] tag#N ".
 * Best-effort: silently returns on NULL command or allocation failure.
 */
void scmd_printk(const char *level, const struct scsi_cmnd *scmd,
		const char *fmt, ...)
{
	va_list args;
	char *logbuf;
	size_t off = 0, logbuf_len;

	if (!scmd || !scmd->cmnd)
		return;

	logbuf = scsi_log_reserve_buffer(&logbuf_len);
	if (!logbuf)
		return;
	off = sdev_format_header(logbuf, logbuf_len, scmd_name(scmd),
				 scmd->request->tag);
	if (off < logbuf_len) {
		va_start(args, fmt);
		off += vscnprintf(logbuf + off, logbuf_len - off, fmt, args);
		va_end(args);
	}
	dev_printk(level, &scmd->device->sdev_gendev, "%s", logbuf);
	scsi_log_release_buffer(logbuf);
}
EXPORT_SYMBOL(scmd_printk);

/*
 * Format a human-readable name for the CDB opcode into buffer.
 * For variable-length CDBs the service action comes from bytes 8-9,
 * otherwise from the low 5 bits of byte 1. Falls back to numeric
 * "opcode=0x.." forms (annotated vendor/reserved) when no name is known.
 * Returns the number of bytes written.
 */
static size_t scsi_format_opcode_name(char *buffer, size_t buf_len,
				      const unsigned char *cdbp)
{
	int sa, cdb0;
	const char *cdb_name = NULL, *sa_name = NULL;
	size_t off;

	cdb0 = cdbp[0];
	if (cdb0 == VARIABLE_LENGTH_CMD) {
		int len = scsi_varlen_cdb_length(cdbp);

		/* A variable-length CDB must be at least 10 bytes */
		if (len < 10) {
			off = scnprintf(buffer, buf_len,
					"short variable length command, len=%d",
					len);
			return off;
		}
		sa = (cdbp[8] << 8) + cdbp[9];
	} else
		sa = cdbp[1] & 0x1f;

	if (!scsi_opcode_sa_name(cdb0, sa, &cdb_name, &sa_name)) {
		if (cdb_name)
			off = scnprintf(buffer, buf_len, "%s", cdb_name);
		else {
			off = scnprintf(buffer, buf_len, "opcode=0x%x", cdb0);
			if (WARN_ON(off >= buf_len))
				return off;
			if (cdb0 >= VENDOR_SPECIFIC_CDB)
				off += scnprintf(buffer + off, buf_len - off,
						 " (vendor)");
			else if (cdb0 >= 0x60 && cdb0 < 0x7e)
				off += scnprintf(buffer + off, buf_len - off,
						 " (reserved)");
		}
	} else {
		/* Opcode uses a service action; prefer the SA-specific name */
		if (sa_name)
			off = scnprintf(buffer, buf_len, "%s", sa_name);
		else if (cdb_name)
			off = scnprintf(buffer, buf_len, "%s, sa=0x%x",
					cdb_name, sa);
		else
			off = scnprintf(buffer, buf_len,
					"opcode=0x%x, sa=0x%x", cdb0, sa);
	}
	WARN_ON(off >= buf_len);
	return off;
}

/*
 * Format the opcode name followed by the raw CDB bytes (hex) into logbuf.
 * At most min(scsi_command_size(cdb), cdb_len) bytes are dumped; the dump
 * stops early if fewer than 3 bytes of buffer remain ("  %02x" needs 3).
 * Returns the number of bytes written.
 */
size_t __scsi_format_command(char *logbuf, size_t logbuf_len,
			     const unsigned char *cdb, size_t cdb_len)
{
	int len, k;
	size_t off;

	off = scsi_format_opcode_name(logbuf, logbuf_len, cdb);
	if (off >= logbuf_len)
		return off;
	len = scsi_command_size(cdb);
	if (cdb_len < len)
		len = cdb_len;
	/* print out all bytes in cdb */
	for (k = 0; k < len; ++k) {
		if (off > logbuf_len - 3)
			break;
		off += scnprintf(logbuf + off, logbuf_len - off,
				 " %02x", cdb[k]);
	}
	return off;
}
EXPORT_SYMBOL(__scsi_format_command);

/*
 * Log a command's CDB at KERN_INFO. Short CDBs (<= 16 bytes) go on a
 * single line; longer CDBs get the opcode on one line followed by one
 * "CDB[xx]:" hex-dump line per 16 bytes (each line gets its own buffer).
 */
void scsi_print_command(struct scsi_cmnd *cmd)
{
	int k;
	char *logbuf;
	size_t off, logbuf_len;

	if (!cmd->cmnd)
		return;

	logbuf = scsi_log_reserve_buffer(&logbuf_len);
	if (!logbuf)
		return;

	off = sdev_format_header(logbuf, logbuf_len,
				 scmd_name(cmd), cmd->request->tag);
	if (off >= logbuf_len)
		goto out_printk;

	off += scnprintf(logbuf + off, logbuf_len - off, "CDB: ");
	if (WARN_ON(off >= logbuf_len))
		goto out_printk;

	off += scsi_format_opcode_name(logbuf + off, logbuf_len - off,
				       cmd->cmnd);
	if (off >= logbuf_len)
		goto out_printk;

	/* print out all bytes in cdb */
	if (cmd->cmd_len > 16) {
		/* Print opcode in one line and use separate lines for CDB */
		off += scnprintf(logbuf + off, logbuf_len - off, "\n");
		dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf);
		scsi_log_release_buffer(logbuf);
		for (k = 0; k < cmd->cmd_len; k += 16) {
			size_t linelen = min(cmd->cmd_len - k, 16);

			logbuf = scsi_log_reserve_buffer(&logbuf_len);
			if (!logbuf)
				break;
			off = sdev_format_header(logbuf, logbuf_len,
						 scmd_name(cmd),
						 cmd->request->tag);
			/* 58 = room for "CDB[xx]: " plus a 16-byte hex line */
			if (!WARN_ON(off > logbuf_len - 58)) {
				off += scnprintf(logbuf + off,
						 logbuf_len - off,
						 "CDB[%02x]: ", k);
				hex_dump_to_buffer(&cmd->cmnd[k], linelen,
						   16, 1, logbuf + off,
						   logbuf_len - off, false);
			}
			dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s",
				   logbuf);
			scsi_log_release_buffer(logbuf);
		}
		return;
	}
	/* 49 = room for a space plus a 16-byte single-line hex dump */
	if (!WARN_ON(off > logbuf_len - 49)) {
		off += scnprintf(logbuf + off, logbuf_len - off, " ");
		hex_dump_to_buffer(cmd->cmnd, cmd->cmd_len, 16, 1,
				   logbuf + off, logbuf_len - off, false);
	}
out_printk:
	dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf);
	scsi_log_release_buffer(logbuf);
}
EXPORT_SYMBOL(scsi_print_command);

/*
 * Format the additional sense code (ASC/ASCQ) description into buffer.
 * Uses the known-string table when available; otherwise prints raw hex,
 * flagging values >= 0x80 as vendor-specific. Returns bytes written.
 */
static size_t scsi_format_extd_sense(char *buffer, size_t buf_len,
				     unsigned char asc, unsigned char ascq)
{
	size_t off = 0;
	const char *extd_sense_fmt = NULL;
	const char *extd_sense_str = scsi_extd_sense_format(asc, ascq,
							    &extd_sense_fmt);

	if (extd_sense_str) {
		off = scnprintf(buffer, buf_len, "Add. Sense: %s",
				extd_sense_str);
		if (extd_sense_fmt)
			off += scnprintf(buffer + off, buf_len - off,
					 "(%s%x)", extd_sense_fmt, ascq);
	} else {
		if (asc >= 0x80)
			off = scnprintf(buffer, buf_len, "<<vendor>>");
		off += scnprintf(buffer + off, buf_len - off,
				 "ASC=0x%x ", asc);
		if (ascq >= 0x80)
			off += scnprintf(buffer + off, buf_len - off,
					 "<<vendor>>");
		off += scnprintf(buffer + off, buf_len - off,
				 "ASCQ=0x%x ", ascq);
	}
	return off;
}

/*
 * Format the sense key line ("Sense Key : <key> [current|deferred]
 * [descriptor]") into buffer. Returns bytes written.
 */
static size_t scsi_format_sense_hdr(char *buffer, size_t buf_len,
				    const struct scsi_sense_hdr *sshdr)
{
	const char *sense_txt;
	size_t off;

	off = scnprintf(buffer, buf_len, "Sense Key : ");
	sense_txt = scsi_sense_key_string(sshdr->sense_key);
	if (sense_txt)
		off += scnprintf(buffer + off, buf_len - off,
				 "%s ", sense_txt);
	else
		off += scnprintf(buffer + off, buf_len - off,
				 "0x%x ", sshdr->sense_key);
	off += scnprintf(buffer + off, buf_len - off,
		scsi_sense_is_deferred(sshdr) ? "[deferred] " : "[current] ");

	/* Response codes 0x72/0x73 indicate descriptor-format sense data */
	if (sshdr->response_code >= 0x72)
		off += scnprintf(buffer + off, buf_len - off, "[descriptor] ");
	return off;
}

/*
 * Fallback for sense data that could not be normalized: hex-dump the raw
 * sense buffer, 16 bytes per log line, each line carrying the header prefix.
 */
static void scsi_log_dump_sense(const struct scsi_device *sdev,
				const char *name, int tag,
				const unsigned char *sense_buffer,
				int sense_len)
{
	char *logbuf;
	size_t logbuf_len;
	int i;

	logbuf = scsi_log_reserve_buffer(&logbuf_len);
	if (!logbuf)
		return;

	for (i = 0; i < sense_len; i += 16) {
		int len = min(sense_len - i, 16);
		size_t off;

		off = sdev_format_header(logbuf, logbuf_len, name, tag);
		hex_dump_to_buffer(&sense_buffer[i], len, 16, 1,
				   logbuf + off, logbuf_len - off,
				   false);
		dev_printk(KERN_INFO, &sdev->sdev_gendev, "%s", logbuf);
	}
	scsi_log_release_buffer(logbuf);
}

/*
 * Log a normalized sense header as two KERN_INFO lines: the sense key
 * line, then the ASC/ASCQ line. Each line uses a freshly allocated buffer;
 * allocation failure silently drops the remaining output.
 */
static void scsi_log_print_sense_hdr(const struct scsi_device *sdev,
				     const char *name, int tag,
				     const struct scsi_sense_hdr *sshdr)
{
	char *logbuf;
	size_t off, logbuf_len;

	logbuf = scsi_log_reserve_buffer(&logbuf_len);
	if (!logbuf)
		return;
	off = sdev_format_header(logbuf, logbuf_len, name, tag);
	off += scsi_format_sense_hdr(logbuf + off, logbuf_len - off, sshdr);
	dev_printk(KERN_INFO, &sdev->sdev_gendev, "%s", logbuf);
	scsi_log_release_buffer(logbuf);

	logbuf = scsi_log_reserve_buffer(&logbuf_len);
	if (!logbuf)
		return;
	off = sdev_format_header(logbuf, logbuf_len, name, tag);
	off += scsi_format_extd_sense(logbuf + off, logbuf_len - off,
				      sshdr->asc, sshdr->ascq);
	dev_printk(KERN_INFO, &sdev->sdev_gendev, "%s", logbuf);
	scsi_log_release_buffer(logbuf);
}

/*
 * Normalize raw sense data and print it; if normalization fails, fall
 * back to a raw hex dump of the sense buffer.
 */
static void scsi_log_print_sense(const struct scsi_device *sdev,
				 const char *name, int tag,
				 const unsigned char *sense_buffer,
				 int sense_len)
{
	struct scsi_sense_hdr sshdr;

	if (scsi_normalize_sense(sense_buffer, sense_len, &sshdr))
		scsi_log_print_sense_hdr(sdev, name, tag, &sshdr);
	else
		scsi_log_dump_sense(sdev, name, tag, sense_buffer, sense_len);
}

/*
 * Print normalized SCSI sense header with a prefix.
 */
void scsi_print_sense_hdr(const struct scsi_device *sdev, const char *name,
			  const struct scsi_sense_hdr *sshdr)
{
	scsi_log_print_sense_hdr(sdev, name, -1, sshdr);
}
EXPORT_SYMBOL(scsi_print_sense_hdr);

/* Normalize and print sense buffer with name prefix */
void __scsi_print_sense(const struct scsi_device *sdev, const char *name,
			const unsigned char *sense_buffer, int sense_len)
{
	scsi_log_print_sense(sdev, name, -1, sense_buffer, sense_len);
}
EXPORT_SYMBOL(__scsi_print_sense);

/* Normalize and print sense buffer in SCSI command */
void scsi_print_sense(const struct scsi_cmnd *cmd)
{
	scsi_log_print_sense(cmd->device, scmd_name(cmd), cmd->request->tag,
			     cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
}
EXPORT_SYMBOL(scsi_print_sense);

/*
 * Log a command's completion at KERN_INFO: optional caller message, the
 * mid-layer disposition, host/driver byte decoding of cmd->result, and
 * the command's age in seconds. Unknown codes are printed as raw hex.
 */
void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
		       int disposition)
{
	char *logbuf;
	size_t off, logbuf_len;
	const char *mlret_string = scsi_mlreturn_string(disposition);
	const char *hb_string = scsi_hostbyte_string(cmd->result);
	const char *db_string = scsi_driverbyte_string(cmd->result);
	unsigned long cmd_age = (jiffies - cmd->jiffies_at_alloc) / HZ;

	logbuf = scsi_log_reserve_buffer(&logbuf_len);
	if (!logbuf)
		return;

	off = sdev_format_header(logbuf, logbuf_len, scmd_name(cmd),
				 cmd->request->tag);

	if (off >= logbuf_len)
		goto out_printk;

	if (msg) {
		off += scnprintf(logbuf + off, logbuf_len - off,
				 "%s: ", msg);
		if (WARN_ON(off >= logbuf_len))
			goto out_printk;
	}
	if (mlret_string)
		off += scnprintf(logbuf + off, logbuf_len - off,
				 "%s ", mlret_string);
	else
		off += scnprintf(logbuf + off, logbuf_len - off,
				 "UNKNOWN(0x%02x) ", disposition);
	if (WARN_ON(off >= logbuf_len))
		goto out_printk;

	off += scnprintf(logbuf + off, logbuf_len - off, "Result: ");
	if (WARN_ON(off >= logbuf_len))
		goto out_printk;

	if (hb_string)
		off += scnprintf(logbuf + off, logbuf_len - off,
				 "hostbyte=%s ", hb_string);
	else
		off += scnprintf(logbuf + off, logbuf_len - off,
				 "hostbyte=0x%02x ", host_byte(cmd->result));
	if (WARN_ON(off >= logbuf_len))
		goto out_printk;

	if (db_string)
		off += scnprintf(logbuf + off, logbuf_len - off,
				 "driverbyte=%s ", db_string);
	else
		off += scnprintf(logbuf + off, logbuf_len - off,
				 "driverbyte=0x%02x ",
				 driver_byte(cmd->result));

	off += scnprintf(logbuf + off, logbuf_len - off,
			 "cmd_age=%lus", cmd_age);

out_printk:
	dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf);
	scsi_log_release_buffer(logbuf);
}
EXPORT_SYMBOL(scsi_print_result);
gpl-2.0
GAXUSXX/G935FGaXusKernel3
drivers/pinctrl/spear/pinctrl-spear320.c
334
91542
/* * Driver for the ST Microelectronics SPEAr320 pinmux * * Copyright (C) 2012 ST Microelectronics * Viresh Kumar <viresh.linux@gmail.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/err.h> #include <linux/init.h> #include <linux/module.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include "pinctrl-spear3xx.h" #define DRIVER_NAME "spear320-pinmux" /* addresses */ #define PMX_CONFIG_REG 0x0C #define MODE_CONFIG_REG 0x10 #define MODE_EXT_CONFIG_REG 0x18 /* modes */ #define AUTO_NET_SMII_MODE (1 << 0) #define AUTO_NET_MII_MODE (1 << 1) #define AUTO_EXP_MODE (1 << 2) #define SMALL_PRINTERS_MODE (1 << 3) #define EXTENDED_MODE (1 << 4) static struct spear_pmx_mode pmx_mode_auto_net_smii = { .name = "Automation Networking SMII mode", .mode = AUTO_NET_SMII_MODE, .reg = MODE_CONFIG_REG, .mask = 0x00000007, .val = 0x0, }; static struct spear_pmx_mode pmx_mode_auto_net_mii = { .name = "Automation Networking MII mode", .mode = AUTO_NET_MII_MODE, .reg = MODE_CONFIG_REG, .mask = 0x00000007, .val = 0x1, }; static struct spear_pmx_mode pmx_mode_auto_exp = { .name = "Automation Expanded mode", .mode = AUTO_EXP_MODE, .reg = MODE_CONFIG_REG, .mask = 0x00000007, .val = 0x2, }; static struct spear_pmx_mode pmx_mode_small_printers = { .name = "Small Printers mode", .mode = SMALL_PRINTERS_MODE, .reg = MODE_CONFIG_REG, .mask = 0x00000007, .val = 0x3, }; static struct spear_pmx_mode pmx_mode_extended = { .name = "extended mode", .mode = EXTENDED_MODE, .reg = MODE_EXT_CONFIG_REG, .mask = 0x00000001, .val = 0x1, }; static struct spear_pmx_mode *spear320_pmx_modes[] = { &pmx_mode_auto_net_smii, &pmx_mode_auto_net_mii, &pmx_mode_auto_exp, &pmx_mode_small_printers, &pmx_mode_extended, }; /* Extended mode registers and their offsets */ #define EXT_CTRL_REG 0x0018 #define MII_MDIO_MASK (1 << 4) #define 
MII_MDIO_10_11_VAL 0 #define MII_MDIO_81_VAL (1 << 4) #define EMI_FSMC_DYNAMIC_MUX_MASK (1 << 5) #define MAC_MODE_MII 0 #define MAC_MODE_RMII 1 #define MAC_MODE_SMII 2 #define MAC_MODE_SS_SMII 3 #define MAC_MODE_MASK 0x3 #define MAC1_MODE_SHIFT 16 #define MAC2_MODE_SHIFT 18 #define IP_SEL_PAD_0_9_REG 0x00A4 #define PMX_PL_0_1_MASK (0x3F << 0) #define PMX_UART2_PL_0_1_VAL 0x0 #define PMX_I2C2_PL_0_1_VAL (0x4 | (0x4 << 3)) #define PMX_PL_2_3_MASK (0x3F << 6) #define PMX_I2C2_PL_2_3_VAL 0x0 #define PMX_UART6_PL_2_3_VAL ((0x1 << 6) | (0x1 << 9)) #define PMX_UART1_ENH_PL_2_3_VAL ((0x4 << 6) | (0x4 << 9)) #define PMX_PL_4_5_MASK (0x3F << 12) #define PMX_UART5_PL_4_5_VAL ((0x1 << 12) | (0x1 << 15)) #define PMX_UART1_ENH_PL_4_5_VAL ((0x4 << 12) | (0x4 << 15)) #define PMX_PL_5_MASK (0x7 << 15) #define PMX_TOUCH_Y_PL_5_VAL 0x0 #define PMX_PL_6_7_MASK (0x3F << 18) #define PMX_PL_6_MASK (0x7 << 18) #define PMX_PL_7_MASK (0x7 << 21) #define PMX_UART4_PL_6_7_VAL ((0x1 << 18) | (0x1 << 21)) #define PMX_PWM_3_PL_6_VAL (0x2 << 18) #define PMX_PWM_2_PL_7_VAL (0x2 << 21) #define PMX_UART1_ENH_PL_6_7_VAL ((0x4 << 18) | (0x4 << 21)) #define PMX_PL_8_9_MASK (0x3F << 24) #define PMX_UART3_PL_8_9_VAL ((0x1 << 24) | (0x1 << 27)) #define PMX_PWM_0_1_PL_8_9_VAL ((0x2 << 24) | (0x2 << 27)) #define PMX_I2C1_PL_8_9_VAL ((0x4 << 24) | (0x4 << 27)) #define IP_SEL_PAD_10_19_REG 0x00A8 #define PMX_PL_10_11_MASK (0x3F << 0) #define PMX_SMII_PL_10_11_VAL 0 #define PMX_RMII_PL_10_11_VAL ((0x4 << 0) | (0x4 << 3)) #define PMX_PL_12_MASK (0x7 << 6) #define PMX_PWM3_PL_12_VAL 0 #define PMX_SDHCI_CD_PL_12_VAL (0x4 << 6) #define PMX_PL_13_14_MASK (0x3F << 9) #define PMX_PL_13_MASK (0x7 << 9) #define PMX_PL_14_MASK (0x7 << 12) #define PMX_SSP2_PL_13_14_15_16_VAL 0 #define PMX_UART4_PL_13_14_VAL ((0x1 << 9) | (0x1 << 12)) #define PMX_RMII_PL_13_14_VAL ((0x4 << 9) | (0x4 << 12)) #define PMX_PWM2_PL_13_VAL (0x2 << 9) #define PMX_PWM1_PL_14_VAL (0x2 << 12) #define PMX_PL_15_MASK (0x7 << 15) #define 
PMX_PWM0_PL_15_VAL (0x2 << 15) #define PMX_PL_15_16_MASK (0x3F << 15) #define PMX_UART3_PL_15_16_VAL ((0x1 << 15) | (0x1 << 18)) #define PMX_RMII_PL_15_16_VAL ((0x4 << 15) | (0x4 << 18)) #define PMX_PL_17_18_MASK (0x3F << 21) #define PMX_SSP1_PL_17_18_19_20_VAL 0 #define PMX_RMII_PL_17_18_VAL ((0x4 << 21) | (0x4 << 24)) #define PMX_PL_19_MASK (0x7 << 27) #define PMX_I2C2_PL_19_VAL (0x1 << 27) #define PMX_RMII_PL_19_VAL (0x4 << 27) #define IP_SEL_PAD_20_29_REG 0x00AC #define PMX_PL_20_MASK (0x7 << 0) #define PMX_I2C2_PL_20_VAL (0x1 << 0) #define PMX_RMII_PL_20_VAL (0x4 << 0) #define PMX_PL_21_TO_27_MASK (0x1FFFFF << 3) #define PMX_SMII_PL_21_TO_27_VAL 0 #define PMX_RMII_PL_21_TO_27_VAL ((0x4 << 3) | (0x4 << 6) | (0x4 << 9) | (0x4 << 12) | (0x4 << 15) | (0x4 << 18) | (0x4 << 21)) #define PMX_PL_28_29_MASK (0x3F << 24) #define PMX_PL_28_MASK (0x7 << 24) #define PMX_PL_29_MASK (0x7 << 27) #define PMX_UART1_PL_28_29_VAL 0 #define PMX_PWM_3_PL_28_VAL (0x4 << 24) #define PMX_PWM_2_PL_29_VAL (0x4 << 27) #define IP_SEL_PAD_30_39_REG 0x00B0 #define PMX_PL_30_31_MASK (0x3F << 0) #define PMX_CAN1_PL_30_31_VAL (0) #define PMX_PL_30_MASK (0x7 << 0) #define PMX_PL_31_MASK (0x7 << 3) #define PMX_PWM1_EXT_PL_30_VAL (0x4 << 0) #define PMX_PWM0_EXT_PL_31_VAL (0x4 << 3) #define PMX_UART1_ENH_PL_31_VAL (0x3 << 3) #define PMX_PL_32_33_MASK (0x3F << 6) #define PMX_CAN0_PL_32_33_VAL 0 #define PMX_UART1_ENH_PL_32_33_VAL ((0x3 << 6) | (0x3 << 9)) #define PMX_SSP2_PL_32_33_VAL ((0x4 << 6) | (0x4 << 9)) #define PMX_PL_34_MASK (0x7 << 12) #define PMX_PWM2_PL_34_VAL 0 #define PMX_UART1_ENH_PL_34_VAL (0x2 << 12) #define PMX_SSP2_PL_34_VAL (0x4 << 12) #define PMX_PL_35_MASK (0x7 << 15) #define PMX_I2S_REF_CLK_PL_35_VAL 0 #define PMX_UART1_ENH_PL_35_VAL (0x2 << 15) #define PMX_SSP2_PL_35_VAL (0x4 << 15) #define PMX_PL_36_MASK (0x7 << 18) #define PMX_TOUCH_X_PL_36_VAL 0 #define PMX_UART1_ENH_PL_36_VAL (0x2 << 18) #define PMX_SSP1_PL_36_VAL (0x4 << 18) #define PMX_PL_37_38_MASK (0x3F << 21) #define 
PMX_PWM0_1_PL_37_38_VAL 0 #define PMX_UART5_PL_37_38_VAL ((0x2 << 21) | (0x2 << 24)) #define PMX_SSP1_PL_37_38_VAL ((0x4 << 21) | (0x4 << 24)) #define PMX_PL_39_MASK (0x7 << 27) #define PMX_I2S_PL_39_VAL 0 #define PMX_UART4_PL_39_VAL (0x2 << 27) #define PMX_SSP1_PL_39_VAL (0x4 << 27) #define IP_SEL_PAD_40_49_REG 0x00B4 #define PMX_PL_40_MASK (0x7 << 0) #define PMX_I2S_PL_40_VAL 0 #define PMX_UART4_PL_40_VAL (0x2 << 0) #define PMX_PWM3_PL_40_VAL (0x4 << 0) #define PMX_PL_41_42_MASK (0x3F << 3) #define PMX_PL_41_MASK (0x7 << 3) #define PMX_PL_42_MASK (0x7 << 6) #define PMX_I2S_PL_41_42_VAL 0 #define PMX_UART3_PL_41_42_VAL ((0x2 << 3) | (0x2 << 6)) #define PMX_PWM2_PL_41_VAL (0x4 << 3) #define PMX_PWM1_PL_42_VAL (0x4 << 6) #define PMX_PL_43_MASK (0x7 << 9) #define PMX_SDHCI_PL_43_VAL 0 #define PMX_UART1_ENH_PL_43_VAL (0x2 << 9) #define PMX_PWM0_PL_43_VAL (0x4 << 9) #define PMX_PL_44_45_MASK (0x3F << 12) #define PMX_SDHCI_PL_44_45_VAL 0 #define PMX_UART1_ENH_PL_44_45_VAL ((0x2 << 12) | (0x2 << 15)) #define PMX_SSP2_PL_44_45_VAL ((0x4 << 12) | (0x4 << 15)) #define PMX_PL_46_47_MASK (0x3F << 18) #define PMX_SDHCI_PL_46_47_VAL 0 #define PMX_FSMC_EMI_PL_46_47_VAL ((0x2 << 18) | (0x2 << 21)) #define PMX_SSP2_PL_46_47_VAL ((0x4 << 18) | (0x4 << 21)) #define PMX_PL_48_49_MASK (0x3F << 24) #define PMX_SDHCI_PL_48_49_VAL 0 #define PMX_FSMC_EMI_PL_48_49_VAL ((0x2 << 24) | (0x2 << 27)) #define PMX_SSP1_PL_48_49_VAL ((0x4 << 24) | (0x4 << 27)) #define IP_SEL_PAD_50_59_REG 0x00B8 #define PMX_PL_50_51_MASK (0x3F << 0) #define PMX_EMI_PL_50_51_VAL ((0x2 << 0) | (0x2 << 3)) #define PMX_SSP1_PL_50_51_VAL ((0x4 << 0) | (0x4 << 3)) #define PMX_PL_50_MASK (0x7 << 0) #define PMX_PL_51_MASK (0x7 << 3) #define PMX_SDHCI_PL_50_VAL 0 #define PMX_SDHCI_CD_PL_51_VAL 0 #define PMX_PL_52_53_MASK (0x3F << 6) #define PMX_FSMC_PL_52_53_VAL 0 #define PMX_EMI_PL_52_53_VAL ((0x2 << 6) | (0x2 << 9)) #define PMX_UART3_PL_52_53_VAL ((0x4 << 6) | (0x4 << 9)) #define PMX_PL_54_55_56_MASK (0x1FF << 12) 
#define PMX_FSMC_EMI_PL_54_55_56_VAL ((0x2 << 12) | (0x2 << 15) | (0x2 << 18)) #define PMX_PL_57_MASK (0x7 << 21) #define PMX_FSMC_PL_57_VAL 0 #define PMX_PWM3_PL_57_VAL (0x4 << 21) #define PMX_PL_58_59_MASK (0x3F << 24) #define PMX_PL_58_MASK (0x7 << 24) #define PMX_PL_59_MASK (0x7 << 27) #define PMX_FSMC_EMI_PL_58_59_VAL ((0x2 << 24) | (0x2 << 27)) #define PMX_PWM2_PL_58_VAL (0x4 << 24) #define PMX_PWM1_PL_59_VAL (0x4 << 27) #define IP_SEL_PAD_60_69_REG 0x00BC #define PMX_PL_60_MASK (0x7 << 0) #define PMX_FSMC_PL_60_VAL 0 #define PMX_PWM0_PL_60_VAL (0x4 << 0) #define PMX_PL_61_TO_64_MASK (0xFFF << 3) #define PMX_FSMC_PL_61_TO_64_VAL ((0x2 << 3) | (0x2 << 6) | (0x2 << 9) | (0x2 << 12)) #define PMX_SSP2_PL_61_TO_64_VAL ((0x4 << 3) | (0x4 << 6) | (0x4 << 9) | (0x4 << 12)) #define PMX_PL_65_TO_68_MASK (0xFFF << 15) #define PMX_FSMC_PL_65_TO_68_VAL ((0x2 << 15) | (0x2 << 18) | (0x2 << 21) | (0x2 << 24)) #define PMX_SSP1_PL_65_TO_68_VAL ((0x4 << 15) | (0x4 << 18) | (0x4 << 21) | (0x4 << 24)) #define PMX_PL_69_MASK (0x7 << 27) #define PMX_CLCD_PL_69_VAL (0) #define PMX_EMI_PL_69_VAL (0x2 << 27) #define PMX_SPP_PL_69_VAL (0x3 << 27) #define PMX_UART5_PL_69_VAL (0x4 << 27) #define IP_SEL_PAD_70_79_REG 0x00C0 #define PMX_PL_70_MASK (0x7 << 0) #define PMX_CLCD_PL_70_VAL (0) #define PMX_FSMC_EMI_PL_70_VAL (0x2 << 0) #define PMX_SPP_PL_70_VAL (0x3 << 0) #define PMX_UART5_PL_70_VAL (0x4 << 0) #define PMX_PL_71_72_MASK (0x3F << 3) #define PMX_CLCD_PL_71_72_VAL (0) #define PMX_FSMC_EMI_PL_71_72_VAL ((0x2 << 3) | (0x2 << 6)) #define PMX_SPP_PL_71_72_VAL ((0x3 << 3) | (0x3 << 6)) #define PMX_UART4_PL_71_72_VAL ((0x4 << 3) | (0x4 << 6)) #define PMX_PL_73_MASK (0x7 << 9) #define PMX_CLCD_PL_73_VAL (0) #define PMX_FSMC_EMI_PL_73_VAL (0x2 << 9) #define PMX_SPP_PL_73_VAL (0x3 << 9) #define PMX_UART3_PL_73_VAL (0x4 << 9) #define PMX_PL_74_MASK (0x7 << 12) #define PMX_CLCD_PL_74_VAL (0) #define PMX_EMI_PL_74_VAL (0x2 << 12) #define PMX_SPP_PL_74_VAL (0x3 << 12) #define 
PMX_UART3_PL_74_VAL (0x4 << 12) #define PMX_PL_75_76_MASK (0x3F << 15) #define PMX_CLCD_PL_75_76_VAL (0) #define PMX_EMI_PL_75_76_VAL ((0x2 << 15) | (0x2 << 18)) #define PMX_SPP_PL_75_76_VAL ((0x3 << 15) | (0x3 << 18)) #define PMX_I2C2_PL_75_76_VAL ((0x4 << 15) | (0x4 << 18)) #define PMX_PL_77_78_79_MASK (0x1FF << 21) #define PMX_CLCD_PL_77_78_79_VAL (0) #define PMX_EMI_PL_77_78_79_VAL ((0x2 << 21) | (0x2 << 24) | (0x2 << 27)) #define PMX_SPP_PL_77_78_79_VAL ((0x3 << 21) | (0x3 << 24) | (0x3 << 27)) #define PMX_RS485_PL_77_78_79_VAL ((0x4 << 21) | (0x4 << 24) | (0x4 << 27)) #define IP_SEL_PAD_80_89_REG 0x00C4 #define PMX_PL_80_TO_85_MASK (0x3FFFF << 0) #define PMX_CLCD_PL_80_TO_85_VAL 0 #define PMX_MII2_PL_80_TO_85_VAL ((0x1 << 0) | (0x1 << 3) | (0x1 << 6) | (0x1 << 9) | (0x1 << 12) | (0x1 << 15)) #define PMX_EMI_PL_80_TO_85_VAL ((0x2 << 0) | (0x2 << 3) | (0x2 << 6) | (0x2 << 9) | (0x2 << 12) | (0x2 << 15)) #define PMX_SPP_PL_80_TO_85_VAL ((0x3 << 0) | (0x3 << 3) | (0x3 << 6) | (0x3 << 9) | (0x3 << 12) | (0x3 << 15)) #define PMX_UART1_ENH_PL_80_TO_85_VAL ((0x4 << 0) | (0x4 << 3) | (0x4 << 6) | (0x4 << 9) | (0x4 << 12) | (0x4 << 15)) #define PMX_PL_86_87_MASK (0x3F << 18) #define PMX_PL_86_MASK (0x7 << 18) #define PMX_PL_87_MASK (0x7 << 21) #define PMX_CLCD_PL_86_87_VAL 0 #define PMX_MII2_PL_86_87_VAL ((0x1 << 18) | (0x1 << 21)) #define PMX_EMI_PL_86_87_VAL ((0x2 << 18) | (0x2 << 21)) #define PMX_PWM3_PL_86_VAL (0x4 << 18) #define PMX_PWM2_PL_87_VAL (0x4 << 21) #define PMX_PL_88_89_MASK (0x3F << 24) #define PMX_CLCD_PL_88_89_VAL 0 #define PMX_MII2_PL_88_89_VAL ((0x1 << 24) | (0x1 << 27)) #define PMX_EMI_PL_88_89_VAL ((0x2 << 24) | (0x2 << 27)) #define PMX_UART6_PL_88_89_VAL ((0x3 << 24) | (0x3 << 27)) #define PMX_PWM0_1_PL_88_89_VAL ((0x4 << 24) | (0x4 << 27)) #define IP_SEL_PAD_90_99_REG 0x00C8 #define PMX_PL_90_91_MASK (0x3F << 0) #define PMX_CLCD_PL_90_91_VAL 0 #define PMX_MII2_PL_90_91_VAL ((0x1 << 0) | (0x1 << 3)) #define PMX_EMI1_PL_90_91_VAL ((0x2 << 0) | 
(0x2 << 3)) #define PMX_UART5_PL_90_91_VAL ((0x3 << 0) | (0x3 << 3)) #define PMX_SSP2_PL_90_91_VAL ((0x4 << 0) | (0x4 << 3)) #define PMX_PL_92_93_MASK (0x3F << 6) #define PMX_CLCD_PL_92_93_VAL 0 #define PMX_MII2_PL_92_93_VAL ((0x1 << 6) | (0x1 << 9)) #define PMX_EMI1_PL_92_93_VAL ((0x2 << 6) | (0x2 << 9)) #define PMX_UART4_PL_92_93_VAL ((0x3 << 6) | (0x3 << 9)) #define PMX_SSP2_PL_92_93_VAL ((0x4 << 6) | (0x4 << 9)) #define PMX_PL_94_95_MASK (0x3F << 12) #define PMX_CLCD_PL_94_95_VAL 0 #define PMX_MII2_PL_94_95_VAL ((0x1 << 12) | (0x1 << 15)) #define PMX_EMI1_PL_94_95_VAL ((0x2 << 12) | (0x2 << 15)) #define PMX_UART3_PL_94_95_VAL ((0x3 << 12) | (0x3 << 15)) #define PMX_SSP1_PL_94_95_VAL ((0x4 << 12) | (0x4 << 15)) #define PMX_PL_96_97_MASK (0x3F << 18) #define PMX_CLCD_PL_96_97_VAL 0 #define PMX_MII2_PL_96_97_VAL ((0x1 << 18) | (0x1 << 21)) #define PMX_EMI1_PL_96_97_VAL ((0x2 << 18) | (0x2 << 21)) #define PMX_I2C2_PL_96_97_VAL ((0x3 << 18) | (0x3 << 21)) #define PMX_SSP1_PL_96_97_VAL ((0x4 << 18) | (0x4 << 21)) #define PMX_PL_98_MASK (0x7 << 24) #define PMX_CLCD_PL_98_VAL 0 #define PMX_I2C1_PL_98_VAL (0x2 << 24) #define PMX_UART3_PL_98_VAL (0x4 << 24) #define PMX_PL_99_MASK (0x7 << 27) #define PMX_SDHCI_PL_99_VAL 0 #define PMX_I2C1_PL_99_VAL (0x2 << 27) #define PMX_UART3_PL_99_VAL (0x4 << 27) #define IP_SEL_MIX_PAD_REG 0x00CC #define PMX_PL_100_101_MASK (0x3F << 0) #define PMX_SDHCI_PL_100_101_VAL 0 #define PMX_UART4_PL_100_101_VAL ((0x4 << 0) | (0x4 << 3)) #define PMX_SSP1_PORT_SEL_MASK (0x7 << 8) #define PMX_SSP1_PORT_94_TO_97_VAL 0 #define PMX_SSP1_PORT_65_TO_68_VAL (0x1 << 8) #define PMX_SSP1_PORT_48_TO_51_VAL (0x2 << 8) #define PMX_SSP1_PORT_36_TO_39_VAL (0x3 << 8) #define PMX_SSP1_PORT_17_TO_20_VAL (0x4 << 8) #define PMX_SSP2_PORT_SEL_MASK (0x7 << 11) #define PMX_SSP2_PORT_90_TO_93_VAL 0 #define PMX_SSP2_PORT_61_TO_64_VAL (0x1 << 11) #define PMX_SSP2_PORT_44_TO_47_VAL (0x2 << 11) #define PMX_SSP2_PORT_32_TO_35_VAL (0x3 << 11) #define 
PMX_SSP2_PORT_13_TO_16_VAL (0x4 << 11) #define PMX_UART1_ENH_PORT_SEL_MASK (0x3 << 14) #define PMX_UART1_ENH_PORT_81_TO_85_VAL 0 #define PMX_UART1_ENH_PORT_44_45_34_36_VAL (0x1 << 14) #define PMX_UART1_ENH_PORT_32_TO_34_36_VAL (0x2 << 14) #define PMX_UART1_ENH_PORT_3_TO_5_7_VAL (0x3 << 14) #define PMX_UART3_PORT_SEL_MASK (0x7 << 16) #define PMX_UART3_PORT_94_VAL 0 #define PMX_UART3_PORT_73_VAL (0x1 << 16) #define PMX_UART3_PORT_52_VAL (0x2 << 16) #define PMX_UART3_PORT_41_VAL (0x3 << 16) #define PMX_UART3_PORT_15_VAL (0x4 << 16) #define PMX_UART3_PORT_8_VAL (0x5 << 16) #define PMX_UART3_PORT_99_VAL (0x6 << 16) #define PMX_UART4_PORT_SEL_MASK (0x7 << 19) #define PMX_UART4_PORT_92_VAL 0 #define PMX_UART4_PORT_71_VAL (0x1 << 19) #define PMX_UART4_PORT_39_VAL (0x2 << 19) #define PMX_UART4_PORT_13_VAL (0x3 << 19) #define PMX_UART4_PORT_6_VAL (0x4 << 19) #define PMX_UART4_PORT_101_VAL (0x5 << 19) #define PMX_UART5_PORT_SEL_MASK (0x3 << 22) #define PMX_UART5_PORT_90_VAL 0 #define PMX_UART5_PORT_69_VAL (0x1 << 22) #define PMX_UART5_PORT_37_VAL (0x2 << 22) #define PMX_UART5_PORT_4_VAL (0x3 << 22) #define PMX_UART6_PORT_SEL_MASK (0x1 << 24) #define PMX_UART6_PORT_88_VAL 0 #define PMX_UART6_PORT_2_VAL (0x1 << 24) #define PMX_I2C1_PORT_SEL_MASK (0x1 << 25) #define PMX_I2C1_PORT_8_9_VAL 0 #define PMX_I2C1_PORT_98_99_VAL (0x1 << 25) #define PMX_I2C2_PORT_SEL_MASK (0x3 << 26) #define PMX_I2C2_PORT_96_97_VAL 0 #define PMX_I2C2_PORT_75_76_VAL (0x1 << 26) #define PMX_I2C2_PORT_19_20_VAL (0x2 << 26) #define PMX_I2C2_PORT_2_3_VAL (0x3 << 26) #define PMX_I2C2_PORT_0_1_VAL (0x4 << 26) #define PMX_SDHCI_CD_PORT_SEL_MASK (0x1 << 29) #define PMX_SDHCI_CD_PORT_12_VAL 0 #define PMX_SDHCI_CD_PORT_51_VAL (0x1 << 29) /* Pad multiplexing for CLCD device */ static const unsigned clcd_pins[] = { 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97 }; static struct spear_muxreg clcd_muxreg[] = { { .reg = IP_SEL_PAD_60_69_REG, .mask = 
PMX_PL_69_MASK, .val = PMX_CLCD_PL_69_VAL, }, { .reg = IP_SEL_PAD_70_79_REG, .mask = PMX_PL_70_MASK | PMX_PL_71_72_MASK | PMX_PL_73_MASK | PMX_PL_74_MASK | PMX_PL_75_76_MASK | PMX_PL_77_78_79_MASK, .val = PMX_CLCD_PL_70_VAL | PMX_CLCD_PL_71_72_VAL | PMX_CLCD_PL_73_VAL | PMX_CLCD_PL_74_VAL | PMX_CLCD_PL_75_76_VAL | PMX_CLCD_PL_77_78_79_VAL, }, { .reg = IP_SEL_PAD_80_89_REG, .mask = PMX_PL_80_TO_85_MASK | PMX_PL_86_87_MASK | PMX_PL_88_89_MASK, .val = PMX_CLCD_PL_80_TO_85_VAL | PMX_CLCD_PL_86_87_VAL | PMX_CLCD_PL_88_89_VAL, }, { .reg = IP_SEL_PAD_90_99_REG, .mask = PMX_PL_90_91_MASK | PMX_PL_92_93_MASK | PMX_PL_94_95_MASK | PMX_PL_96_97_MASK | PMX_PL_98_MASK, .val = PMX_CLCD_PL_90_91_VAL | PMX_CLCD_PL_92_93_VAL | PMX_CLCD_PL_94_95_VAL | PMX_CLCD_PL_96_97_VAL | PMX_CLCD_PL_98_VAL, }, }; static struct spear_modemux clcd_modemux[] = { { .modes = EXTENDED_MODE, .muxregs = clcd_muxreg, .nmuxregs = ARRAY_SIZE(clcd_muxreg), }, }; static struct spear_pingroup clcd_pingroup = { .name = "clcd_grp", .pins = clcd_pins, .npins = ARRAY_SIZE(clcd_pins), .modemuxs = clcd_modemux, .nmodemuxs = ARRAY_SIZE(clcd_modemux), }; static const char *const clcd_grps[] = { "clcd_grp" }; static struct spear_function clcd_function = { .name = "clcd", .groups = clcd_grps, .ngroups = ARRAY_SIZE(clcd_grps), }; /* Pad multiplexing for EMI (Parallel NOR flash) device */ static const unsigned emi_pins[] = { 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97 }; static struct spear_muxreg emi_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_TIMER_0_1_MASK | PMX_TIMER_2_3_MASK, .val = 0, }, }; static struct spear_muxreg emi_ext_muxreg[] = { { .reg = IP_SEL_PAD_40_49_REG, .mask = PMX_PL_46_47_MASK | PMX_PL_48_49_MASK, .val = PMX_FSMC_EMI_PL_46_47_VAL | PMX_FSMC_EMI_PL_48_49_VAL, }, { .reg = IP_SEL_PAD_50_59_REG, .mask = PMX_PL_50_51_MASK | 
PMX_PL_52_53_MASK | PMX_PL_54_55_56_MASK | PMX_PL_58_59_MASK, .val = PMX_EMI_PL_50_51_VAL | PMX_EMI_PL_52_53_VAL | PMX_FSMC_EMI_PL_54_55_56_VAL | PMX_FSMC_EMI_PL_58_59_VAL, }, { .reg = IP_SEL_PAD_60_69_REG, .mask = PMX_PL_69_MASK, .val = PMX_EMI_PL_69_VAL, }, { .reg = IP_SEL_PAD_70_79_REG, .mask = PMX_PL_70_MASK | PMX_PL_71_72_MASK | PMX_PL_73_MASK | PMX_PL_74_MASK | PMX_PL_75_76_MASK | PMX_PL_77_78_79_MASK, .val = PMX_FSMC_EMI_PL_70_VAL | PMX_FSMC_EMI_PL_71_72_VAL | PMX_FSMC_EMI_PL_73_VAL | PMX_EMI_PL_74_VAL | PMX_EMI_PL_75_76_VAL | PMX_EMI_PL_77_78_79_VAL, }, { .reg = IP_SEL_PAD_80_89_REG, .mask = PMX_PL_80_TO_85_MASK | PMX_PL_86_87_MASK | PMX_PL_88_89_MASK, .val = PMX_EMI_PL_80_TO_85_VAL | PMX_EMI_PL_86_87_VAL | PMX_EMI_PL_88_89_VAL, }, { .reg = IP_SEL_PAD_90_99_REG, .mask = PMX_PL_90_91_MASK | PMX_PL_92_93_MASK | PMX_PL_94_95_MASK | PMX_PL_96_97_MASK, .val = PMX_EMI1_PL_90_91_VAL | PMX_EMI1_PL_92_93_VAL | PMX_EMI1_PL_94_95_VAL | PMX_EMI1_PL_96_97_VAL, }, { .reg = EXT_CTRL_REG, .mask = EMI_FSMC_DYNAMIC_MUX_MASK, .val = EMI_FSMC_DYNAMIC_MUX_MASK, }, }; static struct spear_modemux emi_modemux[] = { { .modes = AUTO_EXP_MODE | EXTENDED_MODE, .muxregs = emi_muxreg, .nmuxregs = ARRAY_SIZE(emi_muxreg), }, { .modes = EXTENDED_MODE, .muxregs = emi_ext_muxreg, .nmuxregs = ARRAY_SIZE(emi_ext_muxreg), }, }; static struct spear_pingroup emi_pingroup = { .name = "emi_grp", .pins = emi_pins, .npins = ARRAY_SIZE(emi_pins), .modemuxs = emi_modemux, .nmodemuxs = ARRAY_SIZE(emi_modemux), }; static const char *const emi_grps[] = { "emi_grp" }; static struct spear_function emi_function = { .name = "emi", .groups = emi_grps, .ngroups = ARRAY_SIZE(emi_grps), }; /* Pad multiplexing for FSMC (NAND flash) device */ static const unsigned fsmc_8bit_pins[] = { 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68 }; static struct spear_muxreg fsmc_8bit_muxreg[] = { { .reg = IP_SEL_PAD_50_59_REG, .mask = PMX_PL_52_53_MASK | PMX_PL_54_55_56_MASK | PMX_PL_57_MASK | 
PMX_PL_58_59_MASK, .val = PMX_FSMC_PL_52_53_VAL | PMX_FSMC_EMI_PL_54_55_56_VAL | PMX_FSMC_PL_57_VAL | PMX_FSMC_EMI_PL_58_59_VAL, }, { .reg = IP_SEL_PAD_60_69_REG, .mask = PMX_PL_60_MASK | PMX_PL_61_TO_64_MASK | PMX_PL_65_TO_68_MASK, .val = PMX_FSMC_PL_60_VAL | PMX_FSMC_PL_61_TO_64_VAL | PMX_FSMC_PL_65_TO_68_VAL, }, { .reg = EXT_CTRL_REG, .mask = EMI_FSMC_DYNAMIC_MUX_MASK, .val = EMI_FSMC_DYNAMIC_MUX_MASK, }, }; static struct spear_modemux fsmc_8bit_modemux[] = { { .modes = EXTENDED_MODE, .muxregs = fsmc_8bit_muxreg, .nmuxregs = ARRAY_SIZE(fsmc_8bit_muxreg), }, }; static struct spear_pingroup fsmc_8bit_pingroup = { .name = "fsmc_8bit_grp", .pins = fsmc_8bit_pins, .npins = ARRAY_SIZE(fsmc_8bit_pins), .modemuxs = fsmc_8bit_modemux, .nmodemuxs = ARRAY_SIZE(fsmc_8bit_modemux), }; static const unsigned fsmc_16bit_pins[] = { 46, 47, 48, 49, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 70, 71, 72, 73 }; static struct spear_muxreg fsmc_16bit_autoexp_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_TIMER_0_1_MASK | PMX_TIMER_2_3_MASK, .val = 0, }, }; static struct spear_muxreg fsmc_16bit_muxreg[] = { { .reg = IP_SEL_PAD_40_49_REG, .mask = PMX_PL_46_47_MASK | PMX_PL_48_49_MASK, .val = PMX_FSMC_EMI_PL_46_47_VAL | PMX_FSMC_EMI_PL_48_49_VAL, }, { .reg = IP_SEL_PAD_70_79_REG, .mask = PMX_PL_70_MASK | PMX_PL_71_72_MASK | PMX_PL_73_MASK, .val = PMX_FSMC_EMI_PL_70_VAL | PMX_FSMC_EMI_PL_71_72_VAL | PMX_FSMC_EMI_PL_73_VAL, } }; static struct spear_modemux fsmc_16bit_modemux[] = { { .modes = EXTENDED_MODE, .muxregs = fsmc_8bit_muxreg, .nmuxregs = ARRAY_SIZE(fsmc_8bit_muxreg), }, { .modes = AUTO_EXP_MODE | EXTENDED_MODE, .muxregs = fsmc_16bit_autoexp_muxreg, .nmuxregs = ARRAY_SIZE(fsmc_16bit_autoexp_muxreg), }, { .modes = EXTENDED_MODE, .muxregs = fsmc_16bit_muxreg, .nmuxregs = ARRAY_SIZE(fsmc_16bit_muxreg), }, }; static struct spear_pingroup fsmc_16bit_pingroup = { .name = "fsmc_16bit_grp", .pins = fsmc_16bit_pins, .npins = ARRAY_SIZE(fsmc_16bit_pins), .modemuxs 
= fsmc_16bit_modemux, .nmodemuxs = ARRAY_SIZE(fsmc_16bit_modemux), }; static const char *const fsmc_grps[] = { "fsmc_8bit_grp", "fsmc_16bit_grp" }; static struct spear_function fsmc_function = { .name = "fsmc", .groups = fsmc_grps, .ngroups = ARRAY_SIZE(fsmc_grps), }; /* Pad multiplexing for SPP device */ static const unsigned spp_pins[] = { 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85 }; static struct spear_muxreg spp_muxreg[] = { { .reg = IP_SEL_PAD_60_69_REG, .mask = PMX_PL_69_MASK, .val = PMX_SPP_PL_69_VAL, }, { .reg = IP_SEL_PAD_70_79_REG, .mask = PMX_PL_70_MASK | PMX_PL_71_72_MASK | PMX_PL_73_MASK | PMX_PL_74_MASK | PMX_PL_75_76_MASK | PMX_PL_77_78_79_MASK, .val = PMX_SPP_PL_70_VAL | PMX_SPP_PL_71_72_VAL | PMX_SPP_PL_73_VAL | PMX_SPP_PL_74_VAL | PMX_SPP_PL_75_76_VAL | PMX_SPP_PL_77_78_79_VAL, }, { .reg = IP_SEL_PAD_80_89_REG, .mask = PMX_PL_80_TO_85_MASK, .val = PMX_SPP_PL_80_TO_85_VAL, }, }; static struct spear_modemux spp_modemux[] = { { .modes = EXTENDED_MODE, .muxregs = spp_muxreg, .nmuxregs = ARRAY_SIZE(spp_muxreg), }, }; static struct spear_pingroup spp_pingroup = { .name = "spp_grp", .pins = spp_pins, .npins = ARRAY_SIZE(spp_pins), .modemuxs = spp_modemux, .nmodemuxs = ARRAY_SIZE(spp_modemux), }; static const char *const spp_grps[] = { "spp_grp" }; static struct spear_function spp_function = { .name = "spp", .groups = spp_grps, .ngroups = ARRAY_SIZE(spp_grps), }; /* Pad multiplexing for SDHCI device */ static const unsigned sdhci_led_pins[] = { 34 }; static struct spear_muxreg sdhci_led_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_SSP_CS_MASK, .val = 0, }, }; static struct spear_muxreg sdhci_led_ext_muxreg[] = { { .reg = IP_SEL_PAD_30_39_REG, .mask = PMX_PL_34_MASK, .val = PMX_PWM2_PL_34_VAL, }, }; static struct spear_modemux sdhci_led_modemux[] = { { .modes = AUTO_NET_SMII_MODE | AUTO_NET_MII_MODE | EXTENDED_MODE, .muxregs = sdhci_led_muxreg, .nmuxregs = ARRAY_SIZE(sdhci_led_muxreg), }, { .modes = EXTENDED_MODE, .muxregs = 
sdhci_led_ext_muxreg, .nmuxregs = ARRAY_SIZE(sdhci_led_ext_muxreg), }, }; static struct spear_pingroup sdhci_led_pingroup = { .name = "sdhci_led_grp", .pins = sdhci_led_pins, .npins = ARRAY_SIZE(sdhci_led_pins), .modemuxs = sdhci_led_modemux, .nmodemuxs = ARRAY_SIZE(sdhci_led_modemux), }; static const unsigned sdhci_cd_12_pins[] = { 12, 43, 44, 45, 46, 47, 48, 49, 50}; static const unsigned sdhci_cd_51_pins[] = { 43, 44, 45, 46, 47, 48, 49, 50, 51 }; static struct spear_muxreg sdhci_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_TIMER_0_1_MASK | PMX_TIMER_2_3_MASK, .val = 0, }, }; static struct spear_muxreg sdhci_ext_muxreg[] = { { .reg = IP_SEL_PAD_40_49_REG, .mask = PMX_PL_43_MASK | PMX_PL_44_45_MASK | PMX_PL_46_47_MASK | PMX_PL_48_49_MASK, .val = PMX_SDHCI_PL_43_VAL | PMX_SDHCI_PL_44_45_VAL | PMX_SDHCI_PL_46_47_VAL | PMX_SDHCI_PL_48_49_VAL, }, { .reg = IP_SEL_PAD_50_59_REG, .mask = PMX_PL_50_MASK, .val = PMX_SDHCI_PL_50_VAL, }, { .reg = IP_SEL_PAD_90_99_REG, .mask = PMX_PL_99_MASK, .val = PMX_SDHCI_PL_99_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_PL_100_101_MASK, .val = PMX_SDHCI_PL_100_101_VAL, }, }; static struct spear_muxreg sdhci_cd_12_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_MII_MASK, .val = 0, }, { .reg = IP_SEL_PAD_10_19_REG, .mask = PMX_PL_12_MASK, .val = PMX_SDHCI_CD_PL_12_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_SDHCI_CD_PORT_SEL_MASK, .val = PMX_SDHCI_CD_PORT_12_VAL, }, }; static struct spear_muxreg sdhci_cd_51_muxreg[] = { { .reg = IP_SEL_PAD_50_59_REG, .mask = PMX_PL_51_MASK, .val = PMX_SDHCI_CD_PL_51_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_SDHCI_CD_PORT_SEL_MASK, .val = PMX_SDHCI_CD_PORT_51_VAL, }, }; #define pmx_sdhci_common_modemux \ { \ .modes = AUTO_NET_SMII_MODE | AUTO_NET_MII_MODE | \ SMALL_PRINTERS_MODE | EXTENDED_MODE, \ .muxregs = sdhci_muxreg, \ .nmuxregs = ARRAY_SIZE(sdhci_muxreg), \ }, { \ .modes = EXTENDED_MODE, \ .muxregs = sdhci_ext_muxreg, \ .nmuxregs = ARRAY_SIZE(sdhci_ext_muxreg), \ } static 
struct spear_modemux sdhci_modemux[][3] = { { /* select pin 12 for cd */ pmx_sdhci_common_modemux, { .modes = EXTENDED_MODE, .muxregs = sdhci_cd_12_muxreg, .nmuxregs = ARRAY_SIZE(sdhci_cd_12_muxreg), }, }, { /* select pin 51 for cd */ pmx_sdhci_common_modemux, { .modes = EXTENDED_MODE, .muxregs = sdhci_cd_51_muxreg, .nmuxregs = ARRAY_SIZE(sdhci_cd_51_muxreg), }, } }; static struct spear_pingroup sdhci_pingroup[] = { { .name = "sdhci_cd_12_grp", .pins = sdhci_cd_12_pins, .npins = ARRAY_SIZE(sdhci_cd_12_pins), .modemuxs = sdhci_modemux[0], .nmodemuxs = ARRAY_SIZE(sdhci_modemux[0]), }, { .name = "sdhci_cd_51_grp", .pins = sdhci_cd_51_pins, .npins = ARRAY_SIZE(sdhci_cd_51_pins), .modemuxs = sdhci_modemux[1], .nmodemuxs = ARRAY_SIZE(sdhci_modemux[1]), }, }; static const char *const sdhci_grps[] = { "sdhci_cd_12_grp", "sdhci_cd_51_grp", "sdhci_led_grp" }; static struct spear_function sdhci_function = { .name = "sdhci", .groups = sdhci_grps, .ngroups = ARRAY_SIZE(sdhci_grps), }; /* Pad multiplexing for I2S device */ static const unsigned i2s_pins[] = { 35, 39, 40, 41, 42 }; static struct spear_muxreg i2s_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_SSP_CS_MASK, .val = 0, }, { .reg = PMX_CONFIG_REG, .mask = PMX_UART0_MODEM_MASK, .val = 0, }, }; static struct spear_muxreg i2s_ext_muxreg[] = { { .reg = IP_SEL_PAD_30_39_REG, .mask = PMX_PL_35_MASK | PMX_PL_39_MASK, .val = PMX_I2S_REF_CLK_PL_35_VAL | PMX_I2S_PL_39_VAL, }, { .reg = IP_SEL_PAD_40_49_REG, .mask = PMX_PL_40_MASK | PMX_PL_41_42_MASK, .val = PMX_I2S_PL_40_VAL | PMX_I2S_PL_41_42_VAL, }, }; static struct spear_modemux i2s_modemux[] = { { .modes = AUTO_NET_SMII_MODE | AUTO_NET_MII_MODE | EXTENDED_MODE, .muxregs = i2s_muxreg, .nmuxregs = ARRAY_SIZE(i2s_muxreg), }, { .modes = EXTENDED_MODE, .muxregs = i2s_ext_muxreg, .nmuxregs = ARRAY_SIZE(i2s_ext_muxreg), }, }; static struct spear_pingroup i2s_pingroup = { .name = "i2s_grp", .pins = i2s_pins, .npins = ARRAY_SIZE(i2s_pins), .modemuxs = i2s_modemux, .nmodemuxs = 
ARRAY_SIZE(i2s_modemux), }; static const char *const i2s_grps[] = { "i2s_grp" }; static struct spear_function i2s_function = { .name = "i2s", .groups = i2s_grps, .ngroups = ARRAY_SIZE(i2s_grps), }; /* Pad multiplexing for UART1 device */ static const unsigned uart1_pins[] = { 28, 29 }; static struct spear_muxreg uart1_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_GPIO_PIN0_MASK | PMX_GPIO_PIN1_MASK, .val = 0, }, }; static struct spear_muxreg uart1_ext_muxreg[] = { { .reg = IP_SEL_PAD_20_29_REG, .mask = PMX_PL_28_29_MASK, .val = PMX_UART1_PL_28_29_VAL, }, }; static struct spear_modemux uart1_modemux[] = { { .modes = AUTO_NET_SMII_MODE | AUTO_NET_MII_MODE | AUTO_EXP_MODE | SMALL_PRINTERS_MODE | EXTENDED_MODE, .muxregs = uart1_muxreg, .nmuxregs = ARRAY_SIZE(uart1_muxreg), }, { .modes = EXTENDED_MODE, .muxregs = uart1_ext_muxreg, .nmuxregs = ARRAY_SIZE(uart1_ext_muxreg), }, }; static struct spear_pingroup uart1_pingroup = { .name = "uart1_grp", .pins = uart1_pins, .npins = ARRAY_SIZE(uart1_pins), .modemuxs = uart1_modemux, .nmodemuxs = ARRAY_SIZE(uart1_modemux), }; static const char *const uart1_grps[] = { "uart1_grp" }; static struct spear_function uart1_function = { .name = "uart1", .groups = uart1_grps, .ngroups = ARRAY_SIZE(uart1_grps), }; /* Pad multiplexing for UART1 Modem device */ static const unsigned uart1_modem_2_to_7_pins[] = { 2, 3, 4, 5, 6, 7 }; static const unsigned uart1_modem_31_to_36_pins[] = { 31, 32, 33, 34, 35, 36 }; static const unsigned uart1_modem_34_to_45_pins[] = { 34, 35, 36, 43, 44, 45 }; static const unsigned uart1_modem_80_to_85_pins[] = { 80, 81, 82, 83, 84, 85 }; static struct spear_muxreg uart1_modem_ext_2_to_7_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_UART0_MASK | PMX_I2C_MASK | PMX_SSP_MASK, .val = 0, }, { .reg = IP_SEL_PAD_0_9_REG, .mask = PMX_PL_2_3_MASK | PMX_PL_6_7_MASK, .val = PMX_UART1_ENH_PL_2_3_VAL | PMX_UART1_ENH_PL_4_5_VAL | PMX_UART1_ENH_PL_6_7_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = 
PMX_UART1_ENH_PORT_SEL_MASK, .val = PMX_UART1_ENH_PORT_3_TO_5_7_VAL, }, }; static struct spear_muxreg uart1_modem_31_to_36_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_GPIO_PIN3_MASK | PMX_GPIO_PIN4_MASK | PMX_GPIO_PIN5_MASK | PMX_SSP_CS_MASK, .val = 0, }, }; static struct spear_muxreg uart1_modem_ext_31_to_36_muxreg[] = { { .reg = IP_SEL_PAD_30_39_REG, .mask = PMX_PL_31_MASK | PMX_PL_32_33_MASK | PMX_PL_34_MASK | PMX_PL_35_MASK | PMX_PL_36_MASK, .val = PMX_UART1_ENH_PL_31_VAL | PMX_UART1_ENH_PL_32_33_VAL | PMX_UART1_ENH_PL_34_VAL | PMX_UART1_ENH_PL_35_VAL | PMX_UART1_ENH_PL_36_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_UART1_ENH_PORT_SEL_MASK, .val = PMX_UART1_ENH_PORT_32_TO_34_36_VAL, }, }; static struct spear_muxreg uart1_modem_34_to_45_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_TIMER_0_1_MASK | PMX_TIMER_2_3_MASK | PMX_SSP_CS_MASK, .val = 0, }, }; static struct spear_muxreg uart1_modem_ext_34_to_45_muxreg[] = { { .reg = IP_SEL_PAD_30_39_REG, .mask = PMX_PL_34_MASK | PMX_PL_35_MASK | PMX_PL_36_MASK, .val = PMX_UART1_ENH_PL_34_VAL | PMX_UART1_ENH_PL_35_VAL | PMX_UART1_ENH_PL_36_VAL, }, { .reg = IP_SEL_PAD_40_49_REG, .mask = PMX_PL_43_MASK | PMX_PL_44_45_MASK, .val = PMX_UART1_ENH_PL_43_VAL | PMX_UART1_ENH_PL_44_45_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_UART1_ENH_PORT_SEL_MASK, .val = PMX_UART1_ENH_PORT_44_45_34_36_VAL, }, }; static struct spear_muxreg uart1_modem_ext_80_to_85_muxreg[] = { { .reg = IP_SEL_PAD_80_89_REG, .mask = PMX_PL_80_TO_85_MASK, .val = PMX_UART1_ENH_PL_80_TO_85_VAL, }, { .reg = IP_SEL_PAD_40_49_REG, .mask = PMX_PL_43_MASK | PMX_PL_44_45_MASK, .val = PMX_UART1_ENH_PL_43_VAL | PMX_UART1_ENH_PL_44_45_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_UART1_ENH_PORT_SEL_MASK, .val = PMX_UART1_ENH_PORT_81_TO_85_VAL, }, }; static struct spear_modemux uart1_modem_2_to_7_modemux[] = { { .modes = EXTENDED_MODE, .muxregs = uart1_modem_ext_2_to_7_muxreg, .nmuxregs = ARRAY_SIZE(uart1_modem_ext_2_to_7_muxreg), }, }; static struct 
spear_modemux uart1_modem_31_to_36_modemux[] = { { .modes = SMALL_PRINTERS_MODE | EXTENDED_MODE, .muxregs = uart1_modem_31_to_36_muxreg, .nmuxregs = ARRAY_SIZE(uart1_modem_31_to_36_muxreg), }, { .modes = EXTENDED_MODE, .muxregs = uart1_modem_ext_31_to_36_muxreg, .nmuxregs = ARRAY_SIZE(uart1_modem_ext_31_to_36_muxreg), }, }; static struct spear_modemux uart1_modem_34_to_45_modemux[] = { { .modes = AUTO_EXP_MODE | EXTENDED_MODE, .muxregs = uart1_modem_34_to_45_muxreg, .nmuxregs = ARRAY_SIZE(uart1_modem_34_to_45_muxreg), }, { .modes = EXTENDED_MODE, .muxregs = uart1_modem_ext_34_to_45_muxreg, .nmuxregs = ARRAY_SIZE(uart1_modem_ext_34_to_45_muxreg), }, }; static struct spear_modemux uart1_modem_80_to_85_modemux[] = { { .modes = EXTENDED_MODE, .muxregs = uart1_modem_ext_80_to_85_muxreg, .nmuxregs = ARRAY_SIZE(uart1_modem_ext_80_to_85_muxreg), }, }; static struct spear_pingroup uart1_modem_pingroup[] = { { .name = "uart1_modem_2_to_7_grp", .pins = uart1_modem_2_to_7_pins, .npins = ARRAY_SIZE(uart1_modem_2_to_7_pins), .modemuxs = uart1_modem_2_to_7_modemux, .nmodemuxs = ARRAY_SIZE(uart1_modem_2_to_7_modemux), }, { .name = "uart1_modem_31_to_36_grp", .pins = uart1_modem_31_to_36_pins, .npins = ARRAY_SIZE(uart1_modem_31_to_36_pins), .modemuxs = uart1_modem_31_to_36_modemux, .nmodemuxs = ARRAY_SIZE(uart1_modem_31_to_36_modemux), }, { .name = "uart1_modem_34_to_45_grp", .pins = uart1_modem_34_to_45_pins, .npins = ARRAY_SIZE(uart1_modem_34_to_45_pins), .modemuxs = uart1_modem_34_to_45_modemux, .nmodemuxs = ARRAY_SIZE(uart1_modem_34_to_45_modemux), }, { .name = "uart1_modem_80_to_85_grp", .pins = uart1_modem_80_to_85_pins, .npins = ARRAY_SIZE(uart1_modem_80_to_85_pins), .modemuxs = uart1_modem_80_to_85_modemux, .nmodemuxs = ARRAY_SIZE(uart1_modem_80_to_85_modemux), }, }; static const char *const uart1_modem_grps[] = { "uart1_modem_2_to_7_grp", "uart1_modem_31_to_36_grp", "uart1_modem_34_to_45_grp", "uart1_modem_80_to_85_grp" }; static struct spear_function uart1_modem_function 
= { .name = "uart1_modem", .groups = uart1_modem_grps, .ngroups = ARRAY_SIZE(uart1_modem_grps), }; /* Pad multiplexing for UART2 device */ static const unsigned uart2_pins[] = { 0, 1 }; static struct spear_muxreg uart2_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_FIRDA_MASK, .val = 0, }, }; static struct spear_muxreg uart2_ext_muxreg[] = { { .reg = IP_SEL_PAD_0_9_REG, .mask = PMX_PL_0_1_MASK, .val = PMX_UART2_PL_0_1_VAL, }, }; static struct spear_modemux uart2_modemux[] = { { .modes = AUTO_NET_SMII_MODE | AUTO_NET_MII_MODE | AUTO_EXP_MODE | SMALL_PRINTERS_MODE | EXTENDED_MODE, .muxregs = uart2_muxreg, .nmuxregs = ARRAY_SIZE(uart2_muxreg), }, { .modes = EXTENDED_MODE, .muxregs = uart2_ext_muxreg, .nmuxregs = ARRAY_SIZE(uart2_ext_muxreg), }, }; static struct spear_pingroup uart2_pingroup = { .name = "uart2_grp", .pins = uart2_pins, .npins = ARRAY_SIZE(uart2_pins), .modemuxs = uart2_modemux, .nmodemuxs = ARRAY_SIZE(uart2_modemux), }; static const char *const uart2_grps[] = { "uart2_grp" }; static struct spear_function uart2_function = { .name = "uart2", .groups = uart2_grps, .ngroups = ARRAY_SIZE(uart2_grps), }; /* Pad multiplexing for uart3 device */ static const unsigned uart3_pins[][2] = { { 8, 9 }, { 15, 16 }, { 41, 42 }, { 52, 53 }, { 73, 74 }, { 94, 95 }, { 98, 99 } }; static struct spear_muxreg uart3_ext_8_9_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_SSP_MASK, .val = 0, }, { .reg = IP_SEL_PAD_0_9_REG, .mask = PMX_PL_8_9_MASK, .val = PMX_UART3_PL_8_9_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_UART3_PORT_SEL_MASK, .val = PMX_UART3_PORT_8_VAL, }, }; static struct spear_muxreg uart3_ext_15_16_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_MII_MASK, .val = 0, }, { .reg = IP_SEL_PAD_10_19_REG, .mask = PMX_PL_15_16_MASK, .val = PMX_UART3_PL_15_16_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_UART3_PORT_SEL_MASK, .val = PMX_UART3_PORT_15_VAL, }, }; static struct spear_muxreg uart3_ext_41_42_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = 
PMX_UART0_MODEM_MASK, .val = 0, }, { .reg = IP_SEL_PAD_40_49_REG, .mask = PMX_PL_41_42_MASK, .val = PMX_UART3_PL_41_42_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_UART3_PORT_SEL_MASK, .val = PMX_UART3_PORT_41_VAL, }, }; static struct spear_muxreg uart3_ext_52_53_muxreg[] = { { .reg = IP_SEL_PAD_50_59_REG, .mask = PMX_PL_52_53_MASK, .val = PMX_UART3_PL_52_53_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_UART3_PORT_SEL_MASK, .val = PMX_UART3_PORT_52_VAL, }, }; static struct spear_muxreg uart3_ext_73_74_muxreg[] = { { .reg = IP_SEL_PAD_70_79_REG, .mask = PMX_PL_73_MASK | PMX_PL_74_MASK, .val = PMX_UART3_PL_73_VAL | PMX_UART3_PL_74_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_UART3_PORT_SEL_MASK, .val = PMX_UART3_PORT_73_VAL, }, }; static struct spear_muxreg uart3_ext_94_95_muxreg[] = { { .reg = IP_SEL_PAD_90_99_REG, .mask = PMX_PL_94_95_MASK, .val = PMX_UART3_PL_94_95_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_UART3_PORT_SEL_MASK, .val = PMX_UART3_PORT_94_VAL, }, }; static struct spear_muxreg uart3_ext_98_99_muxreg[] = { { .reg = IP_SEL_PAD_90_99_REG, .mask = PMX_PL_98_MASK | PMX_PL_99_MASK, .val = PMX_UART3_PL_98_VAL | PMX_UART3_PL_99_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_UART3_PORT_SEL_MASK, .val = PMX_UART3_PORT_99_VAL, }, }; static struct spear_modemux uart3_modemux[][1] = { { /* Select signals on pins 8_9 */ { .modes = EXTENDED_MODE, .muxregs = uart3_ext_8_9_muxreg, .nmuxregs = ARRAY_SIZE(uart3_ext_8_9_muxreg), }, }, { /* Select signals on pins 15_16 */ { .modes = EXTENDED_MODE, .muxregs = uart3_ext_15_16_muxreg, .nmuxregs = ARRAY_SIZE(uart3_ext_15_16_muxreg), }, }, { /* Select signals on pins 41_42 */ { .modes = EXTENDED_MODE, .muxregs = uart3_ext_41_42_muxreg, .nmuxregs = ARRAY_SIZE(uart3_ext_41_42_muxreg), }, }, { /* Select signals on pins 52_53 */ { .modes = EXTENDED_MODE, .muxregs = uart3_ext_52_53_muxreg, .nmuxregs = ARRAY_SIZE(uart3_ext_52_53_muxreg), }, }, { /* Select signals on pins 73_74 */ { .modes = EXTENDED_MODE, .muxregs 
= uart3_ext_73_74_muxreg, .nmuxregs = ARRAY_SIZE(uart3_ext_73_74_muxreg), }, }, { /* Select signals on pins 94_95 */ { .modes = EXTENDED_MODE, .muxregs = uart3_ext_94_95_muxreg, .nmuxregs = ARRAY_SIZE(uart3_ext_94_95_muxreg), }, }, { /* Select signals on pins 98_99 */ { .modes = EXTENDED_MODE, .muxregs = uart3_ext_98_99_muxreg, .nmuxregs = ARRAY_SIZE(uart3_ext_98_99_muxreg), }, }, }; static struct spear_pingroup uart3_pingroup[] = { { .name = "uart3_8_9_grp", .pins = uart3_pins[0], .npins = ARRAY_SIZE(uart3_pins[0]), .modemuxs = uart3_modemux[0], .nmodemuxs = ARRAY_SIZE(uart3_modemux[0]), }, { .name = "uart3_15_16_grp", .pins = uart3_pins[1], .npins = ARRAY_SIZE(uart3_pins[1]), .modemuxs = uart3_modemux[1], .nmodemuxs = ARRAY_SIZE(uart3_modemux[1]), }, { .name = "uart3_41_42_grp", .pins = uart3_pins[2], .npins = ARRAY_SIZE(uart3_pins[2]), .modemuxs = uart3_modemux[2], .nmodemuxs = ARRAY_SIZE(uart3_modemux[2]), }, { .name = "uart3_52_53_grp", .pins = uart3_pins[3], .npins = ARRAY_SIZE(uart3_pins[3]), .modemuxs = uart3_modemux[3], .nmodemuxs = ARRAY_SIZE(uart3_modemux[3]), }, { .name = "uart3_73_74_grp", .pins = uart3_pins[4], .npins = ARRAY_SIZE(uart3_pins[4]), .modemuxs = uart3_modemux[4], .nmodemuxs = ARRAY_SIZE(uart3_modemux[4]), }, { .name = "uart3_94_95_grp", .pins = uart3_pins[5], .npins = ARRAY_SIZE(uart3_pins[5]), .modemuxs = uart3_modemux[5], .nmodemuxs = ARRAY_SIZE(uart3_modemux[5]), }, { .name = "uart3_98_99_grp", .pins = uart3_pins[6], .npins = ARRAY_SIZE(uart3_pins[6]), .modemuxs = uart3_modemux[6], .nmodemuxs = ARRAY_SIZE(uart3_modemux[6]), }, }; static const char *const uart3_grps[] = { "uart3_8_9_grp", "uart3_15_16_grp", "uart3_41_42_grp", "uart3_52_53_grp", "uart3_73_74_grp", "uart3_94_95_grp", "uart3_98_99_grp" }; static struct spear_function uart3_function = { .name = "uart3", .groups = uart3_grps, .ngroups = ARRAY_SIZE(uart3_grps), }; /* Pad multiplexing for uart4 device */ static const unsigned uart4_pins[][2] = { { 6, 7 }, { 13, 14 }, { 39, 40 
}, { 71, 72 }, { 92, 93 }, { 100, 101 } }; static struct spear_muxreg uart4_ext_6_7_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_SSP_MASK, .val = 0, }, { .reg = IP_SEL_PAD_0_9_REG, .mask = PMX_PL_6_7_MASK, .val = PMX_UART4_PL_6_7_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_UART4_PORT_SEL_MASK, .val = PMX_UART4_PORT_6_VAL, }, }; static struct spear_muxreg uart4_ext_13_14_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_MII_MASK, .val = 0, }, { .reg = IP_SEL_PAD_10_19_REG, .mask = PMX_PL_13_14_MASK, .val = PMX_UART4_PL_13_14_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_UART4_PORT_SEL_MASK, .val = PMX_UART4_PORT_13_VAL, }, }; static struct spear_muxreg uart4_ext_39_40_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_UART0_MODEM_MASK, .val = 0, }, { .reg = IP_SEL_PAD_30_39_REG, .mask = PMX_PL_39_MASK, .val = PMX_UART4_PL_39_VAL, }, { .reg = IP_SEL_PAD_40_49_REG, .mask = PMX_PL_40_MASK, .val = PMX_UART4_PL_40_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_UART4_PORT_SEL_MASK, .val = PMX_UART4_PORT_39_VAL, }, }; static struct spear_muxreg uart4_ext_71_72_muxreg[] = { { .reg = IP_SEL_PAD_70_79_REG, .mask = PMX_PL_71_72_MASK, .val = PMX_UART4_PL_71_72_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_UART4_PORT_SEL_MASK, .val = PMX_UART4_PORT_71_VAL, }, }; static struct spear_muxreg uart4_ext_92_93_muxreg[] = { { .reg = IP_SEL_PAD_90_99_REG, .mask = PMX_PL_92_93_MASK, .val = PMX_UART4_PL_92_93_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_UART4_PORT_SEL_MASK, .val = PMX_UART4_PORT_92_VAL, }, }; static struct spear_muxreg uart4_ext_100_101_muxreg[] = { { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_PL_100_101_MASK | PMX_UART4_PORT_SEL_MASK, .val = PMX_UART4_PL_100_101_VAL | PMX_UART4_PORT_101_VAL, }, }; static struct spear_modemux uart4_modemux[][1] = { { /* Select signals on pins 6_7 */ { .modes = EXTENDED_MODE, .muxregs = uart4_ext_6_7_muxreg, .nmuxregs = ARRAY_SIZE(uart4_ext_6_7_muxreg), }, }, { /* Select signals on pins 13_14 */ { .modes = EXTENDED_MODE, 
/* (cont.) tail of the uart4 pins-13/14 modemux entry */
		  .muxregs = uart4_ext_13_14_muxreg,
		  .nmuxregs = ARRAY_SIZE(uart4_ext_13_14_muxreg), },
	}, {
		/* Select signals on pins 39_40 */
		{ .modes = EXTENDED_MODE,
		  .muxregs = uart4_ext_39_40_muxreg,
		  .nmuxregs = ARRAY_SIZE(uart4_ext_39_40_muxreg), },
	}, {
		/* Select signals on pins 71_72 */
		{ .modes = EXTENDED_MODE,
		  .muxregs = uart4_ext_71_72_muxreg,
		  .nmuxregs = ARRAY_SIZE(uart4_ext_71_72_muxreg), },
	}, {
		/* Select signals on pins 92_93 */
		{ .modes = EXTENDED_MODE,
		  .muxregs = uart4_ext_92_93_muxreg,
		  .nmuxregs = ARRAY_SIZE(uart4_ext_92_93_muxreg), },
	}, {
		/* Select signals on pins 100_101 */
		{ .modes = EXTENDED_MODE,
		  .muxregs = uart4_ext_100_101_muxreg,
		  .nmuxregs = ARRAY_SIZE(uart4_ext_100_101_muxreg), },
	},
};

/* One pin group per candidate uart4 pad pair; index matches uart4_pins[] */
static struct spear_pingroup uart4_pingroup[] = {
	{ .name = "uart4_6_7_grp", .pins = uart4_pins[0],
	  .npins = ARRAY_SIZE(uart4_pins[0]), .modemuxs = uart4_modemux[0],
	  .nmodemuxs = ARRAY_SIZE(uart4_modemux[0]), },
	{ .name = "uart4_13_14_grp", .pins = uart4_pins[1],
	  .npins = ARRAY_SIZE(uart4_pins[1]), .modemuxs = uart4_modemux[1],
	  .nmodemuxs = ARRAY_SIZE(uart4_modemux[1]), },
	{ .name = "uart4_39_40_grp", .pins = uart4_pins[2],
	  .npins = ARRAY_SIZE(uart4_pins[2]), .modemuxs = uart4_modemux[2],
	  .nmodemuxs = ARRAY_SIZE(uart4_modemux[2]), },
	{ .name = "uart4_71_72_grp", .pins = uart4_pins[3],
	  .npins = ARRAY_SIZE(uart4_pins[3]), .modemuxs = uart4_modemux[3],
	  .nmodemuxs = ARRAY_SIZE(uart4_modemux[3]), },
	{ .name = "uart4_92_93_grp", .pins = uart4_pins[4],
	  .npins = ARRAY_SIZE(uart4_pins[4]), .modemuxs = uart4_modemux[4],
	  .nmodemuxs = ARRAY_SIZE(uart4_modemux[4]), },
	{ .name = "uart4_100_101_grp", .pins = uart4_pins[5],
	  .npins = ARRAY_SIZE(uart4_pins[5]), .modemuxs = uart4_modemux[5],
	  .nmodemuxs = ARRAY_SIZE(uart4_modemux[5]), },
};

static const char *const uart4_grps[] = { "uart4_6_7_grp", "uart4_13_14_grp",
	"uart4_39_40_grp", "uart4_71_72_grp", "uart4_92_93_grp",
	"uart4_100_101_grp" };

static struct spear_function uart4_function = {
	.name = "uart4",
	.groups = uart4_grps,
	.ngroups =
ARRAY_SIZE(uart4_grps),
};

/* Pad multiplexing for uart5 device */
static const unsigned uart5_pins[][2] = { { 4, 5 }, { 37, 38 }, { 69, 70 },
	{ 90, 91 } };

static struct spear_muxreg uart5_ext_4_5_muxreg[] = {
	{ .reg = PMX_CONFIG_REG, .mask = PMX_I2C_MASK, .val = 0, },
	{ .reg = IP_SEL_PAD_0_9_REG, .mask = PMX_PL_4_5_MASK,
	  .val = PMX_UART5_PL_4_5_VAL, },
	{ .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_UART5_PORT_SEL_MASK,
	  .val = PMX_UART5_PORT_4_VAL, },
};

static struct spear_muxreg uart5_ext_37_38_muxreg[] = {
	{ .reg = PMX_CONFIG_REG, .mask = PMX_UART0_MODEM_MASK, .val = 0, },
	{ .reg = IP_SEL_PAD_30_39_REG, .mask = PMX_PL_37_38_MASK,
	  .val = PMX_UART5_PL_37_38_VAL, },
	{ .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_UART5_PORT_SEL_MASK,
	  .val = PMX_UART5_PORT_37_VAL, },
};

/* pads 69 and 70 live in different IP_SEL banks, hence two pad writes */
static struct spear_muxreg uart5_ext_69_70_muxreg[] = {
	{ .reg = IP_SEL_PAD_60_69_REG, .mask = PMX_PL_69_MASK,
	  .val = PMX_UART5_PL_69_VAL, },
	{ .reg = IP_SEL_PAD_70_79_REG, .mask = PMX_PL_70_MASK,
	  .val = PMX_UART5_PL_70_VAL, },
	{ .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_UART5_PORT_SEL_MASK,
	  .val = PMX_UART5_PORT_69_VAL, },
};

static struct spear_muxreg uart5_ext_90_91_muxreg[] = {
	{ .reg = IP_SEL_PAD_90_99_REG, .mask = PMX_PL_90_91_MASK,
	  .val = PMX_UART5_PL_90_91_VAL, },
	{ .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_UART5_PORT_SEL_MASK,
	  .val = PMX_UART5_PORT_90_VAL, },
};

/* Per-pad-pair mode/mux tables for uart5 */
static struct spear_modemux uart5_modemux[][1] = {
	{
		/* Select signals on pins 4_5 */
		{ .modes = EXTENDED_MODE,
		  .muxregs = uart5_ext_4_5_muxreg,
		  .nmuxregs = ARRAY_SIZE(uart5_ext_4_5_muxreg), },
	}, {
		/* Select signals on pins 37_38 */
		{ .modes = EXTENDED_MODE,
		  .muxregs = uart5_ext_37_38_muxreg,
		  .nmuxregs = ARRAY_SIZE(uart5_ext_37_38_muxreg), },
	}, {
		/* Select signals on pins 69_70 */
		{ .modes = EXTENDED_MODE,
		  .muxregs = uart5_ext_69_70_muxreg,
		  .nmuxregs = ARRAY_SIZE(uart5_ext_69_70_muxreg), },
	}, {
		/* Select signals on pins 90_91 */
		{ .modes = EXTENDED_MODE,
		  .muxregs = uart5_ext_90_91_muxreg,
		  .nmuxregs = ARRAY_SIZE(uart5_ext_90_91_muxreg), },
	},
};
/* One pin group per candidate uart5 pad pair; index matches uart5_pins[] */
static struct spear_pingroup uart5_pingroup[] = {
	{ .name = "uart5_4_5_grp", .pins = uart5_pins[0],
	  .npins = ARRAY_SIZE(uart5_pins[0]), .modemuxs = uart5_modemux[0],
	  .nmodemuxs = ARRAY_SIZE(uart5_modemux[0]), },
	{ .name = "uart5_37_38_grp", .pins = uart5_pins[1],
	  .npins = ARRAY_SIZE(uart5_pins[1]), .modemuxs = uart5_modemux[1],
	  .nmodemuxs = ARRAY_SIZE(uart5_modemux[1]), },
	{ .name = "uart5_69_70_grp", .pins = uart5_pins[2],
	  .npins = ARRAY_SIZE(uart5_pins[2]), .modemuxs = uart5_modemux[2],
	  .nmodemuxs = ARRAY_SIZE(uart5_modemux[2]), },
	{ .name = "uart5_90_91_grp", .pins = uart5_pins[3],
	  .npins = ARRAY_SIZE(uart5_pins[3]), .modemuxs = uart5_modemux[3],
	  .nmodemuxs = ARRAY_SIZE(uart5_modemux[3]), },
};

static const char *const uart5_grps[] = { "uart5_4_5_grp", "uart5_37_38_grp",
	"uart5_69_70_grp", "uart5_90_91_grp" };

static struct spear_function uart5_function = {
	.name = "uart5",
	.groups = uart5_grps,
	.ngroups = ARRAY_SIZE(uart5_grps),
};

/* Pad multiplexing for uart6 device */
static const unsigned uart6_pins[][2] = { { 2, 3 }, { 88, 89 } };

static struct spear_muxreg uart6_ext_2_3_muxreg[] = {
	{ .reg = PMX_CONFIG_REG, .mask = PMX_UART0_MASK, .val = 0, },
	{ .reg = IP_SEL_PAD_0_9_REG, .mask = PMX_PL_2_3_MASK,
	  .val = PMX_UART6_PL_2_3_VAL, },
	{ .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_UART6_PORT_SEL_MASK,
	  .val = PMX_UART6_PORT_2_VAL, },
};

static struct spear_muxreg uart6_ext_88_89_muxreg[] = {
	{ .reg = IP_SEL_PAD_80_89_REG, .mask = PMX_PL_88_89_MASK,
	  .val = PMX_UART6_PL_88_89_VAL, },
	{ .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_UART6_PORT_SEL_MASK,
	  .val = PMX_UART6_PORT_88_VAL, },
};

/* Per-pad-pair mode/mux tables for uart6 */
static struct spear_modemux uart6_modemux[][1] = {
	{
		/* Select signals on pins 2_3 */
		{ .modes = EXTENDED_MODE,
		  .muxregs = uart6_ext_2_3_muxreg,
		  .nmuxregs = ARRAY_SIZE(uart6_ext_2_3_muxreg), },
	}, {
		/* Select signals on pins 88_89 */
		{ .modes = EXTENDED_MODE,
		  .muxregs = uart6_ext_88_89_muxreg,
		  .nmuxregs = ARRAY_SIZE(uart6_ext_88_89_muxreg), },
	},
};

/* One pin group per candidate uart6 pad pair (initializer on next line) */
static struct spear_pingroup uart6_pingroup[]
= {
	{ .name = "uart6_2_3_grp", .pins = uart6_pins[0],
	  .npins = ARRAY_SIZE(uart6_pins[0]), .modemuxs = uart6_modemux[0],
	  .nmodemuxs = ARRAY_SIZE(uart6_modemux[0]), },
	{ .name = "uart6_88_89_grp", .pins = uart6_pins[1],
	  .npins = ARRAY_SIZE(uart6_pins[1]), .modemuxs = uart6_modemux[1],
	  .nmodemuxs = ARRAY_SIZE(uart6_modemux[1]), },
};

static const char *const uart6_grps[] = { "uart6_2_3_grp", "uart6_88_89_grp" };

static struct spear_function uart6_function = {
	.name = "uart6",
	.groups = uart6_grps,
	.ngroups = ARRAY_SIZE(uart6_grps),
};

/* UART - RS485 pmx */
static const unsigned rs485_pins[] = { 77, 78, 79 };

static struct spear_muxreg rs485_muxreg[] = {
	{ .reg = IP_SEL_PAD_70_79_REG, .mask = PMX_PL_77_78_79_MASK,
	  .val = PMX_RS485_PL_77_78_79_VAL, },
};

static struct spear_modemux rs485_modemux[] = {
	{ .modes = EXTENDED_MODE,
	  .muxregs = rs485_muxreg,
	  .nmuxregs = ARRAY_SIZE(rs485_muxreg), },
};

/* rs485 has a single fixed pad assignment, so a lone group (not an array) */
static struct spear_pingroup rs485_pingroup = {
	.name = "rs485_grp",
	.pins = rs485_pins,
	.npins = ARRAY_SIZE(rs485_pins),
	.modemuxs = rs485_modemux,
	.nmodemuxs = ARRAY_SIZE(rs485_modemux),
};

static const char *const rs485_grps[] = { "rs485_grp" };

static struct spear_function rs485_function = {
	.name = "rs485",
	.groups = rs485_grps,
	.ngroups = ARRAY_SIZE(rs485_grps),
};

/* Pad multiplexing for Touchscreen device */
static const unsigned touchscreen_pins[] = { 5, 36 };

static struct spear_muxreg touchscreen_muxreg[] = {
	{ .reg = PMX_CONFIG_REG, .mask = PMX_I2C_MASK | PMX_SSP_CS_MASK,
	  .val = 0, },
};

static struct spear_muxreg touchscreen_ext_muxreg[] = {
	{ .reg = IP_SEL_PAD_0_9_REG, .mask = PMX_PL_5_MASK,
	  .val = PMX_TOUCH_Y_PL_5_VAL, },
	{ .reg = IP_SEL_PAD_30_39_REG, .mask = PMX_PL_36_MASK,
	  .val = PMX_TOUCH_X_PL_36_VAL, },
};

/* extended mode needs the extra IP_SEL writes on top of the base entry */
static struct spear_modemux touchscreen_modemux[] = {
	{ .modes = AUTO_NET_SMII_MODE | EXTENDED_MODE,
	  .muxregs = touchscreen_muxreg,
	  .nmuxregs = ARRAY_SIZE(touchscreen_muxreg), },
	{ .modes = EXTENDED_MODE,
	  .muxregs = touchscreen_ext_muxreg,
	  .nmuxregs =
ARRAY_SIZE(touchscreen_ext_muxreg), },
};

static struct spear_pingroup touchscreen_pingroup = {
	.name = "touchscreen_grp",
	.pins = touchscreen_pins,
	.npins = ARRAY_SIZE(touchscreen_pins),
	.modemuxs = touchscreen_modemux,
	.nmodemuxs = ARRAY_SIZE(touchscreen_modemux),
};

static const char *const touchscreen_grps[] = { "touchscreen_grp" };

static struct spear_function touchscreen_function = {
	.name = "touchscreen",
	.groups = touchscreen_grps,
	.ngroups = ARRAY_SIZE(touchscreen_grps),
};

/* Pad multiplexing for CAN device */
static const unsigned can0_pins[] = { 32, 33 };

static struct spear_muxreg can0_muxreg[] = {
	{ .reg = PMX_CONFIG_REG,
	  .mask = PMX_GPIO_PIN4_MASK | PMX_GPIO_PIN5_MASK,
	  .val = 0, },
};

static struct spear_muxreg can0_ext_muxreg[] = {
	{ .reg = IP_SEL_PAD_30_39_REG, .mask = PMX_PL_32_33_MASK,
	  .val = PMX_CAN0_PL_32_33_VAL, },
};

/* base entry applies in all auto modes; the IP_SEL write only in extended */
static struct spear_modemux can0_modemux[] = {
	{ .modes = AUTO_NET_SMII_MODE | AUTO_NET_MII_MODE | AUTO_EXP_MODE |
		EXTENDED_MODE,
	  .muxregs = can0_muxreg,
	  .nmuxregs = ARRAY_SIZE(can0_muxreg), },
	{ .modes = EXTENDED_MODE,
	  .muxregs = can0_ext_muxreg,
	  .nmuxregs = ARRAY_SIZE(can0_ext_muxreg), },
};

static struct spear_pingroup can0_pingroup = {
	.name = "can0_grp",
	.pins = can0_pins,
	.npins = ARRAY_SIZE(can0_pins),
	.modemuxs = can0_modemux,
	.nmodemuxs = ARRAY_SIZE(can0_modemux),
};

static const char *const can0_grps[] = { "can0_grp" };

static struct spear_function can0_function = {
	.name = "can0",
	.groups = can0_grps,
	.ngroups = ARRAY_SIZE(can0_grps),
};

static const unsigned can1_pins[] = { 30, 31 };

static struct spear_muxreg can1_muxreg[] = {
	{ .reg = PMX_CONFIG_REG,
	  .mask = PMX_GPIO_PIN2_MASK | PMX_GPIO_PIN3_MASK,
	  .val = 0, },
};

static struct spear_muxreg can1_ext_muxreg[] = {
	{ .reg = IP_SEL_PAD_30_39_REG, .mask = PMX_PL_30_31_MASK,
	  .val = PMX_CAN1_PL_30_31_VAL, },
};

static struct spear_modemux can1_modemux[] = {
	{ .modes = AUTO_NET_SMII_MODE | AUTO_NET_MII_MODE | AUTO_EXP_MODE |
		EXTENDED_MODE,
	  .muxregs = can1_muxreg,
	  .nmuxregs
= ARRAY_SIZE(can1_muxreg), },
	{ .modes = EXTENDED_MODE,
	  .muxregs = can1_ext_muxreg,
	  .nmuxregs = ARRAY_SIZE(can1_ext_muxreg), },
};

static struct spear_pingroup can1_pingroup = {
	.name = "can1_grp",
	.pins = can1_pins,
	.npins = ARRAY_SIZE(can1_pins),
	.modemuxs = can1_modemux,
	.nmodemuxs = ARRAY_SIZE(can1_modemux),
};

static const char *const can1_grps[] = { "can1_grp" };

static struct spear_function can1_function = {
	.name = "can1",
	.groups = can1_grps,
	.ngroups = ARRAY_SIZE(can1_grps),
};

/* Pad multiplexing for PWM0_1 device */
/*
 * NOTE(review): rows are NOT sorted by pad number ({37,38} comes first);
 * the pingroup table indexes into this array by row number.
 */
static const unsigned pwm0_1_pins[][2] = { { 37, 38 }, { 14, 15 }, { 8, 9 },
	{ 30, 31 }, { 42, 43 }, { 59, 60 }, { 88, 89 } };

static struct spear_muxreg pwm0_1_pin_8_9_muxreg[] = {
	{ .reg = PMX_CONFIG_REG, .mask = PMX_SSP_MASK, .val = 0, },
	{ .reg = IP_SEL_PAD_0_9_REG, .mask = PMX_PL_8_9_MASK,
	  .val = PMX_PWM_0_1_PL_8_9_VAL, },
};

static struct spear_muxreg pwm0_1_autoexpsmallpri_muxreg[] = {
	{ .reg = PMX_CONFIG_REG, .mask = PMX_MII_MASK, .val = 0, },
};

static struct spear_muxreg pwm0_1_pin_14_15_muxreg[] = {
	{ .reg = IP_SEL_PAD_10_19_REG,
	  .mask = PMX_PL_14_MASK | PMX_PL_15_MASK,
	  .val = PMX_PWM1_PL_14_VAL | PMX_PWM0_PL_15_VAL, },
};

static struct spear_muxreg pwm0_1_pin_30_31_muxreg[] = {
	{ .reg = PMX_CONFIG_REG,
	  .mask = PMX_GPIO_PIN2_MASK | PMX_GPIO_PIN3_MASK,
	  .val = 0, },
	{ .reg = IP_SEL_PAD_30_39_REG,
	  .mask = PMX_PL_30_MASK | PMX_PL_31_MASK,
	  .val = PMX_PWM1_EXT_PL_30_VAL | PMX_PWM0_EXT_PL_31_VAL, },
};

static struct spear_muxreg pwm0_1_net_muxreg[] = {
	{ .reg = PMX_CONFIG_REG, .mask = PMX_UART0_MODEM_MASK, .val = 0, },
};

static struct spear_muxreg pwm0_1_pin_37_38_muxreg[] = {
	{ .reg = IP_SEL_PAD_30_39_REG, .mask = PMX_PL_37_38_MASK,
	  .val = PMX_PWM0_1_PL_37_38_VAL, },
};

static struct spear_muxreg pwm0_1_pin_42_43_muxreg[] = {
	{ .reg = PMX_CONFIG_REG,
	  .mask = PMX_UART0_MODEM_MASK | PMX_TIMER_0_1_MASK,
	  .val = 0, },
	{ .reg = IP_SEL_PAD_40_49_REG,
	  .mask = PMX_PL_42_MASK | PMX_PL_43_MASK,
	  .val = PMX_PWM1_PL_42_VAL | PMX_PWM0_PL_43_VAL, },
};

static struct spear_muxreg pwm0_1_pin_59_60_muxreg[] = {
	{ .reg = IP_SEL_PAD_50_59_REG, .mask = PMX_PL_59_MASK,
	  .val = PMX_PWM1_PL_59_VAL, },
	{ .reg = IP_SEL_PAD_60_69_REG, .mask = PMX_PL_60_MASK,
	  .val = PMX_PWM0_PL_60_VAL, },
};

static struct spear_muxreg pwm0_1_pin_88_89_muxreg[] = {
	{ .reg = IP_SEL_PAD_80_89_REG, .mask = PMX_PL_88_89_MASK,
	  .val = PMX_PWM0_1_PL_88_89_VAL, },
};

/* Mode/mux option tables, one per candidate pwm0_1 pad pair */
static struct spear_modemux pwm0_1_pin_8_9_modemux[] = {
	{ .modes = EXTENDED_MODE,
	  .muxregs = pwm0_1_pin_8_9_muxreg,
	  .nmuxregs = ARRAY_SIZE(pwm0_1_pin_8_9_muxreg), },
};

static struct spear_modemux pwm0_1_pin_14_15_modemux[] = {
	{ .modes = AUTO_EXP_MODE | SMALL_PRINTERS_MODE | EXTENDED_MODE,
	  .muxregs = pwm0_1_autoexpsmallpri_muxreg,
	  .nmuxregs = ARRAY_SIZE(pwm0_1_autoexpsmallpri_muxreg), },
	{ .modes = EXTENDED_MODE,
	  .muxregs = pwm0_1_pin_14_15_muxreg,
	  .nmuxregs = ARRAY_SIZE(pwm0_1_pin_14_15_muxreg), },
};

static struct spear_modemux pwm0_1_pin_30_31_modemux[] = {
	{ .modes = EXTENDED_MODE,
	  .muxregs = pwm0_1_pin_30_31_muxreg,
	  .nmuxregs = ARRAY_SIZE(pwm0_1_pin_30_31_muxreg), },
};

static struct spear_modemux pwm0_1_pin_37_38_modemux[] = {
	{ .modes = AUTO_NET_SMII_MODE | AUTO_NET_MII_MODE | EXTENDED_MODE,
	  .muxregs = pwm0_1_net_muxreg,
	  .nmuxregs = ARRAY_SIZE(pwm0_1_net_muxreg), },
	{ .modes = EXTENDED_MODE,
	  .muxregs = pwm0_1_pin_37_38_muxreg,
	  .nmuxregs = ARRAY_SIZE(pwm0_1_pin_37_38_muxreg), },
};

static struct spear_modemux pwm0_1_pin_42_43_modemux[] = {
	{ .modes = EXTENDED_MODE,
	  .muxregs = pwm0_1_pin_42_43_muxreg,
	  .nmuxregs = ARRAY_SIZE(pwm0_1_pin_42_43_muxreg), },
};

static struct spear_modemux pwm0_1_pin_59_60_modemux[] = {
	{ .modes = EXTENDED_MODE,
	  .muxregs = pwm0_1_pin_59_60_muxreg,
	  .nmuxregs = ARRAY_SIZE(pwm0_1_pin_59_60_muxreg), },
};

static struct spear_modemux pwm0_1_pin_88_89_modemux[] = {
	{ .modes = EXTENDED_MODE,
	  .muxregs = pwm0_1_pin_88_89_muxreg,
	  .nmuxregs = ARRAY_SIZE(pwm0_1_pin_88_89_muxreg), },
};

static struct spear_pingroup pwm0_1_pingroup[] = {
	{ .name =
"pwm0_1_pin_8_9_grp", .pins = pwm0_1_pins[0], .npins = ARRAY_SIZE(pwm0_1_pins[0]), .modemuxs = pwm0_1_pin_8_9_modemux, .nmodemuxs = ARRAY_SIZE(pwm0_1_pin_8_9_modemux), }, { .name = "pwm0_1_pin_14_15_grp", .pins = pwm0_1_pins[1], .npins = ARRAY_SIZE(pwm0_1_pins[1]), .modemuxs = pwm0_1_pin_14_15_modemux, .nmodemuxs = ARRAY_SIZE(pwm0_1_pin_14_15_modemux), }, { .name = "pwm0_1_pin_30_31_grp", .pins = pwm0_1_pins[2], .npins = ARRAY_SIZE(pwm0_1_pins[2]), .modemuxs = pwm0_1_pin_30_31_modemux, .nmodemuxs = ARRAY_SIZE(pwm0_1_pin_30_31_modemux), }, { .name = "pwm0_1_pin_37_38_grp", .pins = pwm0_1_pins[3], .npins = ARRAY_SIZE(pwm0_1_pins[3]), .modemuxs = pwm0_1_pin_37_38_modemux, .nmodemuxs = ARRAY_SIZE(pwm0_1_pin_37_38_modemux), }, { .name = "pwm0_1_pin_42_43_grp", .pins = pwm0_1_pins[4], .npins = ARRAY_SIZE(pwm0_1_pins[4]), .modemuxs = pwm0_1_pin_42_43_modemux, .nmodemuxs = ARRAY_SIZE(pwm0_1_pin_42_43_modemux), }, { .name = "pwm0_1_pin_59_60_grp", .pins = pwm0_1_pins[5], .npins = ARRAY_SIZE(pwm0_1_pins[5]), .modemuxs = pwm0_1_pin_59_60_modemux, .nmodemuxs = ARRAY_SIZE(pwm0_1_pin_59_60_modemux), }, { .name = "pwm0_1_pin_88_89_grp", .pins = pwm0_1_pins[6], .npins = ARRAY_SIZE(pwm0_1_pins[6]), .modemuxs = pwm0_1_pin_88_89_modemux, .nmodemuxs = ARRAY_SIZE(pwm0_1_pin_88_89_modemux), }, }; static const char *const pwm0_1_grps[] = { "pwm0_1_pin_8_9_grp", "pwm0_1_pin_14_15_grp", "pwm0_1_pin_30_31_grp", "pwm0_1_pin_37_38_grp", "pwm0_1_pin_42_43_grp", "pwm0_1_pin_59_60_grp", "pwm0_1_pin_88_89_grp" }; static struct spear_function pwm0_1_function = { .name = "pwm0_1", .groups = pwm0_1_grps, .ngroups = ARRAY_SIZE(pwm0_1_grps), }; /* Pad multiplexing for PWM2 device */ static const unsigned pwm2_pins[][1] = { { 7 }, { 13 }, { 29 }, { 34 }, { 41 }, { 58 }, { 87 } }; static struct spear_muxreg pwm2_net_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_SSP_CS_MASK, .val = 0, }, }; static struct spear_muxreg pwm2_pin_7_muxreg[] = { { .reg = IP_SEL_PAD_0_9_REG, .mask = PMX_PL_7_MASK, .val = 
PMX_PWM_2_PL_7_VAL, },
};

/* pwm2: per-pad mux programming tables */
static struct spear_muxreg pwm2_autoexpsmallpri_muxreg[] = {
	{ .reg = PMX_CONFIG_REG, .mask = PMX_MII_MASK, .val = 0, },
};

static struct spear_muxreg pwm2_pin_13_muxreg[] = {
	{ .reg = IP_SEL_PAD_10_19_REG, .mask = PMX_PL_13_MASK,
	  .val = PMX_PWM2_PL_13_VAL, },
};

static struct spear_muxreg pwm2_pin_29_muxreg[] = {
	{ .reg = PMX_CONFIG_REG, .mask = PMX_GPIO_PIN1_MASK, .val = 0, },
	{ .reg = IP_SEL_PAD_20_29_REG, .mask = PMX_PL_29_MASK,
	  .val = PMX_PWM_2_PL_29_VAL, },
};

/* pad 34 additionally needs the PWM enable bit in MODE_CONFIG_REG */
static struct spear_muxreg pwm2_pin_34_muxreg[] = {
	{ .reg = PMX_CONFIG_REG, .mask = PMX_SSP_CS_MASK, .val = 0, },
	{ .reg = MODE_CONFIG_REG, .mask = PMX_PWM_MASK, .val = PMX_PWM_MASK, },
	{ .reg = IP_SEL_PAD_30_39_REG, .mask = PMX_PL_34_MASK,
	  .val = PMX_PWM2_PL_34_VAL, },
};

static struct spear_muxreg pwm2_pin_41_muxreg[] = {
	{ .reg = PMX_CONFIG_REG, .mask = PMX_UART0_MODEM_MASK, .val = 0, },
	{ .reg = IP_SEL_PAD_40_49_REG, .mask = PMX_PL_41_MASK,
	  .val = PMX_PWM2_PL_41_VAL, },
};

static struct spear_muxreg pwm2_pin_58_muxreg[] = {
	{ .reg = IP_SEL_PAD_50_59_REG, .mask = PMX_PL_58_MASK,
	  .val = PMX_PWM2_PL_58_VAL, },
};

static struct spear_muxreg pwm2_pin_87_muxreg[] = {
	{ .reg = IP_SEL_PAD_80_89_REG, .mask = PMX_PL_87_MASK,
	  .val = PMX_PWM2_PL_87_VAL, },
};

static struct spear_modemux pwm2_pin_7_modemux[] = {
	{ .modes = AUTO_NET_SMII_MODE | AUTO_NET_MII_MODE | EXTENDED_MODE,
	  .muxregs = pwm2_net_muxreg,
	  .nmuxregs = ARRAY_SIZE(pwm2_net_muxreg), },
	{ .modes = EXTENDED_MODE,
	  .muxregs = pwm2_pin_7_muxreg,
	  .nmuxregs = ARRAY_SIZE(pwm2_pin_7_muxreg), },
};

static struct spear_modemux pwm2_pin_13_modemux[] = {
	{ .modes = AUTO_EXP_MODE | SMALL_PRINTERS_MODE | EXTENDED_MODE,
	  .muxregs = pwm2_autoexpsmallpri_muxreg,
	  .nmuxregs = ARRAY_SIZE(pwm2_autoexpsmallpri_muxreg), },
	{ .modes = EXTENDED_MODE,
	  .muxregs = pwm2_pin_13_muxreg,
	  .nmuxregs = ARRAY_SIZE(pwm2_pin_13_muxreg), },
};

static struct spear_modemux pwm2_pin_29_modemux[] = {
	{ .modes = EXTENDED_MODE,
	  .muxregs = pwm2_pin_29_muxreg,
	  .nmuxregs
= ARRAY_SIZE(pwm2_pin_29_muxreg), },
};

static struct spear_modemux pwm2_pin_34_modemux[] = {
	{ .modes = EXTENDED_MODE,
	  .muxregs = pwm2_pin_34_muxreg,
	  .nmuxregs = ARRAY_SIZE(pwm2_pin_34_muxreg), },
};

static struct spear_modemux pwm2_pin_41_modemux[] = {
	{ .modes = EXTENDED_MODE,
	  .muxregs = pwm2_pin_41_muxreg,
	  .nmuxregs = ARRAY_SIZE(pwm2_pin_41_muxreg), },
};

static struct spear_modemux pwm2_pin_58_modemux[] = {
	{ .modes = EXTENDED_MODE,
	  .muxregs = pwm2_pin_58_muxreg,
	  .nmuxregs = ARRAY_SIZE(pwm2_pin_58_muxreg), },
};

static struct spear_modemux pwm2_pin_87_modemux[] = {
	{ .modes = EXTENDED_MODE,
	  .muxregs = pwm2_pin_87_muxreg,
	  .nmuxregs = ARRAY_SIZE(pwm2_pin_87_muxreg), },
};

/* One pin group per candidate pwm2 pad; index matches pwm2_pins[] */
static struct spear_pingroup pwm2_pingroup[] = {
	{ .name = "pwm2_pin_7_grp", .pins = pwm2_pins[0],
	  .npins = ARRAY_SIZE(pwm2_pins[0]), .modemuxs = pwm2_pin_7_modemux,
	  .nmodemuxs = ARRAY_SIZE(pwm2_pin_7_modemux), },
	{ .name = "pwm2_pin_13_grp", .pins = pwm2_pins[1],
	  .npins = ARRAY_SIZE(pwm2_pins[1]), .modemuxs = pwm2_pin_13_modemux,
	  .nmodemuxs = ARRAY_SIZE(pwm2_pin_13_modemux), },
	{ .name = "pwm2_pin_29_grp", .pins = pwm2_pins[2],
	  .npins = ARRAY_SIZE(pwm2_pins[2]), .modemuxs = pwm2_pin_29_modemux,
	  .nmodemuxs = ARRAY_SIZE(pwm2_pin_29_modemux), },
	{ .name = "pwm2_pin_34_grp", .pins = pwm2_pins[3],
	  .npins = ARRAY_SIZE(pwm2_pins[3]), .modemuxs = pwm2_pin_34_modemux,
	  .nmodemuxs = ARRAY_SIZE(pwm2_pin_34_modemux), },
	{ .name = "pwm2_pin_41_grp", .pins = pwm2_pins[4],
	  .npins = ARRAY_SIZE(pwm2_pins[4]), .modemuxs = pwm2_pin_41_modemux,
	  .nmodemuxs = ARRAY_SIZE(pwm2_pin_41_modemux), },
	{ .name = "pwm2_pin_58_grp", .pins = pwm2_pins[5],
	  .npins = ARRAY_SIZE(pwm2_pins[5]), .modemuxs = pwm2_pin_58_modemux,
	  .nmodemuxs = ARRAY_SIZE(pwm2_pin_58_modemux), },
	{ .name = "pwm2_pin_87_grp", .pins = pwm2_pins[6],
	  .npins = ARRAY_SIZE(pwm2_pins[6]), .modemuxs = pwm2_pin_87_modemux,
	  .nmodemuxs = ARRAY_SIZE(pwm2_pin_87_modemux), },
};

static const char *const pwm2_grps[] = { "pwm2_pin_7_grp", "pwm2_pin_13_grp",
"pwm2_pin_29_grp", "pwm2_pin_34_grp", "pwm2_pin_41_grp", "pwm2_pin_58_grp",
	"pwm2_pin_87_grp" };

static struct spear_function pwm2_function = {
	.name = "pwm2",
	.groups = pwm2_grps,
	.ngroups = ARRAY_SIZE(pwm2_grps),
};

/* Pad multiplexing for PWM3 device */
static const unsigned pwm3_pins[][1] = { { 6 }, { 12 }, { 28 }, { 40 },
	{ 57 }, { 86 } };

static struct spear_muxreg pwm3_pin_6_muxreg[] = {
	{ .reg = PMX_CONFIG_REG, .mask = PMX_SSP_MASK, .val = 0, },
	{ .reg = IP_SEL_PAD_0_9_REG, .mask = PMX_PL_6_MASK,
	  .val = PMX_PWM_3_PL_6_VAL, },
};

/* shared base entry for the modes that only need the MII claim cleared */
static struct spear_muxreg pwm3_muxreg[] = {
	{ .reg = PMX_CONFIG_REG, .mask = PMX_MII_MASK, .val = 0, },
};

static struct spear_muxreg pwm3_pin_12_muxreg[] = {
	{ .reg = IP_SEL_PAD_10_19_REG, .mask = PMX_PL_12_MASK,
	  .val = PMX_PWM3_PL_12_VAL, },
};

static struct spear_muxreg pwm3_pin_28_muxreg[] = {
	{ .reg = PMX_CONFIG_REG, .mask = PMX_GPIO_PIN0_MASK, .val = 0, },
	{ .reg = IP_SEL_PAD_20_29_REG, .mask = PMX_PL_28_MASK,
	  .val = PMX_PWM_3_PL_28_VAL, },
};

static struct spear_muxreg pwm3_pin_40_muxreg[] = {
	{ .reg = PMX_CONFIG_REG, .mask = PMX_UART0_MODEM_MASK, .val = 0, },
	{ .reg = IP_SEL_PAD_40_49_REG, .mask = PMX_PL_40_MASK,
	  .val = PMX_PWM3_PL_40_VAL, },
};

static struct spear_muxreg pwm3_pin_57_muxreg[] = {
	{ .reg = IP_SEL_PAD_50_59_REG, .mask = PMX_PL_57_MASK,
	  .val = PMX_PWM3_PL_57_VAL, },
};

static struct spear_muxreg pwm3_pin_86_muxreg[] = {
	{ .reg = IP_SEL_PAD_80_89_REG, .mask = PMX_PL_86_MASK,
	  .val = PMX_PWM3_PL_86_VAL, },
};

static struct spear_modemux pwm3_pin_6_modemux[] = {
	{ .modes = EXTENDED_MODE,
	  .muxregs = pwm3_pin_6_muxreg,
	  .nmuxregs = ARRAY_SIZE(pwm3_pin_6_muxreg), },
};

static struct spear_modemux pwm3_pin_12_modemux[] = {
	{ .modes = AUTO_EXP_MODE | SMALL_PRINTERS_MODE | AUTO_NET_SMII_MODE |
		EXTENDED_MODE,
	  .muxregs = pwm3_muxreg,
	  .nmuxregs = ARRAY_SIZE(pwm3_muxreg), },
	{ .modes = EXTENDED_MODE,
	  .muxregs = pwm3_pin_12_muxreg,
	  .nmuxregs = ARRAY_SIZE(pwm3_pin_12_muxreg), },
};

static struct spear_modemux
pwm3_pin_28_modemux[] = {
	{ .modes = EXTENDED_MODE,
	  .muxregs = pwm3_pin_28_muxreg,
	  .nmuxregs = ARRAY_SIZE(pwm3_pin_28_muxreg), },
};

static struct spear_modemux pwm3_pin_40_modemux[] = {
	{ .modes = EXTENDED_MODE,
	  .muxregs = pwm3_pin_40_muxreg,
	  .nmuxregs = ARRAY_SIZE(pwm3_pin_40_muxreg), },
};

static struct spear_modemux pwm3_pin_57_modemux[] = {
	{ .modes = EXTENDED_MODE,
	  .muxregs = pwm3_pin_57_muxreg,
	  .nmuxregs = ARRAY_SIZE(pwm3_pin_57_muxreg), },
};

static struct spear_modemux pwm3_pin_86_modemux[] = {
	{ .modes = EXTENDED_MODE,
	  .muxregs = pwm3_pin_86_muxreg,
	  .nmuxregs = ARRAY_SIZE(pwm3_pin_86_muxreg), },
};

/* One pin group per candidate pwm3 pad; index matches pwm3_pins[] */
static struct spear_pingroup pwm3_pingroup[] = {
	{ .name = "pwm3_pin_6_grp", .pins = pwm3_pins[0],
	  .npins = ARRAY_SIZE(pwm3_pins[0]), .modemuxs = pwm3_pin_6_modemux,
	  .nmodemuxs = ARRAY_SIZE(pwm3_pin_6_modemux), },
	{ .name = "pwm3_pin_12_grp", .pins = pwm3_pins[1],
	  .npins = ARRAY_SIZE(pwm3_pins[1]), .modemuxs = pwm3_pin_12_modemux,
	  .nmodemuxs = ARRAY_SIZE(pwm3_pin_12_modemux), },
	{ .name = "pwm3_pin_28_grp", .pins = pwm3_pins[2],
	  .npins = ARRAY_SIZE(pwm3_pins[2]), .modemuxs = pwm3_pin_28_modemux,
	  .nmodemuxs = ARRAY_SIZE(pwm3_pin_28_modemux), },
	{ .name = "pwm3_pin_40_grp", .pins = pwm3_pins[3],
	  .npins = ARRAY_SIZE(pwm3_pins[3]), .modemuxs = pwm3_pin_40_modemux,
	  .nmodemuxs = ARRAY_SIZE(pwm3_pin_40_modemux), },
	{ .name = "pwm3_pin_57_grp", .pins = pwm3_pins[4],
	  .npins = ARRAY_SIZE(pwm3_pins[4]), .modemuxs = pwm3_pin_57_modemux,
	  .nmodemuxs = ARRAY_SIZE(pwm3_pin_57_modemux), },
	{ .name = "pwm3_pin_86_grp", .pins = pwm3_pins[5],
	  .npins = ARRAY_SIZE(pwm3_pins[5]), .modemuxs = pwm3_pin_86_modemux,
	  .nmodemuxs = ARRAY_SIZE(pwm3_pin_86_modemux), },
};

static const char *const pwm3_grps[] = { "pwm3_pin_6_grp", "pwm3_pin_12_grp",
	"pwm3_pin_28_grp", "pwm3_pin_40_grp", "pwm3_pin_57_grp",
	"pwm3_pin_86_grp" };

static struct spear_function pwm3_function = {
	.name = "pwm3",
	.groups = pwm3_grps,
	.ngroups = ARRAY_SIZE(pwm3_grps),
};

/* Pad multiplexing for SSP1 device */
static
const unsigned ssp1_pins[][2] = { { 17, 20 }, { 36, 39 }, { 48, 51 }, { 65, 68 }, { 94, 97 } }; static struct spear_muxreg ssp1_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_MII_MASK, .val = 0, }, }; static struct spear_muxreg ssp1_ext_17_20_muxreg[] = { { .reg = IP_SEL_PAD_10_19_REG, .mask = PMX_PL_17_18_MASK | PMX_PL_19_MASK, .val = PMX_SSP1_PL_17_18_19_20_VAL, }, { .reg = IP_SEL_PAD_20_29_REG, .mask = PMX_PL_20_MASK, .val = PMX_SSP1_PL_17_18_19_20_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_SSP1_PORT_SEL_MASK, .val = PMX_SSP1_PORT_17_TO_20_VAL, }, }; static struct spear_muxreg ssp1_ext_36_39_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_UART0_MODEM_MASK | PMX_SSP_CS_MASK, .val = 0, }, { .reg = IP_SEL_PAD_30_39_REG, .mask = PMX_PL_36_MASK | PMX_PL_37_38_MASK | PMX_PL_39_MASK, .val = PMX_SSP1_PL_36_VAL | PMX_SSP1_PL_37_38_VAL | PMX_SSP1_PL_39_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_SSP1_PORT_SEL_MASK, .val = PMX_SSP1_PORT_36_TO_39_VAL, }, }; static struct spear_muxreg ssp1_ext_48_51_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_TIMER_0_1_MASK | PMX_TIMER_2_3_MASK, .val = 0, }, { .reg = IP_SEL_PAD_40_49_REG, .mask = PMX_PL_48_49_MASK, .val = PMX_SSP1_PL_48_49_VAL, }, { .reg = IP_SEL_PAD_50_59_REG, .mask = PMX_PL_50_51_MASK, .val = PMX_SSP1_PL_50_51_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_SSP1_PORT_SEL_MASK, .val = PMX_SSP1_PORT_48_TO_51_VAL, }, }; static struct spear_muxreg ssp1_ext_65_68_muxreg[] = { { .reg = IP_SEL_PAD_60_69_REG, .mask = PMX_PL_65_TO_68_MASK, .val = PMX_SSP1_PL_65_TO_68_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_SSP1_PORT_SEL_MASK, .val = PMX_SSP1_PORT_65_TO_68_VAL, }, }; static struct spear_muxreg ssp1_ext_94_97_muxreg[] = { { .reg = IP_SEL_PAD_90_99_REG, .mask = PMX_PL_94_95_MASK | PMX_PL_96_97_MASK, .val = PMX_SSP1_PL_94_95_VAL | PMX_SSP1_PL_96_97_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_SSP1_PORT_SEL_MASK, .val = PMX_SSP1_PORT_94_TO_97_VAL, }, }; static struct spear_modemux ssp1_17_20_modemux[] 
= { { .modes = SMALL_PRINTERS_MODE | AUTO_NET_SMII_MODE | EXTENDED_MODE, .muxregs = ssp1_muxreg, .nmuxregs = ARRAY_SIZE(ssp1_muxreg), }, { .modes = EXTENDED_MODE, .muxregs = ssp1_ext_17_20_muxreg, .nmuxregs = ARRAY_SIZE(ssp1_ext_17_20_muxreg), }, }; static struct spear_modemux ssp1_36_39_modemux[] = { { .modes = EXTENDED_MODE, .muxregs = ssp1_ext_36_39_muxreg, .nmuxregs = ARRAY_SIZE(ssp1_ext_36_39_muxreg), }, }; static struct spear_modemux ssp1_48_51_modemux[] = { { .modes = EXTENDED_MODE, .muxregs = ssp1_ext_48_51_muxreg, .nmuxregs = ARRAY_SIZE(ssp1_ext_48_51_muxreg), }, }; static struct spear_modemux ssp1_65_68_modemux[] = { { .modes = EXTENDED_MODE, .muxregs = ssp1_ext_65_68_muxreg, .nmuxregs = ARRAY_SIZE(ssp1_ext_65_68_muxreg), }, }; static struct spear_modemux ssp1_94_97_modemux[] = { { .modes = EXTENDED_MODE, .muxregs = ssp1_ext_94_97_muxreg, .nmuxregs = ARRAY_SIZE(ssp1_ext_94_97_muxreg), }, }; static struct spear_pingroup ssp1_pingroup[] = { { .name = "ssp1_17_20_grp", .pins = ssp1_pins[0], .npins = ARRAY_SIZE(ssp1_pins[0]), .modemuxs = ssp1_17_20_modemux, .nmodemuxs = ARRAY_SIZE(ssp1_17_20_modemux), }, { .name = "ssp1_36_39_grp", .pins = ssp1_pins[1], .npins = ARRAY_SIZE(ssp1_pins[1]), .modemuxs = ssp1_36_39_modemux, .nmodemuxs = ARRAY_SIZE(ssp1_36_39_modemux), }, { .name = "ssp1_48_51_grp", .pins = ssp1_pins[2], .npins = ARRAY_SIZE(ssp1_pins[2]), .modemuxs = ssp1_48_51_modemux, .nmodemuxs = ARRAY_SIZE(ssp1_48_51_modemux), }, { .name = "ssp1_65_68_grp", .pins = ssp1_pins[3], .npins = ARRAY_SIZE(ssp1_pins[3]), .modemuxs = ssp1_65_68_modemux, .nmodemuxs = ARRAY_SIZE(ssp1_65_68_modemux), }, { .name = "ssp1_94_97_grp", .pins = ssp1_pins[4], .npins = ARRAY_SIZE(ssp1_pins[4]), .modemuxs = ssp1_94_97_modemux, .nmodemuxs = ARRAY_SIZE(ssp1_94_97_modemux), }, }; static const char *const ssp1_grps[] = { "ssp1_17_20_grp", "ssp1_36_39_grp", "ssp1_48_51_grp", "ssp1_65_68_grp", "ssp1_94_97_grp" }; static struct spear_function ssp1_function = { .name = "ssp1", .groups = 
ssp1_grps, .ngroups = ARRAY_SIZE(ssp1_grps), }; /* Pad multiplexing for SSP2 device */ static const unsigned ssp2_pins[][2] = { { 13, 16 }, { 32, 35 }, { 44, 47 }, { 61, 64 }, { 90, 93 } }; static struct spear_muxreg ssp2_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_MII_MASK, .val = 0, }, }; static struct spear_muxreg ssp2_ext_13_16_muxreg[] = { { .reg = IP_SEL_PAD_10_19_REG, .mask = PMX_PL_13_14_MASK | PMX_PL_15_16_MASK, .val = PMX_SSP2_PL_13_14_15_16_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_SSP2_PORT_SEL_MASK, .val = PMX_SSP2_PORT_13_TO_16_VAL, }, }; static struct spear_muxreg ssp2_ext_32_35_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_SSP_CS_MASK | PMX_GPIO_PIN4_MASK | PMX_GPIO_PIN5_MASK, .val = 0, }, { .reg = IP_SEL_PAD_30_39_REG, .mask = PMX_PL_32_33_MASK | PMX_PL_34_MASK | PMX_PL_35_MASK, .val = PMX_SSP2_PL_32_33_VAL | PMX_SSP2_PL_34_VAL | PMX_SSP2_PL_35_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_SSP2_PORT_SEL_MASK, .val = PMX_SSP2_PORT_32_TO_35_VAL, }, }; static struct spear_muxreg ssp2_ext_44_47_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_TIMER_0_1_MASK | PMX_TIMER_2_3_MASK, .val = 0, }, { .reg = IP_SEL_PAD_40_49_REG, .mask = PMX_PL_44_45_MASK | PMX_PL_46_47_MASK, .val = PMX_SSP2_PL_44_45_VAL | PMX_SSP2_PL_46_47_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_SSP2_PORT_SEL_MASK, .val = PMX_SSP2_PORT_44_TO_47_VAL, }, }; static struct spear_muxreg ssp2_ext_61_64_muxreg[] = { { .reg = IP_SEL_PAD_60_69_REG, .mask = PMX_PL_61_TO_64_MASK, .val = PMX_SSP2_PL_61_TO_64_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_SSP2_PORT_SEL_MASK, .val = PMX_SSP2_PORT_61_TO_64_VAL, }, }; static struct spear_muxreg ssp2_ext_90_93_muxreg[] = { { .reg = IP_SEL_PAD_90_99_REG, .mask = PMX_PL_90_91_MASK | PMX_PL_92_93_MASK, .val = PMX_SSP2_PL_90_91_VAL | PMX_SSP2_PL_92_93_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_SSP2_PORT_SEL_MASK, .val = PMX_SSP2_PORT_90_TO_93_VAL, }, }; static struct spear_modemux ssp2_13_16_modemux[] = { { .modes = 
AUTO_NET_SMII_MODE | EXTENDED_MODE, .muxregs = ssp2_muxreg, .nmuxregs = ARRAY_SIZE(ssp2_muxreg), }, { .modes = EXTENDED_MODE, .muxregs = ssp2_ext_13_16_muxreg, .nmuxregs = ARRAY_SIZE(ssp2_ext_13_16_muxreg), }, }; static struct spear_modemux ssp2_32_35_modemux[] = { { .modes = EXTENDED_MODE, .muxregs = ssp2_ext_32_35_muxreg, .nmuxregs = ARRAY_SIZE(ssp2_ext_32_35_muxreg), }, }; static struct spear_modemux ssp2_44_47_modemux[] = { { .modes = EXTENDED_MODE, .muxregs = ssp2_ext_44_47_muxreg, .nmuxregs = ARRAY_SIZE(ssp2_ext_44_47_muxreg), }, }; static struct spear_modemux ssp2_61_64_modemux[] = { { .modes = EXTENDED_MODE, .muxregs = ssp2_ext_61_64_muxreg, .nmuxregs = ARRAY_SIZE(ssp2_ext_61_64_muxreg), }, }; static struct spear_modemux ssp2_90_93_modemux[] = { { .modes = EXTENDED_MODE, .muxregs = ssp2_ext_90_93_muxreg, .nmuxregs = ARRAY_SIZE(ssp2_ext_90_93_muxreg), }, }; static struct spear_pingroup ssp2_pingroup[] = { { .name = "ssp2_13_16_grp", .pins = ssp2_pins[0], .npins = ARRAY_SIZE(ssp2_pins[0]), .modemuxs = ssp2_13_16_modemux, .nmodemuxs = ARRAY_SIZE(ssp2_13_16_modemux), }, { .name = "ssp2_32_35_grp", .pins = ssp2_pins[1], .npins = ARRAY_SIZE(ssp2_pins[1]), .modemuxs = ssp2_32_35_modemux, .nmodemuxs = ARRAY_SIZE(ssp2_32_35_modemux), }, { .name = "ssp2_44_47_grp", .pins = ssp2_pins[2], .npins = ARRAY_SIZE(ssp2_pins[2]), .modemuxs = ssp2_44_47_modemux, .nmodemuxs = ARRAY_SIZE(ssp2_44_47_modemux), }, { .name = "ssp2_61_64_grp", .pins = ssp2_pins[3], .npins = ARRAY_SIZE(ssp2_pins[3]), .modemuxs = ssp2_61_64_modemux, .nmodemuxs = ARRAY_SIZE(ssp2_61_64_modemux), }, { .name = "ssp2_90_93_grp", .pins = ssp2_pins[4], .npins = ARRAY_SIZE(ssp2_pins[4]), .modemuxs = ssp2_90_93_modemux, .nmodemuxs = ARRAY_SIZE(ssp2_90_93_modemux), }, }; static const char *const ssp2_grps[] = { "ssp2_13_16_grp", "ssp2_32_35_grp", "ssp2_44_47_grp", "ssp2_61_64_grp", "ssp2_90_93_grp" }; static struct spear_function ssp2_function = { .name = "ssp2", .groups = ssp2_grps, .ngroups = 
ARRAY_SIZE(ssp2_grps), }; /* Pad multiplexing for cadence mii2 as mii device */ static const unsigned mii2_pins[] = { 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97 }; static struct spear_muxreg mii2_muxreg[] = { { .reg = IP_SEL_PAD_80_89_REG, .mask = PMX_PL_80_TO_85_MASK | PMX_PL_86_87_MASK | PMX_PL_88_89_MASK, .val = PMX_MII2_PL_80_TO_85_VAL | PMX_MII2_PL_86_87_VAL | PMX_MII2_PL_88_89_VAL, }, { .reg = IP_SEL_PAD_90_99_REG, .mask = PMX_PL_90_91_MASK | PMX_PL_92_93_MASK | PMX_PL_94_95_MASK | PMX_PL_96_97_MASK, .val = PMX_MII2_PL_90_91_VAL | PMX_MII2_PL_92_93_VAL | PMX_MII2_PL_94_95_VAL | PMX_MII2_PL_96_97_VAL, }, { .reg = EXT_CTRL_REG, .mask = (MAC_MODE_MASK << MAC2_MODE_SHIFT) | (MAC_MODE_MASK << MAC1_MODE_SHIFT) | MII_MDIO_MASK, .val = (MAC_MODE_MII << MAC2_MODE_SHIFT) | (MAC_MODE_MII << MAC1_MODE_SHIFT) | MII_MDIO_81_VAL, }, }; static struct spear_modemux mii2_modemux[] = { { .modes = EXTENDED_MODE, .muxregs = mii2_muxreg, .nmuxregs = ARRAY_SIZE(mii2_muxreg), }, }; static struct spear_pingroup mii2_pingroup = { .name = "mii2_grp", .pins = mii2_pins, .npins = ARRAY_SIZE(mii2_pins), .modemuxs = mii2_modemux, .nmodemuxs = ARRAY_SIZE(mii2_modemux), }; static const char *const mii2_grps[] = { "mii2_grp" }; static struct spear_function mii2_function = { .name = "mii2", .groups = mii2_grps, .ngroups = ARRAY_SIZE(mii2_grps), }; /* Pad multiplexing for cadence mii 1_2 as smii or rmii device */ static const unsigned rmii0_1_pins[] = { 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27 }; static const unsigned smii0_1_pins[] = { 10, 11, 21, 22, 23, 24, 25, 26, 27 }; static struct spear_muxreg mii0_1_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_MII_MASK, .val = 0, }, }; static struct spear_muxreg smii0_1_ext_muxreg[] = { { .reg = IP_SEL_PAD_10_19_REG, .mask = PMX_PL_10_11_MASK, .val = PMX_SMII_PL_10_11_VAL, }, { .reg = IP_SEL_PAD_20_29_REG, .mask = PMX_PL_21_TO_27_MASK, .val = PMX_SMII_PL_21_TO_27_VAL, }, { .reg = EXT_CTRL_REG, 
.mask = (MAC_MODE_MASK << MAC2_MODE_SHIFT) | (MAC_MODE_MASK << MAC1_MODE_SHIFT) | MII_MDIO_MASK, .val = (MAC_MODE_SMII << MAC2_MODE_SHIFT) | (MAC_MODE_SMII << MAC1_MODE_SHIFT) | MII_MDIO_10_11_VAL, }, }; static struct spear_muxreg rmii0_1_ext_muxreg[] = { { .reg = IP_SEL_PAD_10_19_REG, .mask = PMX_PL_10_11_MASK | PMX_PL_13_14_MASK | PMX_PL_15_16_MASK | PMX_PL_17_18_MASK | PMX_PL_19_MASK, .val = PMX_RMII_PL_10_11_VAL | PMX_RMII_PL_13_14_VAL | PMX_RMII_PL_15_16_VAL | PMX_RMII_PL_17_18_VAL | PMX_RMII_PL_19_VAL, }, { .reg = IP_SEL_PAD_20_29_REG, .mask = PMX_PL_20_MASK | PMX_PL_21_TO_27_MASK, .val = PMX_RMII_PL_20_VAL | PMX_RMII_PL_21_TO_27_VAL, }, { .reg = EXT_CTRL_REG, .mask = (MAC_MODE_MASK << MAC2_MODE_SHIFT) | (MAC_MODE_MASK << MAC1_MODE_SHIFT) | MII_MDIO_MASK, .val = (MAC_MODE_RMII << MAC2_MODE_SHIFT) | (MAC_MODE_RMII << MAC1_MODE_SHIFT) | MII_MDIO_10_11_VAL, }, }; static struct spear_modemux mii0_1_modemux[][2] = { { /* configure as smii */ { .modes = AUTO_NET_SMII_MODE | AUTO_EXP_MODE | SMALL_PRINTERS_MODE | EXTENDED_MODE, .muxregs = mii0_1_muxreg, .nmuxregs = ARRAY_SIZE(mii0_1_muxreg), }, { .modes = EXTENDED_MODE, .muxregs = smii0_1_ext_muxreg, .nmuxregs = ARRAY_SIZE(smii0_1_ext_muxreg), }, }, { /* configure as rmii */ { .modes = AUTO_NET_SMII_MODE | AUTO_EXP_MODE | SMALL_PRINTERS_MODE | EXTENDED_MODE, .muxregs = mii0_1_muxreg, .nmuxregs = ARRAY_SIZE(mii0_1_muxreg), }, { .modes = EXTENDED_MODE, .muxregs = rmii0_1_ext_muxreg, .nmuxregs = ARRAY_SIZE(rmii0_1_ext_muxreg), }, }, }; static struct spear_pingroup mii0_1_pingroup[] = { { .name = "smii0_1_grp", .pins = smii0_1_pins, .npins = ARRAY_SIZE(smii0_1_pins), .modemuxs = mii0_1_modemux[0], .nmodemuxs = ARRAY_SIZE(mii0_1_modemux[0]), }, { .name = "rmii0_1_grp", .pins = rmii0_1_pins, .npins = ARRAY_SIZE(rmii0_1_pins), .modemuxs = mii0_1_modemux[1], .nmodemuxs = ARRAY_SIZE(mii0_1_modemux[1]), }, }; static const char *const mii0_1_grps[] = { "smii0_1_grp", "rmii0_1_grp" }; static struct spear_function mii0_1_function 
= { .name = "mii0_1", .groups = mii0_1_grps, .ngroups = ARRAY_SIZE(mii0_1_grps), }; /* Pad multiplexing for i2c1 device */ static const unsigned i2c1_pins[][2] = { { 8, 9 }, { 98, 99 } }; static struct spear_muxreg i2c1_ext_8_9_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_SSP_CS_MASK, .val = 0, }, { .reg = IP_SEL_PAD_0_9_REG, .mask = PMX_PL_8_9_MASK, .val = PMX_I2C1_PL_8_9_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_I2C1_PORT_SEL_MASK, .val = PMX_I2C1_PORT_8_9_VAL, }, }; static struct spear_muxreg i2c1_ext_98_99_muxreg[] = { { .reg = IP_SEL_PAD_90_99_REG, .mask = PMX_PL_98_MASK | PMX_PL_99_MASK, .val = PMX_I2C1_PL_98_VAL | PMX_I2C1_PL_99_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_I2C1_PORT_SEL_MASK, .val = PMX_I2C1_PORT_98_99_VAL, }, }; static struct spear_modemux i2c1_modemux[][1] = { { /* Select signals on pins 8-9 */ { .modes = EXTENDED_MODE, .muxregs = i2c1_ext_8_9_muxreg, .nmuxregs = ARRAY_SIZE(i2c1_ext_8_9_muxreg), }, }, { /* Select signals on pins 98-99 */ { .modes = EXTENDED_MODE, .muxregs = i2c1_ext_98_99_muxreg, .nmuxregs = ARRAY_SIZE(i2c1_ext_98_99_muxreg), }, }, }; static struct spear_pingroup i2c1_pingroup[] = { { .name = "i2c1_8_9_grp", .pins = i2c1_pins[0], .npins = ARRAY_SIZE(i2c1_pins[0]), .modemuxs = i2c1_modemux[0], .nmodemuxs = ARRAY_SIZE(i2c1_modemux[0]), }, { .name = "i2c1_98_99_grp", .pins = i2c1_pins[1], .npins = ARRAY_SIZE(i2c1_pins[1]), .modemuxs = i2c1_modemux[1], .nmodemuxs = ARRAY_SIZE(i2c1_modemux[1]), }, }; static const char *const i2c1_grps[] = { "i2c1_8_9_grp", "i2c1_98_99_grp" }; static struct spear_function i2c1_function = { .name = "i2c1", .groups = i2c1_grps, .ngroups = ARRAY_SIZE(i2c1_grps), }; /* Pad multiplexing for i2c2 device */ static const unsigned i2c2_pins[][2] = { { 0, 1 }, { 2, 3 }, { 19, 20 }, { 75, 76 }, { 96, 97 } }; static struct spear_muxreg i2c2_ext_0_1_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_FIRDA_MASK, .val = 0, }, { .reg = IP_SEL_PAD_0_9_REG, .mask = PMX_PL_0_1_MASK, .val = 
PMX_I2C2_PL_0_1_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_I2C2_PORT_SEL_MASK, .val = PMX_I2C2_PORT_0_1_VAL, }, }; static struct spear_muxreg i2c2_ext_2_3_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_UART0_MASK, .val = 0, }, { .reg = IP_SEL_PAD_0_9_REG, .mask = PMX_PL_2_3_MASK, .val = PMX_I2C2_PL_2_3_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_I2C2_PORT_SEL_MASK, .val = PMX_I2C2_PORT_2_3_VAL, }, }; static struct spear_muxreg i2c2_ext_19_20_muxreg[] = { { .reg = PMX_CONFIG_REG, .mask = PMX_MII_MASK, .val = 0, }, { .reg = IP_SEL_PAD_10_19_REG, .mask = PMX_PL_19_MASK, .val = PMX_I2C2_PL_19_VAL, }, { .reg = IP_SEL_PAD_20_29_REG, .mask = PMX_PL_20_MASK, .val = PMX_I2C2_PL_20_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_I2C2_PORT_SEL_MASK, .val = PMX_I2C2_PORT_19_20_VAL, }, }; static struct spear_muxreg i2c2_ext_75_76_muxreg[] = { { .reg = IP_SEL_PAD_70_79_REG, .mask = PMX_PL_75_76_MASK, .val = PMX_I2C2_PL_75_76_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_I2C2_PORT_SEL_MASK, .val = PMX_I2C2_PORT_75_76_VAL, }, }; static struct spear_muxreg i2c2_ext_96_97_muxreg[] = { { .reg = IP_SEL_PAD_90_99_REG, .mask = PMX_PL_96_97_MASK, .val = PMX_I2C2_PL_96_97_VAL, }, { .reg = IP_SEL_MIX_PAD_REG, .mask = PMX_I2C2_PORT_SEL_MASK, .val = PMX_I2C2_PORT_96_97_VAL, }, }; static struct spear_modemux i2c2_modemux[][1] = { { /* Select signals on pins 0_1 */ { .modes = EXTENDED_MODE, .muxregs = i2c2_ext_0_1_muxreg, .nmuxregs = ARRAY_SIZE(i2c2_ext_0_1_muxreg), }, }, { /* Select signals on pins 2_3 */ { .modes = EXTENDED_MODE, .muxregs = i2c2_ext_2_3_muxreg, .nmuxregs = ARRAY_SIZE(i2c2_ext_2_3_muxreg), }, }, { /* Select signals on pins 19_20 */ { .modes = EXTENDED_MODE, .muxregs = i2c2_ext_19_20_muxreg, .nmuxregs = ARRAY_SIZE(i2c2_ext_19_20_muxreg), }, }, { /* Select signals on pins 75_76 */ { .modes = EXTENDED_MODE, .muxregs = i2c2_ext_75_76_muxreg, .nmuxregs = ARRAY_SIZE(i2c2_ext_75_76_muxreg), }, }, { /* Select signals on pins 96_97 */ { .modes = EXTENDED_MODE, 
.muxregs = i2c2_ext_96_97_muxreg, .nmuxregs = ARRAY_SIZE(i2c2_ext_96_97_muxreg), }, }, }; static struct spear_pingroup i2c2_pingroup[] = { { .name = "i2c2_0_1_grp", .pins = i2c2_pins[0], .npins = ARRAY_SIZE(i2c2_pins[0]), .modemuxs = i2c2_modemux[0], .nmodemuxs = ARRAY_SIZE(i2c2_modemux[0]), }, { .name = "i2c2_2_3_grp", .pins = i2c2_pins[1], .npins = ARRAY_SIZE(i2c2_pins[1]), .modemuxs = i2c2_modemux[1], .nmodemuxs = ARRAY_SIZE(i2c2_modemux[1]), }, { .name = "i2c2_19_20_grp", .pins = i2c2_pins[2], .npins = ARRAY_SIZE(i2c2_pins[2]), .modemuxs = i2c2_modemux[2], .nmodemuxs = ARRAY_SIZE(i2c2_modemux[2]), }, { .name = "i2c2_75_76_grp", .pins = i2c2_pins[3], .npins = ARRAY_SIZE(i2c2_pins[3]), .modemuxs = i2c2_modemux[3], .nmodemuxs = ARRAY_SIZE(i2c2_modemux[3]), }, { .name = "i2c2_96_97_grp", .pins = i2c2_pins[4], .npins = ARRAY_SIZE(i2c2_pins[4]), .modemuxs = i2c2_modemux[4], .nmodemuxs = ARRAY_SIZE(i2c2_modemux[4]), }, }; static const char *const i2c2_grps[] = { "i2c2_0_1_grp", "i2c2_2_3_grp", "i2c2_19_20_grp", "i2c2_75_76_grp", "i2c2_96_97_grp" }; static struct spear_function i2c2_function = { .name = "i2c2", .groups = i2c2_grps, .ngroups = ARRAY_SIZE(i2c2_grps), }; /* pingroups */ static struct spear_pingroup *spear320_pingroups[] = { SPEAR3XX_COMMON_PINGROUPS, &clcd_pingroup, &emi_pingroup, &fsmc_8bit_pingroup, &fsmc_16bit_pingroup, &spp_pingroup, &sdhci_led_pingroup, &sdhci_pingroup[0], &sdhci_pingroup[1], &i2s_pingroup, &uart1_pingroup, &uart1_modem_pingroup[0], &uart1_modem_pingroup[1], &uart1_modem_pingroup[2], &uart1_modem_pingroup[3], &uart2_pingroup, &uart3_pingroup[0], &uart3_pingroup[1], &uart3_pingroup[2], &uart3_pingroup[3], &uart3_pingroup[4], &uart3_pingroup[5], &uart3_pingroup[6], &uart4_pingroup[0], &uart4_pingroup[1], &uart4_pingroup[2], &uart4_pingroup[3], &uart4_pingroup[4], &uart4_pingroup[5], &uart5_pingroup[0], &uart5_pingroup[1], &uart5_pingroup[2], &uart5_pingroup[3], &uart6_pingroup[0], &uart6_pingroup[1], &rs485_pingroup, 
&touchscreen_pingroup, &can0_pingroup, &can1_pingroup, &pwm0_1_pingroup[0], &pwm0_1_pingroup[1], &pwm0_1_pingroup[2], &pwm0_1_pingroup[3], &pwm0_1_pingroup[4], &pwm0_1_pingroup[5], &pwm0_1_pingroup[6], &pwm2_pingroup[0], &pwm2_pingroup[1], &pwm2_pingroup[2], &pwm2_pingroup[3], &pwm2_pingroup[4], &pwm2_pingroup[5], &pwm2_pingroup[6], &pwm3_pingroup[0], &pwm3_pingroup[1], &pwm3_pingroup[2], &pwm3_pingroup[3], &pwm3_pingroup[4], &pwm3_pingroup[5], &ssp1_pingroup[0], &ssp1_pingroup[1], &ssp1_pingroup[2], &ssp1_pingroup[3], &ssp1_pingroup[4], &ssp2_pingroup[0], &ssp2_pingroup[1], &ssp2_pingroup[2], &ssp2_pingroup[3], &ssp2_pingroup[4], &mii2_pingroup, &mii0_1_pingroup[0], &mii0_1_pingroup[1], &i2c1_pingroup[0], &i2c1_pingroup[1], &i2c2_pingroup[0], &i2c2_pingroup[1], &i2c2_pingroup[2], &i2c2_pingroup[3], &i2c2_pingroup[4], }; /* functions */ static struct spear_function *spear320_functions[] = { SPEAR3XX_COMMON_FUNCTIONS, &clcd_function, &emi_function, &fsmc_function, &spp_function, &sdhci_function, &i2s_function, &uart1_function, &uart1_modem_function, &uart2_function, &uart3_function, &uart4_function, &uart5_function, &uart6_function, &rs485_function, &touchscreen_function, &can0_function, &can1_function, &pwm0_1_function, &pwm2_function, &pwm3_function, &ssp1_function, &ssp2_function, &mii2_function, &mii0_1_function, &i2c1_function, &i2c2_function, }; static const struct of_device_id spear320_pinctrl_of_match[] = { { .compatible = "st,spear320-pinmux", }, {}, }; static int spear320_pinctrl_probe(struct platform_device *pdev) { int ret; spear3xx_machdata.groups = spear320_pingroups; spear3xx_machdata.ngroups = ARRAY_SIZE(spear320_pingroups); spear3xx_machdata.functions = spear320_functions; spear3xx_machdata.nfunctions = ARRAY_SIZE(spear320_functions); spear3xx_machdata.modes_supported = true; spear3xx_machdata.pmx_modes = spear320_pmx_modes; spear3xx_machdata.npmx_modes = ARRAY_SIZE(spear320_pmx_modes); pmx_init_addr(&spear3xx_machdata, PMX_CONFIG_REG); 
pmx_init_gpio_pingroup_addr(spear3xx_machdata.gpio_pingroups, spear3xx_machdata.ngpio_pingroups, PMX_CONFIG_REG); ret = spear_pinctrl_probe(pdev, &spear3xx_machdata); if (ret) return ret; return 0; } static int spear320_pinctrl_remove(struct platform_device *pdev) { return spear_pinctrl_remove(pdev); } static struct platform_driver spear320_pinctrl_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .of_match_table = spear320_pinctrl_of_match, }, .probe = spear320_pinctrl_probe, .remove = spear320_pinctrl_remove, }; static int __init spear320_pinctrl_init(void) { return platform_driver_register(&spear320_pinctrl_driver); } arch_initcall(spear320_pinctrl_init); static void __exit spear320_pinctrl_exit(void) { platform_driver_unregister(&spear320_pinctrl_driver); } module_exit(spear320_pinctrl_exit); MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); MODULE_DESCRIPTION("ST Microelectronics SPEAr320 pinctrl driver"); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(of, spear320_pinctrl_of_match);
gpl-2.0