repo_name
string
path
string
copies
string
size
string
content
string
license
string
nrgmilk/linux
drivers/video/omap/lcd_apollon.c
3229
2995
/* * LCD panel support for the Samsung OMAP2 Apollon board * * Copyright (C) 2005,2006 Samsung Electronics * Author: Kyungmin Park <kyungmin.park@samsung.com> * * Derived from drivers/video/omap/lcd-h4.c * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/module.h> #include <linux/platform_device.h> #include <mach/gpio.h> #include "omapfb.h" /* #define USE_35INCH_LCD 1 */ static int apollon_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev) { return 0; } static void apollon_panel_cleanup(struct lcd_panel *panel) { } static int apollon_panel_enable(struct lcd_panel *panel) { return 0; } static void apollon_panel_disable(struct lcd_panel *panel) { } static unsigned long apollon_panel_get_caps(struct lcd_panel *panel) { return 0; } struct lcd_panel apollon_panel = { .name = "apollon", .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC | OMAP_LCDC_INV_HSYNC, .bpp = 16, .data_lines = 18, #ifdef USE_35INCH_LCD .x_res = 240, .y_res = 320, .hsw = 2, .hfp = 3, .hbp = 9, .vsw = 4, .vfp = 3, .vbp = 5, #else .x_res = 480, .y_res = 272, .hsw = 41, .hfp = 2, .hbp = 2, .vsw = 10, .vfp = 2, .vbp = 2, #endif .pixel_clock = 6250, .init = apollon_panel_init, .cleanup = apollon_panel_cleanup, .enable = apollon_panel_enable, .disable = apollon_panel_disable, .get_caps = apollon_panel_get_caps, }; static int 
apollon_panel_probe(struct platform_device *pdev) { omapfb_register_panel(&apollon_panel); return 0; } static int apollon_panel_remove(struct platform_device *pdev) { return 0; } static int apollon_panel_suspend(struct platform_device *pdev, pm_message_t mesg) { return 0; } static int apollon_panel_resume(struct platform_device *pdev) { return 0; } struct platform_driver apollon_panel_driver = { .probe = apollon_panel_probe, .remove = apollon_panel_remove, .suspend = apollon_panel_suspend, .resume = apollon_panel_resume, .driver = { .name = "apollon_lcd", .owner = THIS_MODULE, }, }; static int __init apollon_panel_drv_init(void) { return platform_driver_register(&apollon_panel_driver); } static void __exit apollon_panel_drv_exit(void) { platform_driver_unregister(&apollon_panel_driver); } module_init(apollon_panel_drv_init); module_exit(apollon_panel_drv_exit);
gpl-2.0
wpandroidios/android_kernel_htc_b2wlj_LP50_Sense7
arch/m68k/kernel/signal_mm.c
4509
30213
/* * linux/arch/m68k/kernel/signal.c * * Copyright (C) 1991, 1992 Linus Torvalds * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ /* * Linux/m68k support by Hamish Macdonald * * 68060 fixes by Jesper Skov * * 1997-12-01 Modified for POSIX.1b signals by Andreas Schwab * * mathemu support by Roman Zippel * (Note: fpstate in the signal context is completely ignored for the emulator * and the internal floating point format is put on stack) */ /* * ++roman (07/09/96): implemented signal stacks (specially for tosemu on * Atari :-) Current limitation: Only one sigstack can be active at one time. * If a second signal with SA_ONSTACK set arrives while working on a sigstack, * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested * signal handlers! */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/syscalls.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/ptrace.h> #include <linux/unistd.h> #include <linux/stddef.h> #include <linux/highuid.h> #include <linux/personality.h> #include <linux/tty.h> #include <linux/binfmts.h> #include <linux/module.h> #include <asm/setup.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/traps.h> #include <asm/ucontext.h> #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) static const int frame_extra_sizes[16] = { [1] = -1, /* sizeof(((struct frame *)0)->un.fmt1), */ [2] = sizeof(((struct frame *)0)->un.fmt2), [3] = sizeof(((struct frame *)0)->un.fmt3), #ifdef CONFIG_COLDFIRE [4] = 0, #else [4] = sizeof(((struct frame *)0)->un.fmt4), #endif [5] = -1, /* sizeof(((struct frame *)0)->un.fmt5), */ [6] = -1, /* sizeof(((struct frame *)0)->un.fmt6), */ [7] = sizeof(((struct frame *)0)->un.fmt7), [8] = -1, /* sizeof(((struct frame *)0)->un.fmt8), */ [9] = sizeof(((struct frame *)0)->un.fmt9), [10] = 
sizeof(((struct frame *)0)->un.fmta), [11] = sizeof(((struct frame *)0)->un.fmtb), [12] = -1, /* sizeof(((struct frame *)0)->un.fmtc), */ [13] = -1, /* sizeof(((struct frame *)0)->un.fmtd), */ [14] = -1, /* sizeof(((struct frame *)0)->un.fmte), */ [15] = -1, /* sizeof(((struct frame *)0)->un.fmtf), */ }; int handle_kernel_fault(struct pt_regs *regs) { const struct exception_table_entry *fixup; struct pt_regs *tregs; /* Are we prepared to handle this kernel fault? */ fixup = search_exception_tables(regs->pc); if (!fixup) return 0; /* Create a new four word stack frame, discarding the old one. */ regs->stkadj = frame_extra_sizes[regs->format]; tregs = (struct pt_regs *)((long)regs + regs->stkadj); tregs->vector = regs->vector; #ifdef CONFIG_COLDFIRE tregs->format = 4; #else tregs->format = 0; #endif tregs->pc = fixup->fixup; tregs->sr = regs->sr; return 1; } /* * Atomically swap in the new signal mask, and wait for a signal. */ asmlinkage int sys_sigsuspend(int unused0, int unused1, old_sigset_t mask) { mask &= _BLOCKABLE; spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; siginitset(&current->blocked, mask); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); current->state = TASK_INTERRUPTIBLE; schedule(); set_restore_sigmask(); return -ERESTARTNOHAND; } asmlinkage int sys_sigaction(int sig, const struct old_sigaction __user *act, struct old_sigaction __user *oact) { struct k_sigaction new_ka, old_ka; int ret; if (act) { old_sigset_t mask; if (!access_ok(VERIFY_READ, act, sizeof(*act)) || __get_user(new_ka.sa.sa_handler, &act->sa_handler) || __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || __get_user(new_ka.sa.sa_flags, &act->sa_flags) || __get_user(mask, &act->sa_mask)) return -EFAULT; siginitset(&new_ka.sa.sa_mask, mask); } ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); if (!ret && oact) { if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) return -EFAULT; } return ret; } asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss) { return do_sigaltstack(uss, uoss, rdusp()); } /* * Do a signal return; undo the signal stack. * * Keep the return code on the stack quadword aligned! * That makes the cache flush below easier. */ struct sigframe { char __user *pretcode; int sig; int code; struct sigcontext __user *psc; char retcode[8]; unsigned long extramask[_NSIG_WORDS-1]; struct sigcontext sc; }; struct rt_sigframe { char __user *pretcode; int sig; struct siginfo __user *pinfo; void __user *puc; char retcode[8]; struct siginfo info; struct ucontext uc; }; static unsigned char fpu_version; /* version number of fpu, set by setup_frame */ static inline int restore_fpu_state(struct sigcontext *sc) { int err = 1; if (FPU_IS_EMU) { /* restore registers */ memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12); memcpy(current->thread.fp, sc->sc_fpregs, 24); return 0; } if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) { /* Verify the frame format. 
*/ if (!(CPU_IS_060 || CPU_IS_COLDFIRE) && (sc->sc_fpstate[0] != fpu_version)) goto out; if (CPU_IS_020_OR_030) { if (m68k_fputype & FPU_68881 && !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4)) goto out; if (m68k_fputype & FPU_68882 && !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4)) goto out; } else if (CPU_IS_040) { if (!(sc->sc_fpstate[1] == 0x00 || sc->sc_fpstate[1] == 0x28 || sc->sc_fpstate[1] == 0x60)) goto out; } else if (CPU_IS_060) { if (!(sc->sc_fpstate[3] == 0x00 || sc->sc_fpstate[3] == 0x60 || sc->sc_fpstate[3] == 0xe0)) goto out; } else if (CPU_IS_COLDFIRE) { if (!(sc->sc_fpstate[0] == 0x00 || sc->sc_fpstate[0] == 0x05 || sc->sc_fpstate[0] == 0xe5)) goto out; } else goto out; if (CPU_IS_COLDFIRE) { __asm__ volatile ("fmovemd %0,%%fp0-%%fp1\n\t" "fmovel %1,%%fpcr\n\t" "fmovel %2,%%fpsr\n\t" "fmovel %3,%%fpiar" : /* no outputs */ : "m" (sc->sc_fpregs[0]), "m" (sc->sc_fpcntl[0]), "m" (sc->sc_fpcntl[1]), "m" (sc->sc_fpcntl[2])); } else { __asm__ volatile (".chip 68k/68881\n\t" "fmovemx %0,%%fp0-%%fp1\n\t" "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t" ".chip 68k" : /* no outputs */ : "m" (*sc->sc_fpregs), "m" (*sc->sc_fpcntl)); } } if (CPU_IS_COLDFIRE) { __asm__ volatile ("frestore %0" : : "m" (*sc->sc_fpstate)); } else { __asm__ volatile (".chip 68k/68881\n\t" "frestore %0\n\t" ".chip 68k" : : "m" (*sc->sc_fpstate)); } err = 0; out: return err; } #define FPCONTEXT_SIZE 216 #define uc_fpstate uc_filler[0] #define uc_formatvec uc_filler[FPCONTEXT_SIZE/4] #define uc_extra uc_filler[FPCONTEXT_SIZE/4+1] static inline int rt_restore_fpu_state(struct ucontext __user *uc) { unsigned char fpstate[FPCONTEXT_SIZE]; int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 
12 : 0); fpregset_t fpregs; int err = 1; if (FPU_IS_EMU) { /* restore fpu control register */ if (__copy_from_user(current->thread.fpcntl, uc->uc_mcontext.fpregs.f_fpcntl, 12)) goto out; /* restore all other fpu register */ if (__copy_from_user(current->thread.fp, uc->uc_mcontext.fpregs.f_fpregs, 96)) goto out; return 0; } if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate)) goto out; if (CPU_IS_060 ? fpstate[2] : fpstate[0]) { if (!(CPU_IS_060 || CPU_IS_COLDFIRE)) context_size = fpstate[1]; /* Verify the frame format. */ if (!(CPU_IS_060 || CPU_IS_COLDFIRE) && (fpstate[0] != fpu_version)) goto out; if (CPU_IS_020_OR_030) { if (m68k_fputype & FPU_68881 && !(context_size == 0x18 || context_size == 0xb4)) goto out; if (m68k_fputype & FPU_68882 && !(context_size == 0x38 || context_size == 0xd4)) goto out; } else if (CPU_IS_040) { if (!(context_size == 0x00 || context_size == 0x28 || context_size == 0x60)) goto out; } else if (CPU_IS_060) { if (!(fpstate[3] == 0x00 || fpstate[3] == 0x60 || fpstate[3] == 0xe0)) goto out; } else if (CPU_IS_COLDFIRE) { if (!(fpstate[3] == 0x00 || fpstate[3] == 0x05 || fpstate[3] == 0xe5)) goto out; } else goto out; if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs, sizeof(fpregs))) goto out; if (CPU_IS_COLDFIRE) { __asm__ volatile ("fmovemd %0,%%fp0-%%fp7\n\t" "fmovel %1,%%fpcr\n\t" "fmovel %2,%%fpsr\n\t" "fmovel %3,%%fpiar" : /* no outputs */ : "m" (fpregs.f_fpregs[0]), "m" (fpregs.f_fpcntl[0]), "m" (fpregs.f_fpcntl[1]), "m" (fpregs.f_fpcntl[2])); } else { __asm__ volatile (".chip 68k/68881\n\t" "fmovemx %0,%%fp0-%%fp7\n\t" "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t" ".chip 68k" : /* no outputs */ : "m" (*fpregs.f_fpregs), "m" (*fpregs.f_fpcntl)); } } if (context_size && __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1, context_size)) goto out; if (CPU_IS_COLDFIRE) { __asm__ volatile ("frestore %0" : : "m" (*fpstate)); } else { __asm__ volatile (".chip 68k/68881\n\t" "frestore %0\n\t" ".chip 68k" : : "m" 
(*fpstate)); } err = 0; out: return err; } static int mangle_kernel_stack(struct pt_regs *regs, int formatvec, void __user *fp) { int fsize = frame_extra_sizes[formatvec >> 12]; if (fsize < 0) { /* * user process trying to return with weird frame format */ #ifdef DEBUG printk("user process returning with weird frame format\n"); #endif return 1; } if (!fsize) { regs->format = formatvec >> 12; regs->vector = formatvec & 0xfff; } else { struct switch_stack *sw = (struct switch_stack *)regs - 1; unsigned long buf[fsize / 2]; /* yes, twice as much */ /* that'll make sure that expansion won't crap over data */ if (copy_from_user(buf + fsize / 4, fp, fsize)) return 1; /* point of no return */ regs->format = formatvec >> 12; regs->vector = formatvec & 0xfff; #define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack)) __asm__ __volatile__ ( #ifdef CONFIG_COLDFIRE " movel %0,%/sp\n\t" " bra ret_from_signal\n" #else " movel %0,%/a0\n\t" " subl %1,%/a0\n\t" /* make room on stack */ " movel %/a0,%/sp\n\t" /* set stack pointer */ /* move switch_stack and pt_regs */ "1: movel %0@+,%/a0@+\n\t" " dbra %2,1b\n\t" " lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */ " lsrl #2,%1\n\t" " subql #1,%1\n\t" /* copy to the gap we'd made */ "2: movel %4@+,%/a0@+\n\t" " dbra %1,2b\n\t" " bral ret_from_signal\n" #endif : /* no outputs, it doesn't ever return */ : "a" (sw), "d" (fsize), "d" (frame_offset/4-1), "n" (frame_offset), "a" (buf + fsize/4) : "a0"); #undef frame_offset } return 0; } static inline int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp) { int formatvec; struct sigcontext context; int err; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; /* get previous context */ if (copy_from_user(&context, usc, sizeof(context))) goto badframe; /* restore passed registers */ regs->d0 = context.sc_d0; regs->d1 = context.sc_d1; regs->a0 = context.sc_a0; 
regs->a1 = context.sc_a1; regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff); regs->pc = context.sc_pc; regs->orig_d0 = -1; /* disable syscall checks */ wrusp(context.sc_usp); formatvec = context.sc_formatvec; err = restore_fpu_state(&context); if (err || mangle_kernel_stack(regs, formatvec, fp)) goto badframe; return 0; badframe: return 1; } static inline int rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw, struct ucontext __user *uc) { int temp; greg_t __user *gregs = uc->uc_mcontext.gregs; unsigned long usp; int err; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; err = __get_user(temp, &uc->uc_mcontext.version); if (temp != MCONTEXT_VERSION) goto badframe; /* restore passed registers */ err |= __get_user(regs->d0, &gregs[0]); err |= __get_user(regs->d1, &gregs[1]); err |= __get_user(regs->d2, &gregs[2]); err |= __get_user(regs->d3, &gregs[3]); err |= __get_user(regs->d4, &gregs[4]); err |= __get_user(regs->d5, &gregs[5]); err |= __get_user(sw->d6, &gregs[6]); err |= __get_user(sw->d7, &gregs[7]); err |= __get_user(regs->a0, &gregs[8]); err |= __get_user(regs->a1, &gregs[9]); err |= __get_user(regs->a2, &gregs[10]); err |= __get_user(sw->a3, &gregs[11]); err |= __get_user(sw->a4, &gregs[12]); err |= __get_user(sw->a5, &gregs[13]); err |= __get_user(sw->a6, &gregs[14]); err |= __get_user(usp, &gregs[15]); wrusp(usp); err |= __get_user(regs->pc, &gregs[16]); err |= __get_user(temp, &gregs[17]); regs->sr = (regs->sr & 0xff00) | (temp & 0xff); regs->orig_d0 = -1; /* disable syscall checks */ err |= __get_user(temp, &uc->uc_formatvec); err |= rt_restore_fpu_state(uc); if (err || do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT) goto badframe; if (mangle_kernel_stack(regs, temp, &uc->uc_extra)) goto badframe; return 0; badframe: return 1; } asmlinkage int do_sigreturn(unsigned long __unused) { struct switch_stack *sw = (struct switch_stack *) &__unused; struct 
pt_regs *regs = (struct pt_regs *) (sw + 1); unsigned long usp = rdusp(); struct sigframe __user *frame = (struct sigframe __user *)(usp - 4); sigset_t set; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__get_user(set.sig[0], &frame->sc.sc_mask) || (_NSIG_WORDS > 1 && __copy_from_user(&set.sig[1], &frame->extramask, sizeof(frame->extramask)))) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); current->blocked = set; recalc_sigpending(); if (restore_sigcontext(regs, &frame->sc, frame + 1)) goto badframe; return regs->d0; badframe: force_sig(SIGSEGV, current); return 0; } asmlinkage int do_rt_sigreturn(unsigned long __unused) { struct switch_stack *sw = (struct switch_stack *) &__unused; struct pt_regs *regs = (struct pt_regs *) (sw + 1); unsigned long usp = rdusp(); struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4); sigset_t set; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); current->blocked = set; recalc_sigpending(); if (rt_restore_ucontext(regs, sw, &frame->uc)) goto badframe; return regs->d0; badframe: force_sig(SIGSEGV, current); return 0; } /* * Set up a signal frame. */ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs) { if (FPU_IS_EMU) { /* save registers */ memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12); memcpy(sc->sc_fpregs, current->thread.fp, 24); return; } if (CPU_IS_COLDFIRE) { __asm__ volatile ("fsave %0" : : "m" (*sc->sc_fpstate) : "memory"); } else { __asm__ volatile (".chip 68k/68881\n\t" "fsave %0\n\t" ".chip 68k" : : "m" (*sc->sc_fpstate) : "memory"); } if (CPU_IS_060 ? 
sc->sc_fpstate[2] : sc->sc_fpstate[0]) { fpu_version = sc->sc_fpstate[0]; if (CPU_IS_020_OR_030 && regs->vector >= (VEC_FPBRUC * 4) && regs->vector <= (VEC_FPNAN * 4)) { /* Clear pending exception in 68882 idle frame */ if (*(unsigned short *) sc->sc_fpstate == 0x1f38) sc->sc_fpstate[0x38] |= 1 << 3; } if (CPU_IS_COLDFIRE) { __asm__ volatile ("fmovemd %%fp0-%%fp1,%0\n\t" "fmovel %%fpcr,%1\n\t" "fmovel %%fpsr,%2\n\t" "fmovel %%fpiar,%3" : "=m" (sc->sc_fpregs[0]), "=m" (sc->sc_fpcntl[0]), "=m" (sc->sc_fpcntl[1]), "=m" (sc->sc_fpcntl[2]) : /* no inputs */ : "memory"); } else { __asm__ volatile (".chip 68k/68881\n\t" "fmovemx %%fp0-%%fp1,%0\n\t" "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t" ".chip 68k" : "=m" (*sc->sc_fpregs), "=m" (*sc->sc_fpcntl) : /* no inputs */ : "memory"); } } } static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs) { unsigned char fpstate[FPCONTEXT_SIZE]; int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0); int err = 0; if (FPU_IS_EMU) { /* save fpu control register */ err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl, current->thread.fpcntl, 12); /* save all other fpu register */ err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs, current->thread.fp, 96); return err; } if (CPU_IS_COLDFIRE) { __asm__ volatile ("fsave %0" : : "m" (*fpstate) : "memory"); } else { __asm__ volatile (".chip 68k/68881\n\t" "fsave %0\n\t" ".chip 68k" : : "m" (*fpstate) : "memory"); } err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate); if (CPU_IS_060 ? 
fpstate[2] : fpstate[0]) { fpregset_t fpregs; if (!(CPU_IS_060 || CPU_IS_COLDFIRE)) context_size = fpstate[1]; fpu_version = fpstate[0]; if (CPU_IS_020_OR_030 && regs->vector >= (VEC_FPBRUC * 4) && regs->vector <= (VEC_FPNAN * 4)) { /* Clear pending exception in 68882 idle frame */ if (*(unsigned short *) fpstate == 0x1f38) fpstate[0x38] |= 1 << 3; } if (CPU_IS_COLDFIRE) { __asm__ volatile ("fmovemd %%fp0-%%fp7,%0\n\t" "fmovel %%fpcr,%1\n\t" "fmovel %%fpsr,%2\n\t" "fmovel %%fpiar,%3" : "=m" (fpregs.f_fpregs[0]), "=m" (fpregs.f_fpcntl[0]), "=m" (fpregs.f_fpcntl[1]), "=m" (fpregs.f_fpcntl[2]) : /* no inputs */ : "memory"); } else { __asm__ volatile (".chip 68k/68881\n\t" "fmovemx %%fp0-%%fp7,%0\n\t" "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t" ".chip 68k" : "=m" (*fpregs.f_fpregs), "=m" (*fpregs.f_fpcntl) : /* no inputs */ : "memory"); } err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs, sizeof(fpregs)); } if (context_size) err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4, context_size); return err; } static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs, unsigned long mask) { sc->sc_mask = mask; sc->sc_usp = rdusp(); sc->sc_d0 = regs->d0; sc->sc_d1 = regs->d1; sc->sc_a0 = regs->a0; sc->sc_a1 = regs->a1; sc->sc_sr = regs->sr; sc->sc_pc = regs->pc; sc->sc_formatvec = regs->format << 12 | regs->vector; save_fpu_state(sc, regs); } static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs) { struct switch_stack *sw = (struct switch_stack *)regs - 1; greg_t __user *gregs = uc->uc_mcontext.gregs; int err = 0; err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version); err |= __put_user(regs->d0, &gregs[0]); err |= __put_user(regs->d1, &gregs[1]); err |= __put_user(regs->d2, &gregs[2]); err |= __put_user(regs->d3, &gregs[3]); err |= __put_user(regs->d4, &gregs[4]); err |= __put_user(regs->d5, &gregs[5]); err |= __put_user(sw->d6, &gregs[6]); err |= __put_user(sw->d7, &gregs[7]); err |= __put_user(regs->a0, 
&gregs[8]); err |= __put_user(regs->a1, &gregs[9]); err |= __put_user(regs->a2, &gregs[10]); err |= __put_user(sw->a3, &gregs[11]); err |= __put_user(sw->a4, &gregs[12]); err |= __put_user(sw->a5, &gregs[13]); err |= __put_user(sw->a6, &gregs[14]); err |= __put_user(rdusp(), &gregs[15]); err |= __put_user(regs->pc, &gregs[16]); err |= __put_user(regs->sr, &gregs[17]); err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec); err |= rt_save_fpu_state(uc, regs); return err; } static inline void push_cache (unsigned long vaddr) { /* * Using the old cache_push_v() was really a big waste. * * What we are trying to do is to flush 8 bytes to ram. * Flushing 2 cache lines of 16 bytes is much cheaper than * flushing 1 or 2 pages, as previously done in * cache_push_v(). * Jes */ if (CPU_IS_040) { unsigned long temp; __asm__ __volatile__ (".chip 68040\n\t" "nop\n\t" "ptestr (%1)\n\t" "movec %%mmusr,%0\n\t" ".chip 68k" : "=r" (temp) : "a" (vaddr)); temp &= PAGE_MASK; temp |= vaddr & ~PAGE_MASK; __asm__ __volatile__ (".chip 68040\n\t" "nop\n\t" "cpushl %%bc,(%0)\n\t" ".chip 68k" : : "a" (temp)); } else if (CPU_IS_060) { unsigned long temp; __asm__ __volatile__ (".chip 68060\n\t" "plpar (%0)\n\t" ".chip 68k" : "=a" (temp) : "0" (vaddr)); __asm__ __volatile__ (".chip 68060\n\t" "cpushl %%bc,(%0)\n\t" ".chip 68k" : : "a" (temp)); } else if (!CPU_IS_COLDFIRE) { /* * 68030/68020 have no writeback cache; * still need to clear icache. * Note that vaddr is guaranteed to be long word aligned. */ unsigned long temp; asm volatile ("movec %%cacr,%0" : "=r" (temp)); temp += 4; asm volatile ("movec %0,%%caar\n\t" "movec %1,%%cacr" : : "r" (vaddr), "r" (temp)); asm volatile ("movec %0,%%caar\n\t" "movec %1,%%cacr" : : "r" (vaddr + 4), "r" (temp)); } } static inline void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) { unsigned long usp; /* Default to using normal stack. 
*/ usp = rdusp(); /* This is the X/Open sanctioned signal stack switching. */ if (ka->sa.sa_flags & SA_ONSTACK) { if (!sas_ss_flags(usp)) usp = current->sas_ss_sp + current->sas_ss_size; } return (void __user *)((usp - frame_size) & -8UL); } static int setup_frame (int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs) { struct sigframe __user *frame; int fsize = frame_extra_sizes[regs->format]; struct sigcontext context; int err = 0; if (fsize < 0) { #ifdef DEBUG printk ("setup_frame: Unknown frame format %#x\n", regs->format); #endif goto give_sigsegv; } frame = get_sigframe(ka, regs, sizeof(*frame) + fsize); if (fsize) err |= copy_to_user (frame + 1, regs + 1, fsize); err |= __put_user((current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap && sig < 32 ? current_thread_info()->exec_domain->signal_invmap[sig] : sig), &frame->sig); err |= __put_user(regs->vector, &frame->code); err |= __put_user(&frame->sc, &frame->psc); if (_NSIG_WORDS > 1) err |= copy_to_user(frame->extramask, &set->sig[1], sizeof(frame->extramask)); setup_sigcontext(&context, regs, set->sig[0]); err |= copy_to_user (&frame->sc, &context, sizeof(context)); /* Set up to return from userspace. */ err |= __put_user(frame->retcode, &frame->pretcode); /* moveq #,d0; trap #0 */ err |= __put_user(0x70004e40 + (__NR_sigreturn << 16), (long __user *)(frame->retcode)); if (err) goto give_sigsegv; push_cache ((unsigned long) &frame->retcode); /* * Set up registers for signal handler. All the state we are about * to destroy is successfully copied to sigframe. */ wrusp ((unsigned long) frame); regs->pc = (unsigned long) ka->sa.sa_handler; /* * This is subtle; if we build more than one sigframe, all but the * first one will see frame format 0 and have fsize == 0, so we won't * screw stkadj. */ if (fsize) regs->stkadj = fsize; /* Prepare to skip over the extra stuff in the exception frame. 
*/ if (regs->stkadj) { struct pt_regs *tregs = (struct pt_regs *)((ulong)regs + regs->stkadj); #ifdef DEBUG printk("Performing stackadjust=%04x\n", regs->stkadj); #endif /* This must be copied with decreasing addresses to handle overlaps. */ tregs->vector = 0; tregs->format = 0; tregs->pc = regs->pc; tregs->sr = regs->sr; } return 0; give_sigsegv: force_sigsegv(sig, current); return err; } static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame; int fsize = frame_extra_sizes[regs->format]; int err = 0; if (fsize < 0) { #ifdef DEBUG printk ("setup_frame: Unknown frame format %#x\n", regs->format); #endif goto give_sigsegv; } frame = get_sigframe(ka, regs, sizeof(*frame)); if (fsize) err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize); err |= __put_user((current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap && sig < 32 ? current_thread_info()->exec_domain->signal_invmap[sig] : sig), &frame->sig); err |= __put_user(&frame->info, &frame->pinfo); err |= __put_user(&frame->uc, &frame->puc); err |= copy_siginfo_to_user(&frame->info, info); /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(NULL, &frame->uc.uc_link); err |= __put_user((void __user *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(rdusp()), &frame->uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= rt_setup_ucontext(&frame->uc, regs); err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set)); /* Set up to return from userspace. 
*/ err |= __put_user(frame->retcode, &frame->pretcode); #ifdef __mcoldfire__ /* movel #__NR_rt_sigreturn,d0; trap #0 */ err |= __put_user(0x203c0000, (long __user *)(frame->retcode + 0)); err |= __put_user(0x00004e40 + (__NR_rt_sigreturn << 16), (long __user *)(frame->retcode + 4)); #else /* moveq #,d0; notb d0; trap #0 */ err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16), (long __user *)(frame->retcode + 0)); err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4)); #endif if (err) goto give_sigsegv; push_cache ((unsigned long) &frame->retcode); /* * Set up registers for signal handler. All the state we are about * to destroy is successfully copied to sigframe. */ wrusp ((unsigned long) frame); regs->pc = (unsigned long) ka->sa.sa_handler; /* * This is subtle; if we build more than one sigframe, all but the * first one will see frame format 0 and have fsize == 0, so we won't * screw stkadj. */ if (fsize) regs->stkadj = fsize; /* Prepare to skip over the extra stuff in the exception frame. */ if (regs->stkadj) { struct pt_regs *tregs = (struct pt_regs *)((ulong)regs + regs->stkadj); #ifdef DEBUG printk("Performing stackadjust=%04x\n", regs->stkadj); #endif /* This must be copied with decreasing addresses to handle overlaps. 
*/ tregs->vector = 0; tregs->format = 0; tregs->pc = regs->pc; tregs->sr = regs->sr; } return 0; give_sigsegv: force_sigsegv(sig, current); return err; } static inline void handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) { switch (regs->d0) { case -ERESTARTNOHAND: if (!has_handler) goto do_restart; regs->d0 = -EINTR; break; case -ERESTART_RESTARTBLOCK: if (!has_handler) { regs->d0 = __NR_restart_syscall; regs->pc -= 2; break; } regs->d0 = -EINTR; break; case -ERESTARTSYS: if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) { regs->d0 = -EINTR; break; } /* fallthrough */ case -ERESTARTNOINTR: do_restart: regs->d0 = regs->orig_d0; regs->pc -= 2; break; } } void ptrace_signal_deliver(struct pt_regs *regs, void *cookie) { if (regs->orig_d0 < 0) return; switch (regs->d0) { case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: regs->d0 = regs->orig_d0; regs->orig_d0 = -1; regs->pc -= 2; break; } } /* * OK, we're invoking a handler */ static void handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) { int err; /* are we from a system call? */ if (regs->orig_d0 >= 0) /* If so, check system call restarting.. */ handle_restart(regs, ka, 1); /* set up the stack frame */ if (ka->sa.sa_flags & SA_SIGINFO) err = setup_rt_frame(sig, ka, info, oldset, regs); else err = setup_frame(sig, ka, oldset, regs); if (err) return; sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NODEFER)) sigaddset(&current->blocked,sig); recalc_sigpending(); if (test_thread_flag(TIF_DELAYED_TRACE)) { regs->sr &= ~0x8000; send_sig(SIGTRAP, current, 1); } clear_thread_flag(TIF_RESTORE_SIGMASK); } /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. 
*/ asmlinkage void do_signal(struct pt_regs *regs) { siginfo_t info; struct k_sigaction ka; int signr; sigset_t *oldset; current->thread.esp0 = (unsigned long) regs; if (test_thread_flag(TIF_RESTORE_SIGMASK)) oldset = &current->saved_sigmask; else oldset = &current->blocked; signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { /* Whee! Actually deliver the signal. */ handle_signal(signr, &ka, &info, oldset, regs); return; } /* Did we come from a system call? */ if (regs->orig_d0 >= 0) /* Restart the system call - no handlers present */ handle_restart(regs, NULL, 0); /* If there's no signal to deliver, we just restore the saved mask. */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) { clear_thread_flag(TIF_RESTORE_SIGMASK); sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); } }
gpl-2.0
StarkDroid/android_kernel_motorola_msm8610
drivers/usb/gadget/ether.c
5021
12245
/* * ether.c -- Ethernet gadget driver, with CDC and non-CDC options * * Copyright (C) 2003-2005,2008 David Brownell * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger * Copyright (C) 2008 Nokia Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ /* #define VERBOSE_DEBUG */ #include <linux/kernel.h> #include <linux/utsname.h> #if defined USB_ETH_RNDIS # undef USB_ETH_RNDIS #endif #ifdef CONFIG_USB_ETH_RNDIS # define USB_ETH_RNDIS y #endif #include "u_ether.h" /* * Ethernet gadget driver -- with CDC and non-CDC options * Builds on hardware support for a full duplex link. * * CDC Ethernet is the standard USB solution for sending Ethernet frames * using USB. Real hardware tends to use the same framing protocol but look * different for control features. This driver strongly prefers to use * this USB-IF standard as its open-systems interoperability solution; * most host side USB stacks (except from Microsoft) support it. * * This is sometimes called "CDC ECM" (Ethernet Control Model) to support * TLA-soup. "CDC ACM" (Abstract Control Model) is for modems, and a new * "CDC EEM" (Ethernet Emulation Model) is starting to spread. * * There's some hardware that can't talk CDC ECM. We make that hardware * implement a "minimalist" vendor-agnostic CDC core: same framing, but * link-level setup only requires activating the configuration. Only the * endpoint descriptors, and product/vendor IDs, are relevant; no control * operations are available. Linux supports it, but other host operating * systems may not. (This is a subset of CDC Ethernet.) * * It turns out that if you add a few descriptors to that "CDC Subset", * (Windows) host side drivers from MCCI can treat it as one submode of * a proprietary scheme called "SAFE" ... 
without needing to know about * specific product/vendor IDs. So we do that, making it easier to use * those MS-Windows drivers. Those added descriptors make it resemble a * CDC MDLM device, but they don't change device behavior at all. (See * MCCI Engineering report 950198 "SAFE Networking Functions".) * * A third option is also in use. Rather than CDC Ethernet, or something * simpler, Microsoft pushes their own approach: RNDIS. The published * RNDIS specs are ambiguous and appear to be incomplete, and are also * needlessly complex. They borrow more from CDC ACM than CDC ECM. */ #define DRIVER_DESC "Ethernet Gadget" #define DRIVER_VERSION "Memorial Day 2008" #ifdef USB_ETH_RNDIS #define PREFIX "RNDIS/" #else #define PREFIX "" #endif /* * This driver aims for interoperability by using CDC ECM unless * * can_support_ecm() * * returns false, in which case it supports the CDC Subset. By default, * that returns true; most hardware has no problems with CDC ECM, that's * a good default. Previous versions of this driver had no default; this * version changes that, removing overhead for new controller support. * * IF YOUR HARDWARE CAN'T SUPPORT CDC ECM, UPDATE THAT ROUTINE! */ static inline bool has_rndis(void) { #ifdef USB_ETH_RNDIS return true; #else return false; #endif } /*-------------------------------------------------------------------------*/ /* * Kbuild is not very cooperative with respect to linking separately * compiled library objects into one module. So for now we won't use * separate compilation ... ensuring init/exit sections work to shrink * the runtime footprint, and giving us at least some parts of what * a "gcc --combine ... part1.c part2.c part3.c ... " build would. 
*/ #include "composite.c" #include "usbstring.c" #include "config.c" #include "epautoconf.c" #include "f_ecm.c" #include "f_subset.c" #ifdef USB_ETH_RNDIS #include "f_rndis.c" #include "rndis.c" #endif #include "f_eem.c" #include "u_ether.c" /*-------------------------------------------------------------------------*/ /* DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!! * Instead: allocate your own, using normal USB-IF procedures. */ /* Thanks to NetChip Technologies for donating this product ID. * It's for devices with only CDC Ethernet configurations. */ #define CDC_VENDOR_NUM 0x0525 /* NetChip */ #define CDC_PRODUCT_NUM 0xa4a1 /* Linux-USB Ethernet Gadget */ /* For hardware that can't talk CDC, we use the same vendor ID that * ARM Linux has used for ethernet-over-usb, both with sa1100 and * with pxa250. We're protocol-compatible, if the host-side drivers * use the endpoint descriptors. bcdDevice (version) is nonzero, so * drivers that need to hard-wire endpoint numbers have a hook. * * The protocol is a minimal subset of CDC Ether, which works on any bulk * hardware that's not deeply broken ... even on hardware that can't talk * RNDIS (like SA-1100, with no interrupt endpoint, or anything that * doesn't handle control-OUT). */ #define SIMPLE_VENDOR_NUM 0x049f #define SIMPLE_PRODUCT_NUM 0x505a /* For hardware that can talk RNDIS and either of the above protocols, * use this ID ... the windows INF files will know it. Unless it's * used with CDC Ethernet, Linux 2.4 hosts will need updates to choose * the non-RNDIS configuration. 
*/ #define RNDIS_VENDOR_NUM 0x0525 /* NetChip */ #define RNDIS_PRODUCT_NUM 0xa4a2 /* Ethernet/RNDIS Gadget */ /* For EEM gadgets */ #define EEM_VENDOR_NUM 0x1d6b /* Linux Foundation */ #define EEM_PRODUCT_NUM 0x0102 /* EEM Gadget */ /*-------------------------------------------------------------------------*/ static struct usb_device_descriptor device_desc = { .bLength = sizeof device_desc, .bDescriptorType = USB_DT_DEVICE, .bcdUSB = cpu_to_le16 (0x0200), .bDeviceClass = USB_CLASS_COMM, .bDeviceSubClass = 0, .bDeviceProtocol = 0, /* .bMaxPacketSize0 = f(hardware) */ /* Vendor and product id defaults change according to what configs * we support. (As does bNumConfigurations.) These values can * also be overridden by module parameters. */ .idVendor = cpu_to_le16 (CDC_VENDOR_NUM), .idProduct = cpu_to_le16 (CDC_PRODUCT_NUM), /* .bcdDevice = f(hardware) */ /* .iManufacturer = DYNAMIC */ /* .iProduct = DYNAMIC */ /* NO SERIAL NUMBER */ .bNumConfigurations = 1, }; static struct usb_otg_descriptor otg_descriptor = { .bLength = sizeof otg_descriptor, .bDescriptorType = USB_DT_OTG, /* REVISIT SRP-only hardware is possible, although * it would not be called "OTG" ... 
*/ .bmAttributes = USB_OTG_SRP | USB_OTG_HNP, }; static const struct usb_descriptor_header *otg_desc[] = { (struct usb_descriptor_header *) &otg_descriptor, NULL, }; /* string IDs are assigned dynamically */ #define STRING_MANUFACTURER_IDX 0 #define STRING_PRODUCT_IDX 1 static char manufacturer[50]; static struct usb_string strings_dev[] = { [STRING_MANUFACTURER_IDX].s = manufacturer, [STRING_PRODUCT_IDX].s = PREFIX DRIVER_DESC, { } /* end of list */ }; static struct usb_gadget_strings stringtab_dev = { .language = 0x0409, /* en-us */ .strings = strings_dev, }; static struct usb_gadget_strings *dev_strings[] = { &stringtab_dev, NULL, }; static u8 hostaddr[ETH_ALEN]; /*-------------------------------------------------------------------------*/ /* * We may not have an RNDIS configuration, but if we do it needs to be * the first one present. That's to make Microsoft's drivers happy, * and to follow DOCSIS 1.0 (cable modem standard). */ static int __init rndis_do_config(struct usb_configuration *c) { /* FIXME alloc iConfiguration string, set it in c->strings */ if (gadget_is_otg(c->cdev->gadget)) { c->descriptors = otg_desc; c->bmAttributes |= USB_CONFIG_ATT_WAKEUP; } return rndis_bind_config(c, hostaddr); } static struct usb_configuration rndis_config_driver = { .label = "RNDIS", .bConfigurationValue = 2, /* .iConfiguration = DYNAMIC */ .bmAttributes = USB_CONFIG_ATT_SELFPOWER, }; /*-------------------------------------------------------------------------*/ #ifdef CONFIG_USB_ETH_EEM static bool use_eem = 1; #else static bool use_eem; #endif module_param(use_eem, bool, 0); MODULE_PARM_DESC(use_eem, "use CDC EEM mode"); /* * We _always_ have an ECM, CDC Subset, or EEM configuration. 
*/ static int __init eth_do_config(struct usb_configuration *c) { /* FIXME alloc iConfiguration string, set it in c->strings */ if (gadget_is_otg(c->cdev->gadget)) { c->descriptors = otg_desc; c->bmAttributes |= USB_CONFIG_ATT_WAKEUP; } if (use_eem) return eem_bind_config(c); else if (can_support_ecm(c->cdev->gadget)) return ecm_bind_config(c, hostaddr); else return geth_bind_config(c, hostaddr); } static struct usb_configuration eth_config_driver = { /* .label = f(hardware) */ .bConfigurationValue = 1, /* .iConfiguration = DYNAMIC */ .bmAttributes = USB_CONFIG_ATT_SELFPOWER, }; /*-------------------------------------------------------------------------*/ static int __init eth_bind(struct usb_composite_dev *cdev) { int gcnum; struct usb_gadget *gadget = cdev->gadget; int status; /* set up network link layer */ status = gether_setup(cdev->gadget, hostaddr); if (status < 0) return status; /* set up main config label and device descriptor */ if (use_eem) { /* EEM */ eth_config_driver.label = "CDC Ethernet (EEM)"; device_desc.idVendor = cpu_to_le16(EEM_VENDOR_NUM); device_desc.idProduct = cpu_to_le16(EEM_PRODUCT_NUM); } else if (can_support_ecm(cdev->gadget)) { /* ECM */ eth_config_driver.label = "CDC Ethernet (ECM)"; } else { /* CDC Subset */ eth_config_driver.label = "CDC Subset/SAFE"; device_desc.idVendor = cpu_to_le16(SIMPLE_VENDOR_NUM); device_desc.idProduct = cpu_to_le16(SIMPLE_PRODUCT_NUM); if (!has_rndis()) device_desc.bDeviceClass = USB_CLASS_VENDOR_SPEC; } if (has_rndis()) { /* RNDIS plus ECM-or-Subset */ device_desc.idVendor = cpu_to_le16(RNDIS_VENDOR_NUM); device_desc.idProduct = cpu_to_le16(RNDIS_PRODUCT_NUM); device_desc.bNumConfigurations = 2; } gcnum = usb_gadget_controller_number(gadget); if (gcnum >= 0) device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum); else { /* We assume that can_support_ecm() tells the truth; * but if the controller isn't recognized at all then * that assumption is a bit more likely to be wrong. 
*/ dev_warn(&gadget->dev, "controller '%s' not recognized; trying %s\n", gadget->name, eth_config_driver.label); device_desc.bcdDevice = cpu_to_le16(0x0300 | 0x0099); } /* Allocate string descriptor numbers ... note that string * contents can be overridden by the composite_dev glue. */ /* device descriptor strings: manufacturer, product */ snprintf(manufacturer, sizeof manufacturer, "%s %s with %s", init_utsname()->sysname, init_utsname()->release, gadget->name); status = usb_string_id(cdev); if (status < 0) goto fail; strings_dev[STRING_MANUFACTURER_IDX].id = status; device_desc.iManufacturer = status; status = usb_string_id(cdev); if (status < 0) goto fail; strings_dev[STRING_PRODUCT_IDX].id = status; device_desc.iProduct = status; /* register our configuration(s); RNDIS first, if it's used */ if (has_rndis()) { status = usb_add_config(cdev, &rndis_config_driver, rndis_do_config); if (status < 0) goto fail; } status = usb_add_config(cdev, &eth_config_driver, eth_do_config); if (status < 0) goto fail; dev_info(&gadget->dev, "%s, version: " DRIVER_VERSION "\n", DRIVER_DESC); return 0; fail: gether_cleanup(); return status; } static int __exit eth_unbind(struct usb_composite_dev *cdev) { gether_cleanup(); return 0; } static struct usb_composite_driver eth_driver = { .name = "g_ether", .dev = &device_desc, .strings = dev_strings, .max_speed = USB_SPEED_SUPER, .unbind = __exit_p(eth_unbind), }; MODULE_DESCRIPTION(PREFIX DRIVER_DESC); MODULE_AUTHOR("David Brownell, Benedikt Spanger"); MODULE_LICENSE("GPL"); static int __init init(void) { return usb_composite_probe(&eth_driver, eth_bind); } module_init(init); static void __exit cleanup(void) { usb_composite_unregister(&eth_driver); } module_exit(cleanup);
gpl-2.0
BytecodeMe/vanquish
drivers/isdn/hisax/teleint.c
5021
7915
/* $Id: teleint.c,v 1.16.2.5 2004/01/19 15:31:50 keil Exp $ * * low level stuff for TeleInt isdn cards * * Author Karsten Keil * Copyright by Karsten Keil <keil@isdn4linux.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/init.h> #include "hisax.h" #include "isac.h" #include "hfc_2bs0.h" #include "isdnl1.h" static const char *TeleInt_revision = "$Revision: 1.16.2.5 $"; #define byteout(addr,val) outb(val,addr) #define bytein(addr) inb(addr) static inline u_char readreg(unsigned int ale, unsigned int adr, u_char off) { register u_char ret; int max_delay = 2000; byteout(ale, off); ret = HFC_BUSY & bytein(ale); while (ret && --max_delay) ret = HFC_BUSY & bytein(ale); if (!max_delay) { printk(KERN_WARNING "TeleInt Busy not inactive\n"); return (0); } ret = bytein(adr); return (ret); } static inline void readfifo(unsigned int ale, unsigned int adr, u_char off, u_char * data, int size) { register u_char ret; register int max_delay = 20000; register int i; byteout(ale, off); for (i = 0; i<size; i++) { ret = HFC_BUSY & bytein(ale); while (ret && --max_delay) ret = HFC_BUSY & bytein(ale); if (!max_delay) { printk(KERN_WARNING "TeleInt Busy not inactive\n"); return; } data[i] = bytein(adr); } } static inline void writereg(unsigned int ale, unsigned int adr, u_char off, u_char data) { register u_char ret; int max_delay = 2000; byteout(ale, off); ret = HFC_BUSY & bytein(ale); while (ret && --max_delay) ret = HFC_BUSY & bytein(ale); if (!max_delay) { printk(KERN_WARNING "TeleInt Busy not inactive\n"); return; } byteout(adr, data); } static inline void writefifo(unsigned int ale, unsigned int adr, u_char off, u_char * data, int size) { register u_char ret; register int max_delay = 20000; register int i; byteout(ale, off); for (i = 0; i<size; i++) { ret = HFC_BUSY & bytein(ale); while (ret && --max_delay) ret = HFC_BUSY & bytein(ale); if (!max_delay) { 
printk(KERN_WARNING "TeleInt Busy not inactive\n"); return; } byteout(adr, data[i]); } } /* Interface functions */ static u_char ReadISAC(struct IsdnCardState *cs, u_char offset) { cs->hw.hfc.cip = offset; return (readreg(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, offset)); } static void WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value) { cs->hw.hfc.cip = offset; writereg(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, offset, value); } static void ReadISACfifo(struct IsdnCardState *cs, u_char * data, int size) { cs->hw.hfc.cip = 0; readfifo(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, 0, data, size); } static void WriteISACfifo(struct IsdnCardState *cs, u_char * data, int size) { cs->hw.hfc.cip = 0; writefifo(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, 0, data, size); } static u_char ReadHFC(struct IsdnCardState *cs, int data, u_char reg) { register u_char ret; if (data) { cs->hw.hfc.cip = reg; byteout(cs->hw.hfc.addr | 1, reg); ret = bytein(cs->hw.hfc.addr); if (cs->debug & L1_DEB_HSCX_FIFO && (data != 2)) debugl1(cs, "hfc RD %02x %02x", reg, ret); } else ret = bytein(cs->hw.hfc.addr | 1); return (ret); } static void WriteHFC(struct IsdnCardState *cs, int data, u_char reg, u_char value) { byteout(cs->hw.hfc.addr | 1, reg); cs->hw.hfc.cip = reg; if (data) byteout(cs->hw.hfc.addr, value); if (cs->debug & L1_DEB_HSCX_FIFO && (data != 2)) debugl1(cs, "hfc W%c %02x %02x", data ? 
'D' : 'C', reg, value); } static irqreturn_t TeleInt_interrupt(int intno, void *dev_id) { struct IsdnCardState *cs = dev_id; u_char val; u_long flags; spin_lock_irqsave(&cs->lock, flags); val = readreg(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, ISAC_ISTA); Start_ISAC: if (val) isac_interrupt(cs, val); val = readreg(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, ISAC_ISTA); if (val) { if (cs->debug & L1_DEB_ISAC) debugl1(cs, "ISAC IntStat after IntRoutine"); goto Start_ISAC; } writereg(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, ISAC_MASK, 0xFF); writereg(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, ISAC_MASK, 0x0); spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } static void TeleInt_Timer(struct IsdnCardState *cs) { int stat = 0; u_long flags; spin_lock_irqsave(&cs->lock, flags); if (cs->bcs[0].mode) { stat |= 1; main_irq_hfc(&cs->bcs[0]); } if (cs->bcs[1].mode) { stat |= 2; main_irq_hfc(&cs->bcs[1]); } spin_unlock_irqrestore(&cs->lock, flags); stat = HZ/100; if (!stat) stat = 1; cs->hw.hfc.timer.expires = jiffies + stat; add_timer(&cs->hw.hfc.timer); } static void release_io_TeleInt(struct IsdnCardState *cs) { del_timer(&cs->hw.hfc.timer); releasehfc(cs); if (cs->hw.hfc.addr) release_region(cs->hw.hfc.addr, 2); } static void reset_TeleInt(struct IsdnCardState *cs) { printk(KERN_INFO "TeleInt: resetting card\n"); cs->hw.hfc.cirm |= HFC_RESET; byteout(cs->hw.hfc.addr | 1, cs->hw.hfc.cirm); /* Reset On */ mdelay(10); cs->hw.hfc.cirm &= ~HFC_RESET; byteout(cs->hw.hfc.addr | 1, cs->hw.hfc.cirm); /* Reset Off */ mdelay(10); } static int TeleInt_card_msg(struct IsdnCardState *cs, int mt, void *arg) { u_long flags; int delay; switch (mt) { case CARD_RESET: spin_lock_irqsave(&cs->lock, flags); reset_TeleInt(cs); spin_unlock_irqrestore(&cs->lock, flags); return(0); case CARD_RELEASE: release_io_TeleInt(cs); return(0); case CARD_INIT: spin_lock_irqsave(&cs->lock, flags); reset_TeleInt(cs); inithfc(cs); clear_pending_isac_ints(cs); initisac(cs); /* Reenable all IRQ */ cs->writeisac(cs, 
ISAC_MASK, 0); cs->writeisac(cs, ISAC_CMDR, 0x41); spin_unlock_irqrestore(&cs->lock, flags); delay = HZ/100; if (!delay) delay = 1; cs->hw.hfc.timer.expires = jiffies + delay; add_timer(&cs->hw.hfc.timer); return(0); case CARD_TEST: return(0); } return(0); } int __devinit setup_TeleInt(struct IsdnCard *card) { struct IsdnCardState *cs = card->cs; char tmp[64]; strcpy(tmp, TeleInt_revision); printk(KERN_INFO "HiSax: TeleInt driver Rev. %s\n", HiSax_getrev(tmp)); if (cs->typ != ISDN_CTYPE_TELEINT) return (0); cs->hw.hfc.addr = card->para[1] & 0x3fe; cs->irq = card->para[0]; cs->hw.hfc.cirm = HFC_CIRM; cs->hw.hfc.isac_spcr = 0x00; cs->hw.hfc.cip = 0; cs->hw.hfc.ctmt = HFC_CTMT | HFC_CLTIMER; cs->bcs[0].hw.hfc.send = NULL; cs->bcs[1].hw.hfc.send = NULL; cs->hw.hfc.fifosize = 7 * 1024 + 512; cs->hw.hfc.timer.function = (void *) TeleInt_Timer; cs->hw.hfc.timer.data = (long) cs; init_timer(&cs->hw.hfc.timer); if (!request_region(cs->hw.hfc.addr, 2, "TeleInt isdn")) { printk(KERN_WARNING "HiSax: TeleInt config port %x-%x already in use\n", cs->hw.hfc.addr, cs->hw.hfc.addr + 2); return (0); } /* HW IO = IO */ byteout(cs->hw.hfc.addr, cs->hw.hfc.addr & 0xff); byteout(cs->hw.hfc.addr | 1, ((cs->hw.hfc.addr & 0x300) >> 8) | 0x54); switch (cs->irq) { case 3: cs->hw.hfc.cirm |= HFC_INTA; break; case 4: cs->hw.hfc.cirm |= HFC_INTB; break; case 5: cs->hw.hfc.cirm |= HFC_INTC; break; case 7: cs->hw.hfc.cirm |= HFC_INTD; break; case 10: cs->hw.hfc.cirm |= HFC_INTE; break; case 11: cs->hw.hfc.cirm |= HFC_INTF; break; default: printk(KERN_WARNING "TeleInt: wrong IRQ\n"); release_io_TeleInt(cs); return (0); } byteout(cs->hw.hfc.addr | 1, cs->hw.hfc.cirm); byteout(cs->hw.hfc.addr | 1, cs->hw.hfc.ctmt); printk(KERN_INFO "TeleInt: defined at 0x%x IRQ %d\n", cs->hw.hfc.addr, cs->irq); setup_isac(cs); cs->readisac = &ReadISAC; cs->writeisac = &WriteISAC; cs->readisacfifo = &ReadISACfifo; cs->writeisacfifo = &WriteISACfifo; cs->BC_Read_Reg = &ReadHFC; cs->BC_Write_Reg = &WriteHFC; 
cs->cardmsg = &TeleInt_card_msg; cs->irq_func = &TeleInt_interrupt; ISACVersion(cs, "TeleInt:"); return (1); }
gpl-2.0
lujji/JXD-7800b-JB-kernel
drivers/isdn/hisax/teleint.c
5021
7915
/* $Id: teleint.c,v 1.16.2.5 2004/01/19 15:31:50 keil Exp $ * * low level stuff for TeleInt isdn cards * * Author Karsten Keil * Copyright by Karsten Keil <keil@isdn4linux.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/init.h> #include "hisax.h" #include "isac.h" #include "hfc_2bs0.h" #include "isdnl1.h" static const char *TeleInt_revision = "$Revision: 1.16.2.5 $"; #define byteout(addr,val) outb(val,addr) #define bytein(addr) inb(addr) static inline u_char readreg(unsigned int ale, unsigned int adr, u_char off) { register u_char ret; int max_delay = 2000; byteout(ale, off); ret = HFC_BUSY & bytein(ale); while (ret && --max_delay) ret = HFC_BUSY & bytein(ale); if (!max_delay) { printk(KERN_WARNING "TeleInt Busy not inactive\n"); return (0); } ret = bytein(adr); return (ret); } static inline void readfifo(unsigned int ale, unsigned int adr, u_char off, u_char * data, int size) { register u_char ret; register int max_delay = 20000; register int i; byteout(ale, off); for (i = 0; i<size; i++) { ret = HFC_BUSY & bytein(ale); while (ret && --max_delay) ret = HFC_BUSY & bytein(ale); if (!max_delay) { printk(KERN_WARNING "TeleInt Busy not inactive\n"); return; } data[i] = bytein(adr); } } static inline void writereg(unsigned int ale, unsigned int adr, u_char off, u_char data) { register u_char ret; int max_delay = 2000; byteout(ale, off); ret = HFC_BUSY & bytein(ale); while (ret && --max_delay) ret = HFC_BUSY & bytein(ale); if (!max_delay) { printk(KERN_WARNING "TeleInt Busy not inactive\n"); return; } byteout(adr, data); } static inline void writefifo(unsigned int ale, unsigned int adr, u_char off, u_char * data, int size) { register u_char ret; register int max_delay = 20000; register int i; byteout(ale, off); for (i = 0; i<size; i++) { ret = HFC_BUSY & bytein(ale); while (ret && --max_delay) ret = HFC_BUSY & bytein(ale); if (!max_delay) { 
printk(KERN_WARNING "TeleInt Busy not inactive\n"); return; } byteout(adr, data[i]); } } /* Interface functions */ static u_char ReadISAC(struct IsdnCardState *cs, u_char offset) { cs->hw.hfc.cip = offset; return (readreg(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, offset)); } static void WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value) { cs->hw.hfc.cip = offset; writereg(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, offset, value); } static void ReadISACfifo(struct IsdnCardState *cs, u_char * data, int size) { cs->hw.hfc.cip = 0; readfifo(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, 0, data, size); } static void WriteISACfifo(struct IsdnCardState *cs, u_char * data, int size) { cs->hw.hfc.cip = 0; writefifo(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, 0, data, size); } static u_char ReadHFC(struct IsdnCardState *cs, int data, u_char reg) { register u_char ret; if (data) { cs->hw.hfc.cip = reg; byteout(cs->hw.hfc.addr | 1, reg); ret = bytein(cs->hw.hfc.addr); if (cs->debug & L1_DEB_HSCX_FIFO && (data != 2)) debugl1(cs, "hfc RD %02x %02x", reg, ret); } else ret = bytein(cs->hw.hfc.addr | 1); return (ret); } static void WriteHFC(struct IsdnCardState *cs, int data, u_char reg, u_char value) { byteout(cs->hw.hfc.addr | 1, reg); cs->hw.hfc.cip = reg; if (data) byteout(cs->hw.hfc.addr, value); if (cs->debug & L1_DEB_HSCX_FIFO && (data != 2)) debugl1(cs, "hfc W%c %02x %02x", data ? 
'D' : 'C', reg, value); } static irqreturn_t TeleInt_interrupt(int intno, void *dev_id) { struct IsdnCardState *cs = dev_id; u_char val; u_long flags; spin_lock_irqsave(&cs->lock, flags); val = readreg(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, ISAC_ISTA); Start_ISAC: if (val) isac_interrupt(cs, val); val = readreg(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, ISAC_ISTA); if (val) { if (cs->debug & L1_DEB_ISAC) debugl1(cs, "ISAC IntStat after IntRoutine"); goto Start_ISAC; } writereg(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, ISAC_MASK, 0xFF); writereg(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, ISAC_MASK, 0x0); spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } static void TeleInt_Timer(struct IsdnCardState *cs) { int stat = 0; u_long flags; spin_lock_irqsave(&cs->lock, flags); if (cs->bcs[0].mode) { stat |= 1; main_irq_hfc(&cs->bcs[0]); } if (cs->bcs[1].mode) { stat |= 2; main_irq_hfc(&cs->bcs[1]); } spin_unlock_irqrestore(&cs->lock, flags); stat = HZ/100; if (!stat) stat = 1; cs->hw.hfc.timer.expires = jiffies + stat; add_timer(&cs->hw.hfc.timer); } static void release_io_TeleInt(struct IsdnCardState *cs) { del_timer(&cs->hw.hfc.timer); releasehfc(cs); if (cs->hw.hfc.addr) release_region(cs->hw.hfc.addr, 2); } static void reset_TeleInt(struct IsdnCardState *cs) { printk(KERN_INFO "TeleInt: resetting card\n"); cs->hw.hfc.cirm |= HFC_RESET; byteout(cs->hw.hfc.addr | 1, cs->hw.hfc.cirm); /* Reset On */ mdelay(10); cs->hw.hfc.cirm &= ~HFC_RESET; byteout(cs->hw.hfc.addr | 1, cs->hw.hfc.cirm); /* Reset Off */ mdelay(10); } static int TeleInt_card_msg(struct IsdnCardState *cs, int mt, void *arg) { u_long flags; int delay; switch (mt) { case CARD_RESET: spin_lock_irqsave(&cs->lock, flags); reset_TeleInt(cs); spin_unlock_irqrestore(&cs->lock, flags); return(0); case CARD_RELEASE: release_io_TeleInt(cs); return(0); case CARD_INIT: spin_lock_irqsave(&cs->lock, flags); reset_TeleInt(cs); inithfc(cs); clear_pending_isac_ints(cs); initisac(cs); /* Reenable all IRQ */ cs->writeisac(cs, 
ISAC_MASK, 0); cs->writeisac(cs, ISAC_CMDR, 0x41); spin_unlock_irqrestore(&cs->lock, flags); delay = HZ/100; if (!delay) delay = 1; cs->hw.hfc.timer.expires = jiffies + delay; add_timer(&cs->hw.hfc.timer); return(0); case CARD_TEST: return(0); } return(0); } int __devinit setup_TeleInt(struct IsdnCard *card) { struct IsdnCardState *cs = card->cs; char tmp[64]; strcpy(tmp, TeleInt_revision); printk(KERN_INFO "HiSax: TeleInt driver Rev. %s\n", HiSax_getrev(tmp)); if (cs->typ != ISDN_CTYPE_TELEINT) return (0); cs->hw.hfc.addr = card->para[1] & 0x3fe; cs->irq = card->para[0]; cs->hw.hfc.cirm = HFC_CIRM; cs->hw.hfc.isac_spcr = 0x00; cs->hw.hfc.cip = 0; cs->hw.hfc.ctmt = HFC_CTMT | HFC_CLTIMER; cs->bcs[0].hw.hfc.send = NULL; cs->bcs[1].hw.hfc.send = NULL; cs->hw.hfc.fifosize = 7 * 1024 + 512; cs->hw.hfc.timer.function = (void *) TeleInt_Timer; cs->hw.hfc.timer.data = (long) cs; init_timer(&cs->hw.hfc.timer); if (!request_region(cs->hw.hfc.addr, 2, "TeleInt isdn")) { printk(KERN_WARNING "HiSax: TeleInt config port %x-%x already in use\n", cs->hw.hfc.addr, cs->hw.hfc.addr + 2); return (0); } /* HW IO = IO */ byteout(cs->hw.hfc.addr, cs->hw.hfc.addr & 0xff); byteout(cs->hw.hfc.addr | 1, ((cs->hw.hfc.addr & 0x300) >> 8) | 0x54); switch (cs->irq) { case 3: cs->hw.hfc.cirm |= HFC_INTA; break; case 4: cs->hw.hfc.cirm |= HFC_INTB; break; case 5: cs->hw.hfc.cirm |= HFC_INTC; break; case 7: cs->hw.hfc.cirm |= HFC_INTD; break; case 10: cs->hw.hfc.cirm |= HFC_INTE; break; case 11: cs->hw.hfc.cirm |= HFC_INTF; break; default: printk(KERN_WARNING "TeleInt: wrong IRQ\n"); release_io_TeleInt(cs); return (0); } byteout(cs->hw.hfc.addr | 1, cs->hw.hfc.cirm); byteout(cs->hw.hfc.addr | 1, cs->hw.hfc.ctmt); printk(KERN_INFO "TeleInt: defined at 0x%x IRQ %d\n", cs->hw.hfc.addr, cs->irq); setup_isac(cs); cs->readisac = &ReadISAC; cs->writeisac = &WriteISAC; cs->readisacfifo = &ReadISACfifo; cs->writeisacfifo = &WriteISACfifo; cs->BC_Read_Reg = &ReadHFC; cs->BC_Write_Reg = &WriteHFC; 
cs->cardmsg = &TeleInt_card_msg; cs->irq_func = &TeleInt_interrupt; ISACVersion(cs, "TeleInt:"); return (1); }
gpl-2.0
MSM8226-Samsung/kernel_samsung_msm8226
sound/firewire/amdtp.c
7837
15224
/* * Audio and Music Data Transmission Protocol (IEC 61883-6) streams * with Common Isochronous Packet (IEC 61883-1) headers * * Copyright (c) Clemens Ladisch <clemens@ladisch.de> * Licensed under the terms of the GNU General Public License, version 2. */ #include <linux/device.h> #include <linux/err.h> #include <linux/firewire.h> #include <linux/module.h> #include <linux/slab.h> #include <sound/pcm.h> #include "amdtp.h" #define TICKS_PER_CYCLE 3072 #define CYCLES_PER_SECOND 8000 #define TICKS_PER_SECOND (TICKS_PER_CYCLE * CYCLES_PER_SECOND) #define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 µs */ #define TAG_CIP 1 #define CIP_EOH (1u << 31) #define CIP_FMT_AM (0x10 << 24) #define AMDTP_FDF_AM824 (0 << 19) #define AMDTP_FDF_SFC_SHIFT 16 /* TODO: make these configurable */ #define INTERRUPT_INTERVAL 16 #define QUEUE_LENGTH 48 /** * amdtp_out_stream_init - initialize an AMDTP output stream structure * @s: the AMDTP output stream to initialize * @unit: the target of the stream * @flags: the packet transmission method to use */ int amdtp_out_stream_init(struct amdtp_out_stream *s, struct fw_unit *unit, enum cip_out_flags flags) { if (flags != CIP_NONBLOCKING) return -EINVAL; s->unit = fw_unit_get(unit); s->flags = flags; s->context = ERR_PTR(-1); mutex_init(&s->mutex); s->packet_index = 0; return 0; } EXPORT_SYMBOL(amdtp_out_stream_init); /** * amdtp_out_stream_destroy - free stream resources * @s: the AMDTP output stream to destroy */ void amdtp_out_stream_destroy(struct amdtp_out_stream *s) { WARN_ON(!IS_ERR(s->context)); mutex_destroy(&s->mutex); fw_unit_put(s->unit); } EXPORT_SYMBOL(amdtp_out_stream_destroy); /** * amdtp_out_stream_set_rate - set the sample rate * @s: the AMDTP output stream to configure * @rate: the sample rate * * The sample rate must be set before the stream is started, and must not be * changed while the stream is running. 
*/ void amdtp_out_stream_set_rate(struct amdtp_out_stream *s, unsigned int rate) { static const struct { unsigned int rate; unsigned int syt_interval; } rate_info[] = { [CIP_SFC_32000] = { 32000, 8, }, [CIP_SFC_44100] = { 44100, 8, }, [CIP_SFC_48000] = { 48000, 8, }, [CIP_SFC_88200] = { 88200, 16, }, [CIP_SFC_96000] = { 96000, 16, }, [CIP_SFC_176400] = { 176400, 32, }, [CIP_SFC_192000] = { 192000, 32, }, }; unsigned int sfc; if (WARN_ON(!IS_ERR(s->context))) return; for (sfc = 0; sfc < ARRAY_SIZE(rate_info); ++sfc) if (rate_info[sfc].rate == rate) { s->sfc = sfc; s->syt_interval = rate_info[sfc].syt_interval; return; } WARN_ON(1); } EXPORT_SYMBOL(amdtp_out_stream_set_rate); /** * amdtp_out_stream_get_max_payload - get the stream's packet size * @s: the AMDTP output stream * * This function must not be called before the stream has been configured * with amdtp_out_stream_set_hw_params(), amdtp_out_stream_set_pcm(), and * amdtp_out_stream_set_midi(). */ unsigned int amdtp_out_stream_get_max_payload(struct amdtp_out_stream *s) { static const unsigned int max_data_blocks[] = { [CIP_SFC_32000] = 4, [CIP_SFC_44100] = 6, [CIP_SFC_48000] = 6, [CIP_SFC_88200] = 12, [CIP_SFC_96000] = 12, [CIP_SFC_176400] = 23, [CIP_SFC_192000] = 24, }; s->data_block_quadlets = s->pcm_channels; s->data_block_quadlets += DIV_ROUND_UP(s->midi_ports, 8); return 8 + max_data_blocks[s->sfc] * 4 * s->data_block_quadlets; } EXPORT_SYMBOL(amdtp_out_stream_get_max_payload); static void amdtp_write_s16(struct amdtp_out_stream *s, struct snd_pcm_substream *pcm, __be32 *buffer, unsigned int frames); static void amdtp_write_s32(struct amdtp_out_stream *s, struct snd_pcm_substream *pcm, __be32 *buffer, unsigned int frames); /** * amdtp_out_stream_set_pcm_format - set the PCM format * @s: the AMDTP output stream to configure * @format: the format of the ALSA PCM device * * The sample format must be set before the stream is started, and must not be * changed while the stream is running. 
*/ void amdtp_out_stream_set_pcm_format(struct amdtp_out_stream *s, snd_pcm_format_t format) { if (WARN_ON(!IS_ERR(s->context))) return; switch (format) { default: WARN_ON(1); /* fall through */ case SNDRV_PCM_FORMAT_S16: s->transfer_samples = amdtp_write_s16; break; case SNDRV_PCM_FORMAT_S32: s->transfer_samples = amdtp_write_s32; break; } } EXPORT_SYMBOL(amdtp_out_stream_set_pcm_format); static unsigned int calculate_data_blocks(struct amdtp_out_stream *s) { unsigned int phase, data_blocks; if (!cip_sfc_is_base_44100(s->sfc)) { /* Sample_rate / 8000 is an integer, and precomputed. */ data_blocks = s->data_block_state; } else { phase = s->data_block_state; /* * This calculates the number of data blocks per packet so that * 1) the overall rate is correct and exactly synchronized to * the bus clock, and * 2) packets with a rounded-up number of blocks occur as early * as possible in the sequence (to prevent underruns of the * device's buffer). */ if (s->sfc == CIP_SFC_44100) /* 6 6 5 6 5 6 5 ... */ data_blocks = 5 + ((phase & 1) ^ (phase == 0 || phase >= 40)); else /* 12 11 11 11 11 ... or 23 22 22 22 22 ... */ data_blocks = 11 * (s->sfc >> 1) + (phase == 0); if (++phase >= (80 >> (s->sfc >> 1))) phase = 0; s->data_block_state = phase; } return data_blocks; } static unsigned int calculate_syt(struct amdtp_out_stream *s, unsigned int cycle) { unsigned int syt_offset, phase, index, syt; if (s->last_syt_offset < TICKS_PER_CYCLE) { if (!cip_sfc_is_base_44100(s->sfc)) syt_offset = s->last_syt_offset + s->syt_offset_state; else { /* * The time, in ticks, of the n'th SYT_INTERVAL sample is: * n * SYT_INTERVAL * 24576000 / sample_rate * Modulo TICKS_PER_CYCLE, the difference between successive * elements is about 1386.23. Rounding the results of this * formula to the SYT precision results in a sequence of * differences that begins with: * 1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ... * This code generates _exactly_ the same sequence. 
*/ phase = s->syt_offset_state; index = phase % 13; syt_offset = s->last_syt_offset; syt_offset += 1386 + ((index && !(index & 3)) || phase == 146); if (++phase >= 147) phase = 0; s->syt_offset_state = phase; } } else syt_offset = s->last_syt_offset - TICKS_PER_CYCLE; s->last_syt_offset = syt_offset; if (syt_offset < TICKS_PER_CYCLE) { syt_offset += TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE; syt = (cycle + syt_offset / TICKS_PER_CYCLE) << 12; syt += syt_offset % TICKS_PER_CYCLE; return syt & 0xffff; } else { return 0xffff; /* no info */ } } static void amdtp_write_s32(struct amdtp_out_stream *s, struct snd_pcm_substream *pcm, __be32 *buffer, unsigned int frames) { struct snd_pcm_runtime *runtime = pcm->runtime; unsigned int channels, remaining_frames, frame_step, i, c; const u32 *src; channels = s->pcm_channels; src = (void *)runtime->dma_area + s->pcm_buffer_pointer * (runtime->frame_bits / 8); remaining_frames = runtime->buffer_size - s->pcm_buffer_pointer; frame_step = s->data_block_quadlets - channels; for (i = 0; i < frames; ++i) { for (c = 0; c < channels; ++c) { *buffer = cpu_to_be32((*src >> 8) | 0x40000000); src++; buffer++; } buffer += frame_step; if (--remaining_frames == 0) src = (void *)runtime->dma_area; } } static void amdtp_write_s16(struct amdtp_out_stream *s, struct snd_pcm_substream *pcm, __be32 *buffer, unsigned int frames) { struct snd_pcm_runtime *runtime = pcm->runtime; unsigned int channels, remaining_frames, frame_step, i, c; const u16 *src; channels = s->pcm_channels; src = (void *)runtime->dma_area + s->pcm_buffer_pointer * (runtime->frame_bits / 8); remaining_frames = runtime->buffer_size - s->pcm_buffer_pointer; frame_step = s->data_block_quadlets - channels; for (i = 0; i < frames; ++i) { for (c = 0; c < channels; ++c) { *buffer = cpu_to_be32((*src << 8) | 0x40000000); src++; buffer++; } buffer += frame_step; if (--remaining_frames == 0) src = (void *)runtime->dma_area; } } static void amdtp_fill_pcm_silence(struct amdtp_out_stream *s, 
__be32 *buffer, unsigned int frames) { unsigned int i, c; for (i = 0; i < frames; ++i) { for (c = 0; c < s->pcm_channels; ++c) buffer[c] = cpu_to_be32(0x40000000); buffer += s->data_block_quadlets; } } static void amdtp_fill_midi(struct amdtp_out_stream *s, __be32 *buffer, unsigned int frames) { unsigned int i; for (i = 0; i < frames; ++i) buffer[s->pcm_channels + i * s->data_block_quadlets] = cpu_to_be32(0x80000000); } static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle) { __be32 *buffer; unsigned int index, data_blocks, syt, ptr; struct snd_pcm_substream *pcm; struct fw_iso_packet packet; int err; if (s->packet_index < 0) return; index = s->packet_index; data_blocks = calculate_data_blocks(s); syt = calculate_syt(s, cycle); buffer = s->buffer.packets[index].buffer; buffer[0] = cpu_to_be32(ACCESS_ONCE(s->source_node_id_field) | (s->data_block_quadlets << 16) | s->data_block_counter); buffer[1] = cpu_to_be32(CIP_EOH | CIP_FMT_AM | AMDTP_FDF_AM824 | (s->sfc << AMDTP_FDF_SFC_SHIFT) | syt); buffer += 2; pcm = ACCESS_ONCE(s->pcm); if (pcm) s->transfer_samples(s, pcm, buffer, data_blocks); else amdtp_fill_pcm_silence(s, buffer, data_blocks); if (s->midi_ports) amdtp_fill_midi(s, buffer, data_blocks); s->data_block_counter = (s->data_block_counter + data_blocks) & 0xff; packet.payload_length = 8 + data_blocks * 4 * s->data_block_quadlets; packet.interrupt = IS_ALIGNED(index + 1, INTERRUPT_INTERVAL); packet.skip = 0; packet.tag = TAG_CIP; packet.sy = 0; packet.header_length = 0; err = fw_iso_context_queue(s->context, &packet, &s->buffer.iso_buffer, s->buffer.packets[index].offset); if (err < 0) { dev_err(&s->unit->device, "queueing error: %d\n", err); s->packet_index = -1; amdtp_out_stream_pcm_abort(s); return; } if (++index >= QUEUE_LENGTH) index = 0; s->packet_index = index; if (pcm) { ptr = s->pcm_buffer_pointer + data_blocks; if (ptr >= pcm->runtime->buffer_size) ptr -= pcm->runtime->buffer_size; ACCESS_ONCE(s->pcm_buffer_pointer) = ptr; 
s->pcm_period_pointer += data_blocks; if (s->pcm_period_pointer >= pcm->runtime->period_size) { s->pcm_period_pointer -= pcm->runtime->period_size; snd_pcm_period_elapsed(pcm); } } } static void out_packet_callback(struct fw_iso_context *context, u32 cycle, size_t header_length, void *header, void *data) { struct amdtp_out_stream *s = data; unsigned int i, packets = header_length / 4; /* * Compute the cycle of the last queued packet. * (We need only the four lowest bits for the SYT, so we can ignore * that bits 0-11 must wrap around at 3072.) */ cycle += QUEUE_LENGTH - packets; for (i = 0; i < packets; ++i) queue_out_packet(s, ++cycle); fw_iso_context_queue_flush(s->context); } static int queue_initial_skip_packets(struct amdtp_out_stream *s) { struct fw_iso_packet skip_packet = { .skip = 1, }; unsigned int i; int err; for (i = 0; i < QUEUE_LENGTH; ++i) { skip_packet.interrupt = IS_ALIGNED(s->packet_index + 1, INTERRUPT_INTERVAL); err = fw_iso_context_queue(s->context, &skip_packet, NULL, 0); if (err < 0) return err; if (++s->packet_index >= QUEUE_LENGTH) s->packet_index = 0; } return 0; } /** * amdtp_out_stream_start - start sending packets * @s: the AMDTP output stream to start * @channel: the isochronous channel on the bus * @speed: firewire speed code * * The stream cannot be started until it has been configured with * amdtp_out_stream_set_hw_params(), amdtp_out_stream_set_pcm(), and * amdtp_out_stream_set_midi(); and it must be started before any * PCM or MIDI device can be started. 
*/ int amdtp_out_stream_start(struct amdtp_out_stream *s, int channel, int speed) { static const struct { unsigned int data_block; unsigned int syt_offset; } initial_state[] = { [CIP_SFC_32000] = { 4, 3072 }, [CIP_SFC_48000] = { 6, 1024 }, [CIP_SFC_96000] = { 12, 1024 }, [CIP_SFC_192000] = { 24, 1024 }, [CIP_SFC_44100] = { 0, 67 }, [CIP_SFC_88200] = { 0, 67 }, [CIP_SFC_176400] = { 0, 67 }, }; int err; mutex_lock(&s->mutex); if (WARN_ON(!IS_ERR(s->context) || (!s->pcm_channels && !s->midi_ports))) { err = -EBADFD; goto err_unlock; } s->data_block_state = initial_state[s->sfc].data_block; s->syt_offset_state = initial_state[s->sfc].syt_offset; s->last_syt_offset = TICKS_PER_CYCLE; err = iso_packets_buffer_init(&s->buffer, s->unit, QUEUE_LENGTH, amdtp_out_stream_get_max_payload(s), DMA_TO_DEVICE); if (err < 0) goto err_unlock; s->context = fw_iso_context_create(fw_parent_device(s->unit)->card, FW_ISO_CONTEXT_TRANSMIT, channel, speed, 0, out_packet_callback, s); if (IS_ERR(s->context)) { err = PTR_ERR(s->context); if (err == -EBUSY) dev_err(&s->unit->device, "no free output stream on this controller\n"); goto err_buffer; } amdtp_out_stream_update(s); s->packet_index = 0; s->data_block_counter = 0; err = queue_initial_skip_packets(s); if (err < 0) goto err_context; err = fw_iso_context_start(s->context, -1, 0, 0); if (err < 0) goto err_context; mutex_unlock(&s->mutex); return 0; err_context: fw_iso_context_destroy(s->context); s->context = ERR_PTR(-1); err_buffer: iso_packets_buffer_destroy(&s->buffer, s->unit); err_unlock: mutex_unlock(&s->mutex); return err; } EXPORT_SYMBOL(amdtp_out_stream_start); /** * amdtp_out_stream_update - update the stream after a bus reset * @s: the AMDTP output stream */ void amdtp_out_stream_update(struct amdtp_out_stream *s) { ACCESS_ONCE(s->source_node_id_field) = (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24; } EXPORT_SYMBOL(amdtp_out_stream_update); /** * amdtp_out_stream_stop - stop sending packets * @s: the AMDTP output 
stream to stop * * All PCM and MIDI devices of the stream must be stopped before the stream * itself can be stopped. */ void amdtp_out_stream_stop(struct amdtp_out_stream *s) { mutex_lock(&s->mutex); if (IS_ERR(s->context)) { mutex_unlock(&s->mutex); return; } fw_iso_context_stop(s->context); fw_iso_context_destroy(s->context); s->context = ERR_PTR(-1); iso_packets_buffer_destroy(&s->buffer, s->unit); mutex_unlock(&s->mutex); } EXPORT_SYMBOL(amdtp_out_stream_stop); /** * amdtp_out_stream_pcm_abort - abort the running PCM device * @s: the AMDTP stream about to be stopped * * If the isochronous stream needs to be stopped asynchronously, call this * function first to stop the PCM device. */ void amdtp_out_stream_pcm_abort(struct amdtp_out_stream *s) { struct snd_pcm_substream *pcm; pcm = ACCESS_ONCE(s->pcm); if (pcm) { snd_pcm_stream_lock_irq(pcm); if (snd_pcm_running(pcm)) snd_pcm_stop(pcm, SNDRV_PCM_STATE_XRUN); snd_pcm_stream_unlock_irq(pcm); } } EXPORT_SYMBOL(amdtp_out_stream_pcm_abort);
gpl-2.0
kraml/desire-sense-kernel
arch/arm/mach-msm/dal.c
158
33159
/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. * Copyright (c) 2009, HTC Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Code Aurora Forum nor * the names of its contributors may be used to endorse or promote * products derived from this software without specific prior written * permission. * * Alternatively, provided that this notice is retained in full, this software * may be relicensed by the recipient under the terms of the GNU General Public * License version 2 ("GPL") and only version 2, in which case the provisions of * the GPL apply INSTEAD OF those given above. If the recipient relicenses the * software under the GPL, then the identification text in the MODULE_LICENSE * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a * recipient changes the license terms to the GPL, subsequent recipients shall * not relicense under alternate licensing terms, including the BSD or dual * BSD/GPL terms. In addition, the following license statement immediately * below and between the words START and END shall also then apply when this * software is relicensed under the GPL: * * START * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 and only version 2 as * published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * END * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ /* * Device access library (DAL) implementation. 
*/ #include <linux/kernel.h> #include <linux/completion.h> #include <linux/list.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/wait.h> #include <linux/sched.h> #include <linux/semaphore.h> #include <linux/delay.h> #include <mach/dal.h> #include <mach/msm_smd.h> #include "smd_private.h" #include "smd_debug.h" #define DALRPC_PROTOCOL_VERSION 0x11 #define DALRPC_SUCCESS 0 #define DALRPC_MAX_PORTNAME_LEN 64 #define DALRPC_MAX_ATTACH_PARAM_LEN 64 #define DALRPC_MAX_SERVICE_NAME_LEN 32 #define DALRPC_MAX_PARAMS 128 #define DALRPC_MAX_PARAMS_SIZE (DALRPC_MAX_PARAMS * 4) #define DALRPC_MAX_MSG_SIZE (sizeof(struct dalrpc_msg_hdr) + \ DALRPC_MAX_PARAMS_SIZE) #define DALRPC_MSGID_DDI 0x0 #define DALRPC_MSGID_DDI_REPLY 0x80 #define DALRPC_MSGID_ATTACH_REPLY 0x81 #define DALRPC_MSGID_DETACH_REPLY 0x82 #define DALRPC_MSGID_ASYNCH 0xC0 #define ROUND_BUFLEN(x) (((x + 3) & ~0x3)) #define MAX_RETRY_COUNT 5 #define RETRY_DELAY 10 struct dalrpc_msg_hdr { uint32_t len:16; uint32_t proto_ver:8; uint32_t prio:7; uint32_t async:1; uint32_t ddi_idx:16; uint32_t proto_id:8; uint32_t msgid:8; void *from; void *to; }; struct dalrpc_msg { struct dalrpc_msg_hdr hdr; uint32_t param[DALRPC_MAX_PARAMS]; }; struct dalrpc_event_handle { struct list_head list; int flag; spinlock_t lock; }; struct dalrpc_cb_handle { struct list_head list; void (*fn)(void *, uint32_t, void *, uint32_t); void *context; }; struct daldevice_handle {; struct list_head list; void *remote_handle; struct completion read_completion; struct dalrpc_port *port; struct dalrpc_msg msg; struct mutex client_lock; }; struct dalrpc_port { struct list_head list; char port[DALRPC_MAX_PORTNAME_LEN+1]; int refcount; struct workqueue_struct *wq; struct work_struct port_work; struct mutex write_lock; smd_channel_t *ch; struct dalrpc_msg msg_in; struct daldevice_handle *msg_owner; unsigned msg_bytes_read; struct list_head event_list; struct mutex event_list_lock; struct list_head cb_list; 
struct mutex cb_list_lock; }; static LIST_HEAD(port_list); static LIST_HEAD(client_list); static DEFINE_MUTEX(pc_lists_lock); static DECLARE_WAIT_QUEUE_HEAD(event_wq); static int client_exists(void *handle) { struct daldevice_handle *h; if (!handle) return 0; mutex_lock(&pc_lists_lock); list_for_each_entry(h, &client_list, list) if (h == handle) { mutex_unlock(&pc_lists_lock); return 1; } mutex_unlock(&pc_lists_lock); return 0; } static int client_exists_locked(void *handle) { struct daldevice_handle *h; /* this function must be called with pc_lists_lock acquired */ if (!handle) return 0; list_for_each_entry(h, &client_list, list) if (h == handle) return 1; return 0; } static int port_exists(struct dalrpc_port *p) { struct dalrpc_port *p_iter; /* this function must be called with pc_lists_lock acquired */ if (!p) return 0; list_for_each_entry(p_iter, &port_list, list) if (p_iter == p) return 1; return 0; } static struct dalrpc_port *port_name_exists(char *port) { struct dalrpc_port *p; /* this function must be called with pc_lists_lock acquired */ list_for_each_entry(p, &port_list, list) if (!strcmp(p->port, port)) return p; return NULL; } static void port_close(struct dalrpc_port *p) { mutex_lock(&pc_lists_lock); p->refcount--; if (p->refcount == 0) list_del(&p->list); mutex_unlock(&pc_lists_lock); if (p->refcount == 0) { destroy_workqueue(p->wq); smd_close(p->ch); kfree(p); } } static int event_exists(struct dalrpc_port *p, struct dalrpc_event_handle *ev) { struct dalrpc_event_handle *ev_iter; /* this function must be called with event_list_lock acquired */ list_for_each_entry(ev_iter, &p->event_list, list) if (ev_iter == ev) return 1; return 0; } static int cb_exists(struct dalrpc_port *p, struct dalrpc_cb_handle *cb) { struct dalrpc_cb_handle *cb_iter; /* this function must be called with the cb_list_lock acquired */ list_for_each_entry(cb_iter, &p->cb_list, list) if (cb_iter == cb) return 1; return 0; } static int check_version(struct dalrpc_msg_hdr *msg_hdr) 
{ static int version_msg = 1; /* disabled because asynch events currently have no version */ return 0; if (msg_hdr->proto_ver != DALRPC_PROTOCOL_VERSION) { if (version_msg) { printk(KERN_ERR "dalrpc: incompatible verison\n"); version_msg = 0; } return -1; } return 0; } static void process_asynch(struct dalrpc_port *p) { struct dalrpc_event_handle *ev; struct dalrpc_cb_handle *cb; ev = (struct dalrpc_event_handle *)p->msg_in.param[0]; cb = (struct dalrpc_cb_handle *)p->msg_in.param[0]; mutex_lock(&p->event_list_lock); if (event_exists(p, ev)) { spin_lock(&ev->lock); ev->flag = 1; spin_unlock(&ev->lock); smp_mb(); wake_up_all(&event_wq); mutex_unlock(&p->event_list_lock); return; } mutex_unlock(&p->event_list_lock); mutex_lock(&p->cb_list_lock); if (cb_exists(p, cb)) { cb->fn(cb->context, p->msg_in.param[1], &p->msg_in.param[3], p->msg_in.param[2]); mutex_unlock(&p->cb_list_lock); return; } mutex_unlock(&p->cb_list_lock); } static void process_msg(struct dalrpc_port *p) { switch (p->msg_in.hdr.msgid) { case DALRPC_MSGID_DDI_REPLY: case DALRPC_MSGID_ATTACH_REPLY: case DALRPC_MSGID_DETACH_REPLY: complete(&p->msg_owner->read_completion); break; case DALRPC_MSGID_ASYNCH: process_asynch(p); break; default: printk(KERN_ERR "process_msg: bad msgid %#x\n", p->msg_in.hdr.msgid); } } static void flush_msg(struct dalrpc_port *p) { int bytes_read, len; len = p->msg_in.hdr.len - sizeof(struct dalrpc_msg_hdr); while (len > 0) { bytes_read = smd_read(p->ch, NULL, len); if (bytes_read <= 0) break; len -= bytes_read; } p->msg_bytes_read = 0; } static int check_header(struct dalrpc_port *p) { if (check_version(&p->msg_in.hdr) || p->msg_in.hdr.len > DALRPC_MAX_MSG_SIZE || (p->msg_in.hdr.msgid != DALRPC_MSGID_ASYNCH && !client_exists_locked(p->msg_in.hdr.to))) { printk(KERN_ERR "dalrpc_read_msg: bad msg\n"); flush_msg(p); return 1; } p->msg_owner = (struct daldevice_handle *)p->msg_in.hdr.to; if (p->msg_in.hdr.msgid != DALRPC_MSGID_ASYNCH) memcpy(&p->msg_owner->msg.hdr, &p->msg_in.hdr, 
sizeof(p->msg_in.hdr)); return 0; } static int dalrpc_read_msg(struct dalrpc_port *p) { uint8_t *read_ptr; int bytes_read; /* read msg header */ while (p->msg_bytes_read < sizeof(p->msg_in.hdr)) { read_ptr = (uint8_t *)&p->msg_in.hdr + p->msg_bytes_read; bytes_read = smd_read(p->ch, read_ptr, sizeof(p->msg_in.hdr) - p->msg_bytes_read); if (bytes_read <= 0) return 0; p->msg_bytes_read += bytes_read; if (p->msg_bytes_read == sizeof(p->msg_in.hdr) && check_header(p)) return 1; } /* read remainder of msg */ if (p->msg_in.hdr.msgid != DALRPC_MSGID_ASYNCH) read_ptr = (uint8_t *)&p->msg_owner->msg; else read_ptr = (uint8_t *)&p->msg_in; read_ptr += p->msg_bytes_read; while (p->msg_bytes_read < p->msg_in.hdr.len) { bytes_read = smd_read(p->ch, read_ptr, p->msg_in.hdr.len - p->msg_bytes_read); if (bytes_read <= 0) return 0; p->msg_bytes_read += bytes_read; read_ptr += bytes_read; } process_msg(p); p->msg_bytes_read = 0; p->msg_owner = NULL; return 1; } static void dalrpc_work(struct work_struct *work) { struct dalrpc_port *p = container_of(work, struct dalrpc_port, port_work); /* must lock port/client lists to ensure port doesn't disappear under an asynch event */ mutex_lock(&pc_lists_lock); if (port_exists(p)) while (dalrpc_read_msg(p)) ; mutex_unlock(&pc_lists_lock); } static void dalrpc_smd_cb(void *priv, unsigned smd_flags) { struct dalrpc_port *p = priv; if (smd_flags != SMD_EVENT_DATA) return; queue_work(p->wq, &p->port_work); } static struct dalrpc_port *dalrpc_port_open(char *port, int cpu) { struct dalrpc_port *p; char wq_name[32]; p = port_name_exists(port); if (p) { p->refcount++; return p; } p = kzalloc(sizeof(struct dalrpc_port), GFP_KERNEL); if (!p) return NULL; strncpy(p->port, port, sizeof(p->port) - 1); p->refcount = 1; snprintf(wq_name, sizeof(wq_name), "dalrpc_rcv_%s", port); p->wq = create_singlethread_workqueue(wq_name); if (!p->wq) { printk(KERN_ERR "dalrpc_init: unable to create workqueue\n"); goto no_wq; } INIT_WORK(&p->port_work, dalrpc_work); 
mutex_init(&p->write_lock); mutex_init(&p->event_list_lock); mutex_init(&p->cb_list_lock); INIT_LIST_HEAD(&p->event_list); INIT_LIST_HEAD(&p->cb_list); p->msg_owner = NULL; p->msg_bytes_read = 0; #if 1 //HK test if (smd_open(port, &p->ch, p, dalrpc_smd_cb)) { #else if (smd_named_open_on_edge(port, cpu, &p->ch, p, dalrpc_smd_cb)) { #endif printk(KERN_ERR "dalrpc_port_init() failed to open port\n"); goto no_smd; } list_add(&p->list, &port_list); return p; no_smd: destroy_workqueue(p->wq); no_wq: kfree(p); return NULL; } static void dalrpc_sendwait(struct daldevice_handle *h) { u8 *buf = (u8 *)&h->msg; int len = h->msg.hdr.len; int written; mutex_lock(&h->port->write_lock); do { if ((h->port->ch->recv->state != SMD_SS_OPENED) || (h->port->ch->send->state != SMD_SS_OPENED)) { printk(KERN_ERR "%s: smd channel %s not ready," " wait 100ms.\n", __func__, h->port->ch->name); mdelay(100); continue; } written = smd_write(h->port->ch, buf + (h->msg.hdr.len - len), len); if (written < 0) break; len -= written; } while (len); /* Original codes put wait_for_completion outside of mutex * that may cause the latter session overwrites data from * previous session before aDSP really gets it. Thus, move * wait_for_completion inside the mutex to prevent data * corruption. 
*/ wait_for_completion(&h->read_completion); mutex_unlock(&h->port->write_lock); } int daldevice_attach(uint32_t device_id, char *port, int cpu, void **handle_ptr) { struct daldevice_handle *h; char dyn_port[DALRPC_MAX_PORTNAME_LEN + 1] = "DAL00"; int ret; int tries = 0; if (!port) port = dyn_port; if (strlen(port) > DALRPC_MAX_PORTNAME_LEN) return -EINVAL; h = kzalloc(sizeof(struct daldevice_handle), GFP_KERNEL); if (!h) { *handle_ptr = NULL; return -ENOMEM; } init_completion(&h->read_completion); mutex_init(&h->client_lock); mutex_lock(&pc_lists_lock); list_add(&h->list, &client_list); mutex_unlock(&pc_lists_lock); /* 3 attempts, enough for one each on the user specified port, the * dynamic discovery port, and the port recommended by the dynamic * discovery port */ while (tries < 3) { tries++; mutex_lock(&pc_lists_lock); h->port = dalrpc_port_open(port, cpu); if (!h->port) { list_del(&h->list); mutex_unlock(&pc_lists_lock); printk(KERN_ERR "daldevice_attach: could not " "open port\n"); kfree(h); *handle_ptr = NULL; return -EIO; } mutex_unlock(&pc_lists_lock); h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 4 + DALRPC_MAX_ATTACH_PARAM_LEN + DALRPC_MAX_SERVICE_NAME_LEN; h->msg.hdr.proto_ver = DALRPC_PROTOCOL_VERSION; h->msg.hdr.ddi_idx = 0; h->msg.hdr.msgid = 0x1; h->msg.hdr.prio = 0; h->msg.hdr.async = 0; h->msg.hdr.from = h; h->msg.hdr.to = 0; h->msg.param[0] = device_id; memset(&h->msg.param[1], 0, DALRPC_MAX_ATTACH_PARAM_LEN + DALRPC_MAX_SERVICE_NAME_LEN); dalrpc_sendwait(h); ret = h->msg.param[0]; if (ret == DALRPC_SUCCESS) { h->remote_handle = h->msg.hdr.from; *handle_ptr = h; break; } else if (strnlen((char *)&h->msg.param[1], DALRPC_MAX_PORTNAME_LEN)) { /* another port was recommended in the response. 
*/ strncpy(dyn_port, (char *)&h->msg.param[1], DALRPC_MAX_PORTNAME_LEN); dyn_port[DALRPC_MAX_PORTNAME_LEN] = 0; port = dyn_port; } else if (port == dyn_port) { /* the dynamic discovery port (or port that * was recommended by it) did not recognize * the device id, give up */ daldevice_detach(h); break; } else /* the user specified port did not work, try * the dynamic discovery port */ port = dyn_port; port_close(h->port); } return ret; } EXPORT_SYMBOL(daldevice_attach); static void dalrpc_ddi_prologue(uint32_t ddi_idx, struct daldevice_handle *h) { h->msg.hdr.proto_ver = DALRPC_PROTOCOL_VERSION; h->msg.hdr.prio = 0; h->msg.hdr.async = 0; h->msg.hdr.msgid = DALRPC_MSGID_DDI; h->msg.hdr.from = h; h->msg.hdr.to = h->remote_handle; h->msg.hdr.ddi_idx = ddi_idx; } int daldevice_detach(void *handle) { struct daldevice_handle *h = handle; if (!client_exists(h)) return -EINVAL; dalrpc_ddi_prologue(0, h); if (!h->remote_handle) goto norpc; h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 4; h->msg.hdr.msgid = 0x2; h->msg.param[0] = 0; dalrpc_sendwait(h); norpc: mutex_lock(&pc_lists_lock); list_del(&h->list); mutex_unlock(&pc_lists_lock); port_close(h->port); kfree(h); return 0; } EXPORT_SYMBOL(daldevice_detach); uint32_t dalrpc_fcn_0(uint32_t ddi_idx, void *handle, uint32_t s1) { struct daldevice_handle *h = handle; uint32_t ret; uint32_t retry_count = 0; if (!client_exists(h)) { printk(KERN_ERR "client_exists FALSE\n"); return -EINVAL; } mutex_lock(&h->client_lock); again: dalrpc_ddi_prologue(ddi_idx, h); h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 4; h->msg.hdr.proto_id = 0; h->msg.param[0] = s1; dalrpc_sendwait(h); ret = h->msg.param[0]; if (ret && retry_count++ < MAX_RETRY_COUNT) { printk(KERN_INFO "*********** %s: %d retry %d times, ret %d\n", __func__, ddi_idx, retry_count, ret); mdelay(RETRY_DELAY); goto again; } mutex_unlock(&h->client_lock); return ret; } EXPORT_SYMBOL(dalrpc_fcn_0); uint32_t dalrpc_fcn_1(uint32_t ddi_idx, void *handle, uint32_t s1, uint32_t 
s2) { struct daldevice_handle *h = handle; uint32_t ret; uint32_t retry_count = 0; if (!client_exists(h)) return -EINVAL; mutex_lock(&h->client_lock); again: dalrpc_ddi_prologue(ddi_idx, h); h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 8; h->msg.hdr.proto_id = 1; h->msg.param[0] = s1; h->msg.param[1] = s2; dalrpc_sendwait(h); ret = h->msg.param[0]; if (ret && retry_count++ < MAX_RETRY_COUNT) { printk(KERN_INFO "*********** %s: %d retry %d times, ret %d\n", __func__, ddi_idx, retry_count, ret); mdelay(RETRY_DELAY); goto again; } mutex_unlock(&h->client_lock); return ret; } EXPORT_SYMBOL(dalrpc_fcn_1); uint32_t dalrpc_fcn_2(uint32_t ddi_idx, void *handle, uint32_t s1, uint32_t *p_s2) { struct daldevice_handle *h = handle; uint32_t ret; if (!client_exists(h)) return -EINVAL; mutex_lock(&h->client_lock); dalrpc_ddi_prologue(ddi_idx, h); h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 4; h->msg.hdr.proto_id = 2; h->msg.param[0] = s1; dalrpc_sendwait(h); if (h->msg.param[0] == DALRPC_SUCCESS) *p_s2 = h->msg.param[1]; ret = h->msg.param[0]; mutex_unlock(&h->client_lock); return ret; } EXPORT_SYMBOL(dalrpc_fcn_2); uint32_t dalrpc_fcn_3(uint32_t ddi_idx, void *handle, uint32_t s1, uint32_t s2, uint32_t s3) { struct daldevice_handle *h = handle; uint32_t ret; if (!client_exists(h)) return -EINVAL; mutex_lock(&h->client_lock); dalrpc_ddi_prologue(ddi_idx, h); h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 12; h->msg.hdr.proto_id = 3; h->msg.param[0] = s1; h->msg.param[1] = s2; h->msg.param[2] = s3; dalrpc_sendwait(h); ret = h->msg.param[0]; mutex_unlock(&h->client_lock); return ret; } EXPORT_SYMBOL(dalrpc_fcn_3); uint32_t dalrpc_fcn_4(uint32_t ddi_idx, void *handle, uint32_t s1, uint32_t s2, uint32_t *p_s3) { struct daldevice_handle *h = handle; uint32_t ret; if (!client_exists(h)) return -EINVAL; mutex_lock(&h->client_lock); dalrpc_ddi_prologue(ddi_idx, h); h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 8; h->msg.hdr.proto_id = 4; h->msg.param[0] = s1; 
h->msg.param[1] = s2; dalrpc_sendwait(h); if (h->msg.param[0] == DALRPC_SUCCESS) *p_s3 = h->msg.param[1]; ret = h->msg.param[0]; mutex_unlock(&h->client_lock); return ret; } EXPORT_SYMBOL(dalrpc_fcn_4); uint32_t dalrpc_fcn_5(uint32_t ddi_idx, void *handle, const void *ibuf, uint32_t ilen) { struct daldevice_handle *h = handle; uint32_t ret; uint32_t retry_count = 0; if ((ilen + 4) > DALRPC_MAX_PARAMS_SIZE) return -EINVAL; if (!client_exists(h)) return -EINVAL; mutex_lock(&h->client_lock); again: dalrpc_ddi_prologue(ddi_idx, h); h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 4 + ROUND_BUFLEN(ilen); h->msg.hdr.proto_id = 5; h->msg.param[0] = ilen; memcpy(&h->msg.param[1], ibuf, ilen); dalrpc_sendwait(h); ret = h->msg.param[0]; if (ret && retry_count++ < MAX_RETRY_COUNT) { printk(KERN_INFO "*********** %s: %d retry %d times, ret %d\n", __func__, ddi_idx, retry_count, ret); mdelay(RETRY_DELAY); goto again; } mutex_unlock(&h->client_lock); return ret; } EXPORT_SYMBOL(dalrpc_fcn_5); uint32_t dalrpc_fcn_6(uint32_t ddi_idx, void *handle, uint32_t s1, const void *ibuf, uint32_t ilen) { struct daldevice_handle *h = handle; uint32_t ret; uint32_t retry_count = 0; if ((ilen + 8) > DALRPC_MAX_PARAMS_SIZE) return -EINVAL; if (!client_exists(h)) return -EINVAL; mutex_lock(&h->client_lock); again: dalrpc_ddi_prologue(ddi_idx, h); h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 8 + ROUND_BUFLEN(ilen); h->msg.hdr.proto_id = 6; h->msg.param[0] = s1; h->msg.param[1] = ilen; memcpy(&h->msg.param[2], ibuf, ilen); dalrpc_sendwait(h); ret = h->msg.param[0]; if (ret && retry_count++ < MAX_RETRY_COUNT) { printk(KERN_INFO "*********** %s: %d retry %d times, ret %d\n", __func__, ddi_idx, retry_count, ret); mdelay(RETRY_DELAY); goto again; } mutex_unlock(&h->client_lock); return ret; } EXPORT_SYMBOL(dalrpc_fcn_6); uint32_t dalrpc_fcn_7(uint32_t ddi_idx, void *handle, const void *ibuf, uint32_t ilen, void *obuf, uint32_t olen, uint32_t *oalen) { struct daldevice_handle *h = handle; uint32_t 
ret; int param_idx; if ((ilen + 8) > DALRPC_MAX_PARAMS_SIZE || (olen + 4) > DALRPC_MAX_PARAMS_SIZE) return -EINVAL; if (!client_exists(h)) return -EINVAL; mutex_lock(&h->client_lock); dalrpc_ddi_prologue(ddi_idx, h); h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 8 + ROUND_BUFLEN(ilen); h->msg.hdr.proto_id = 7; h->msg.param[0] = ilen; memcpy(&h->msg.param[1], ibuf, ilen); param_idx = (ROUND_BUFLEN(ilen) / 4) + 1; h->msg.param[param_idx] = olen; dalrpc_sendwait(h); if (h->msg.param[0] == DALRPC_SUCCESS) { if (h->msg.param[1] > olen) { mutex_unlock(&h->client_lock); return -EIO; } *oalen = h->msg.param[1]; memcpy(obuf, &h->msg.param[2], h->msg.param[1]); } ret = h->msg.param[0]; mutex_unlock(&h->client_lock); return ret; } EXPORT_SYMBOL(dalrpc_fcn_7); uint32_t dalrpc_fcn_8(uint32_t ddi_idx, void *handle, const void *ibuf, uint32_t ilen, void *obuf, uint32_t olen) { struct daldevice_handle *h = handle; uint32_t ret; int param_idx; if ((ilen + 8) > DALRPC_MAX_PARAMS_SIZE || (olen + 4) > DALRPC_MAX_PARAMS_SIZE) return -EINVAL; if (!client_exists(h)) return -EINVAL; mutex_lock(&h->client_lock); dalrpc_ddi_prologue(ddi_idx, h); h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 8 + ROUND_BUFLEN(ilen); h->msg.hdr.proto_id = 8; h->msg.param[0] = ilen; memcpy(&h->msg.param[1], ibuf, ilen); param_idx = (ROUND_BUFLEN(ilen) / 4) + 1; h->msg.param[param_idx] = olen; dalrpc_sendwait(h); if (h->msg.param[0] == DALRPC_SUCCESS) { if (h->msg.param[1] > olen) { mutex_unlock(&h->client_lock); return -EIO; } memcpy(obuf, &h->msg.param[2], h->msg.param[1]); } ret = h->msg.param[0]; mutex_unlock(&h->client_lock); return ret; } EXPORT_SYMBOL(dalrpc_fcn_8); uint32_t dalrpc_fcn_9(uint32_t ddi_idx, void *handle, void *obuf, uint32_t olen) { struct daldevice_handle *h = handle; uint32_t ret; if ((olen + 4) > DALRPC_MAX_PARAMS_SIZE) return -EINVAL; if (!client_exists(h)) return -EINVAL; mutex_lock(&h->client_lock); dalrpc_ddi_prologue(ddi_idx, h); h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) 
+ 4; h->msg.hdr.proto_id = 9; h->msg.param[0] = olen; dalrpc_sendwait(h); if (h->msg.param[0] == DALRPC_SUCCESS) { if (h->msg.param[1] > olen) { mutex_unlock(&h->client_lock); return -EIO; } memcpy(obuf, &h->msg.param[2], h->msg.param[1]); } ret = h->msg.param[0]; mutex_unlock(&h->client_lock); return ret; } EXPORT_SYMBOL(dalrpc_fcn_9); uint32_t dalrpc_fcn_10(uint32_t ddi_idx, void *handle, uint32_t s1, const void *ibuf, uint32_t ilen, void *obuf, uint32_t olen, uint32_t *oalen) { struct daldevice_handle *h = handle; uint32_t ret; int param_idx; if ((ilen + 12) > DALRPC_MAX_PARAMS_SIZE || (olen + 4) > DALRPC_MAX_PARAMS_SIZE) return -EINVAL; if (!client_exists(h)) return -EINVAL; mutex_lock(&h->client_lock); dalrpc_ddi_prologue(ddi_idx, h); h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 12 + ROUND_BUFLEN(ilen); h->msg.hdr.proto_id = 10; h->msg.param[0] = s1; h->msg.param[1] = ilen; memcpy(&h->msg.param[2], ibuf, ilen); param_idx = (ROUND_BUFLEN(ilen) / 4) + 2; h->msg.param[param_idx] = olen; dalrpc_sendwait(h); if (h->msg.param[0] == DALRPC_SUCCESS) { if (h->msg.param[1] > olen) { mutex_unlock(&h->client_lock); return -EIO; } *oalen = h->msg.param[1]; memcpy(obuf, &h->msg.param[2], h->msg.param[1]); } ret = h->msg.param[0]; mutex_unlock(&h->client_lock); return ret; } EXPORT_SYMBOL(dalrpc_fcn_10); uint32_t dalrpc_fcn_11(uint32_t ddi_idx, void *handle, uint32_t s1, void *obuf, uint32_t olen) { struct daldevice_handle *h = handle; uint32_t ret; if ((olen + 4) > DALRPC_MAX_PARAMS_SIZE) return -EINVAL; if (!client_exists(h)) return -EINVAL; mutex_lock(&h->client_lock); dalrpc_ddi_prologue(ddi_idx, h); h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 8; h->msg.hdr.proto_id = 11; h->msg.param[0] = s1; h->msg.param[1] = olen; dalrpc_sendwait(h); if (h->msg.param[0] == DALRPC_SUCCESS) { if (h->msg.param[1] > olen) { mutex_unlock(&h->client_lock); return -EIO; } memcpy(obuf, &h->msg.param[2], h->msg.param[1]); } ret = h->msg.param[0]; mutex_unlock(&h->client_lock); return 
ret; } EXPORT_SYMBOL(dalrpc_fcn_11); uint32_t dalrpc_fcn_12(uint32_t ddi_idx, void *handle, uint32_t s1, void *obuf, uint32_t olen, uint32_t *oalen) { struct daldevice_handle *h = handle; uint32_t ret; if ((olen + 4) > DALRPC_MAX_PARAMS_SIZE) return -EINVAL; if (!client_exists(h)) return -EINVAL; mutex_lock(&h->client_lock); dalrpc_ddi_prologue(ddi_idx, h); h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 8; h->msg.hdr.proto_id = 12; h->msg.param[0] = s1; h->msg.param[1] = olen; dalrpc_sendwait(h); if (h->msg.param[0] == DALRPC_SUCCESS) { if (h->msg.param[1] > olen) { mutex_unlock(&h->client_lock); return -EIO; } *oalen = h->msg.param[1]; memcpy(obuf, &h->msg.param[2], h->msg.param[1]); } ret = h->msg.param[0]; mutex_unlock(&h->client_lock); return ret; } EXPORT_SYMBOL(dalrpc_fcn_12); uint32_t dalrpc_fcn_13(uint32_t ddi_idx, void *handle, const void *ibuf, uint32_t ilen, const void *ibuf2, uint32_t ilen2, void *obuf, uint32_t olen) { struct daldevice_handle *h = handle; uint32_t ret; int param_idx; if ((ilen + ilen2 + 12) > DALRPC_MAX_PARAMS_SIZE || (olen + 4) > DALRPC_MAX_PARAMS_SIZE) return -EINVAL; if (!client_exists(h)) return -EINVAL; mutex_lock(&h->client_lock); dalrpc_ddi_prologue(ddi_idx, h); h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 12 + ROUND_BUFLEN(ilen) + ROUND_BUFLEN(ilen2); h->msg.hdr.proto_id = 13; h->msg.param[0] = ilen; memcpy(&h->msg.param[1], ibuf, ilen); param_idx = (ROUND_BUFLEN(ilen) / 4) + 1; h->msg.param[param_idx++] = ilen2; memcpy(&h->msg.param[param_idx], ibuf2, ilen2); param_idx += (ROUND_BUFLEN(ilen2) / 4); h->msg.param[param_idx] = olen; dalrpc_sendwait(h); if (h->msg.param[0] == DALRPC_SUCCESS) { if (h->msg.param[1] > olen) { mutex_unlock(&h->client_lock); return -EIO; } memcpy(obuf, &h->msg.param[2], h->msg.param[1]); } ret = h->msg.param[0]; mutex_unlock(&h->client_lock); return ret; } EXPORT_SYMBOL(dalrpc_fcn_13); uint32_t dalrpc_fcn_14(uint32_t ddi_idx, void *handle, const void *ibuf, uint32_t ilen, void *obuf, uint32_t 
olen, void *obuf2, uint32_t olen2, uint32_t *oalen2) { struct daldevice_handle *h = handle; uint32_t ret; int param_idx; if ((ilen + 12) > DALRPC_MAX_PARAMS_SIZE || (olen + olen2 + 8) > DALRPC_MAX_PARAMS_SIZE) return -EINVAL; if (!client_exists(h)) return -EINVAL; mutex_lock(&h->client_lock); dalrpc_ddi_prologue(ddi_idx, h); h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 12 + ROUND_BUFLEN(ilen); h->msg.hdr.proto_id = 14; h->msg.param[0] = ilen; memcpy(&h->msg.param[1], ibuf, ilen); param_idx = (ROUND_BUFLEN(ilen) / 4) + 1; h->msg.param[param_idx++] = olen; h->msg.param[param_idx] = olen2; dalrpc_sendwait(h); if (h->msg.param[0] == DALRPC_SUCCESS) { if (h->msg.param[1] > olen) { mutex_unlock(&h->client_lock); return -EIO; } param_idx = (ROUND_BUFLEN(h->msg.param[1]) / 4) + 2; if (h->msg.param[param_idx] > olen2) { mutex_unlock(&h->client_lock); return -EIO; } memcpy(obuf, &h->msg.param[2], h->msg.param[1]); memcpy(obuf2, &h->msg.param[param_idx + 1], h->msg.param[param_idx]); *oalen2 = h->msg.param[param_idx]; } ret = h->msg.param[0]; mutex_unlock(&h->client_lock); return ret; } EXPORT_SYMBOL(dalrpc_fcn_14); uint32_t dalrpc_fcn_15(uint32_t ddi_idx, void *handle, const void *ibuf, uint32_t ilen, const void *ibuf2, uint32_t ilen2, void *obuf, uint32_t olen, uint32_t *oalen, void *obuf2, uint32_t olen2) { struct daldevice_handle *h = handle; uint32_t ret; int param_idx; if ((ilen + ilen2 + 16) > DALRPC_MAX_PARAMS_SIZE || (olen + olen2 + 8) > DALRPC_MAX_PARAMS_SIZE) return -EINVAL; if (!client_exists(h)) return -EINVAL; mutex_lock(&h->client_lock); dalrpc_ddi_prologue(ddi_idx, h); h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 16 + ROUND_BUFLEN(ilen) + ROUND_BUFLEN(ilen2); h->msg.hdr.proto_id = 15; h->msg.param[0] = ilen; memcpy(&h->msg.param[1], ibuf, ilen); param_idx = (ROUND_BUFLEN(ilen) / 4) + 1; h->msg.param[param_idx++] = ilen2; memcpy(&h->msg.param[param_idx], ibuf2, ilen2); param_idx += (ROUND_BUFLEN(ilen2) / 4); h->msg.param[param_idx++] = olen; 
h->msg.param[param_idx] = olen2; dalrpc_sendwait(h); if (h->msg.param[0] == DALRPC_SUCCESS) { if (h->msg.param[1] > olen) { mutex_unlock(&h->client_lock); return -EIO; } param_idx = (ROUND_BUFLEN(h->msg.param[1]) / 4) + 2; if (h->msg.param[param_idx] > olen2) { mutex_unlock(&h->client_lock); return -EIO; } memcpy(obuf, &h->msg.param[2], h->msg.param[1]); memcpy(obuf2, &h->msg.param[param_idx + 1], h->msg.param[param_idx]); *oalen = h->msg.param[1]; } ret = h->msg.param[0]; mutex_unlock(&h->client_lock); return ret; } EXPORT_SYMBOL(dalrpc_fcn_15); void *dalrpc_alloc_event(void *handle) { struct daldevice_handle *h; struct dalrpc_event_handle *ev; h = (struct daldevice_handle *)handle; if (!client_exists(h)) return NULL; ev = kmalloc(sizeof(struct dalrpc_event_handle), GFP_KERNEL); if (!ev) return NULL; ev->flag = 0; spin_lock_init(&ev->lock); mutex_lock(&h->port->event_list_lock); list_add(&ev->list, &h->port->event_list); mutex_unlock(&h->port->event_list_lock); return ev; } EXPORT_SYMBOL(dalrpc_alloc_event); void *dalrpc_alloc_cb(void *handle, void (*fn)(void *, uint32_t, void *, uint32_t), void *context) { struct daldevice_handle *h; struct dalrpc_cb_handle *cb; h = (struct daldevice_handle *)handle; if (!client_exists(h)) return NULL; cb = kmalloc(sizeof(struct dalrpc_cb_handle), GFP_KERNEL); if (!cb) return NULL; cb->fn = fn; cb->context = context; mutex_lock(&h->port->cb_list_lock); list_add(&cb->list, &h->port->cb_list); mutex_unlock(&h->port->cb_list_lock); return cb; } EXPORT_SYMBOL(dalrpc_alloc_cb); void dalrpc_dealloc_event(void *handle, void *ev_h) { struct daldevice_handle *h; struct dalrpc_event_handle *ev; h = (struct daldevice_handle *)handle; ev = (struct dalrpc_event_handle *)ev_h; mutex_lock(&h->port->event_list_lock); list_del(&ev->list); mutex_unlock(&h->port->event_list_lock); kfree(ev); } EXPORT_SYMBOL(dalrpc_dealloc_event); void dalrpc_dealloc_cb(void *handle, void *cb_h) { struct daldevice_handle *h; struct dalrpc_cb_handle *cb; h = (struct 
daldevice_handle *)handle; cb = (struct dalrpc_cb_handle *)cb_h; mutex_lock(&h->port->cb_list_lock); list_del(&cb->list); mutex_unlock(&h->port->cb_list_lock); kfree(cb); } EXPORT_SYMBOL(dalrpc_dealloc_cb); static int event_occurred(int num_events, struct dalrpc_event_handle **events, int *occurred) { int i; for (i = 0; i < num_events; i++) { spin_lock(&events[i]->lock); if (events[i]->flag) { events[i]->flag = 0; spin_unlock(&events[i]->lock); *occurred = i; return 1; } spin_unlock(&events[i]->lock); } return 0; } int dalrpc_event_wait_multiple(int num, void **ev_h, int timeout) { struct dalrpc_event_handle **events; int ret, occurred; events = (struct dalrpc_event_handle **)ev_h; if (timeout == DALRPC_TIMEOUT_INFINITE) { wait_event(event_wq, event_occurred(num, events, &occurred)); return occurred; } ret = wait_event_timeout(event_wq, event_occurred(num, events, &occurred), timeout); if (ret > 0) return occurred; else return -ETIMEDOUT; } EXPORT_SYMBOL(dalrpc_event_wait_multiple);
gpl-2.0
wenfengliaoshuzhai/linux
net/rds/rdma.c
926
23038
/* * Copyright (c) 2007 Oracle. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/rbtree.h> #include <linux/dma-mapping.h> /* for DMA_*_DEVICE */ #include "rds.h" /* * XXX * - build with sparse * - should we limit the size of a mr region? let transport return failure? * - should we detect duplicate keys on a socket? hmm. * - an rdma is an mlock, apply rlimit? */ /* * get the number of pages by looking at the page indices that the start and * end addresses fall in. * * Returns 0 if the vec is invalid. It is invalid if the number of bytes * causes the address to wrap or overflows an unsigned int. This comes * from being stored in the 'length' member of 'struct scatterlist'. 
*/ static unsigned int rds_pages_in_vec(struct rds_iovec *vec) { if ((vec->addr + vec->bytes <= vec->addr) || (vec->bytes > (u64)UINT_MAX)) return 0; return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) - (vec->addr >> PAGE_SHIFT); } static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key, struct rds_mr *insert) { struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; struct rds_mr *mr; while (*p) { parent = *p; mr = rb_entry(parent, struct rds_mr, r_rb_node); if (key < mr->r_key) p = &(*p)->rb_left; else if (key > mr->r_key) p = &(*p)->rb_right; else return mr; } if (insert) { rb_link_node(&insert->r_rb_node, parent, p); rb_insert_color(&insert->r_rb_node, root); atomic_inc(&insert->r_refcount); } return NULL; } /* * Destroy the transport-specific part of a MR. */ static void rds_destroy_mr(struct rds_mr *mr) { struct rds_sock *rs = mr->r_sock; void *trans_private = NULL; unsigned long flags; rdsdebug("RDS: destroy mr key is %x refcnt %u\n", mr->r_key, atomic_read(&mr->r_refcount)); if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state)) return; spin_lock_irqsave(&rs->rs_rdma_lock, flags); if (!RB_EMPTY_NODE(&mr->r_rb_node)) rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys); trans_private = mr->r_trans_private; mr->r_trans_private = NULL; spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); if (trans_private) mr->r_trans->free_mr(trans_private, mr->r_invalidate); } void __rds_put_mr_final(struct rds_mr *mr) { rds_destroy_mr(mr); kfree(mr); } /* * By the time this is called we can't have any more ioctls called on * the socket so we don't need to worry about racing with others. 
*/ void rds_rdma_drop_keys(struct rds_sock *rs) { struct rds_mr *mr; struct rb_node *node; unsigned long flags; /* Release any MRs associated with this socket */ spin_lock_irqsave(&rs->rs_rdma_lock, flags); while ((node = rb_first(&rs->rs_rdma_keys))) { mr = container_of(node, struct rds_mr, r_rb_node); if (mr->r_trans == rs->rs_transport) mr->r_invalidate = 0; rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys); RB_CLEAR_NODE(&mr->r_rb_node); spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); rds_destroy_mr(mr); rds_mr_put(mr); spin_lock_irqsave(&rs->rs_rdma_lock, flags); } spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); if (rs->rs_transport && rs->rs_transport->flush_mrs) rs->rs_transport->flush_mrs(); } /* * Helper function to pin user pages. */ static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages, struct page **pages, int write) { int ret; ret = get_user_pages_fast(user_addr, nr_pages, write, pages); if (ret >= 0 && ret < nr_pages) { while (ret--) put_page(pages[ret]); ret = -EFAULT; } return ret; } static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args, u64 *cookie_ret, struct rds_mr **mr_ret) { struct rds_mr *mr = NULL, *found; unsigned int nr_pages; struct page **pages = NULL; struct scatterlist *sg; void *trans_private; unsigned long flags; rds_rdma_cookie_t cookie; unsigned int nents; long i; int ret; if (rs->rs_bound_addr == 0) { ret = -ENOTCONN; /* XXX not a great errno */ goto out; } if (!rs->rs_transport->get_mr) { ret = -EOPNOTSUPP; goto out; } nr_pages = rds_pages_in_vec(&args->vec); if (nr_pages == 0) { ret = -EINVAL; goto out; } rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n", args->vec.addr, args->vec.bytes, nr_pages); /* XXX clamp nr_pages to limit the size of this alloc? 
*/ pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); if (!pages) { ret = -ENOMEM; goto out; } mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL); if (!mr) { ret = -ENOMEM; goto out; } atomic_set(&mr->r_refcount, 1); RB_CLEAR_NODE(&mr->r_rb_node); mr->r_trans = rs->rs_transport; mr->r_sock = rs; if (args->flags & RDS_RDMA_USE_ONCE) mr->r_use_once = 1; if (args->flags & RDS_RDMA_INVALIDATE) mr->r_invalidate = 1; if (args->flags & RDS_RDMA_READWRITE) mr->r_write = 1; /* * Pin the pages that make up the user buffer and transfer the page * pointers to the mr's sg array. We check to see if we've mapped * the whole region after transferring the partial page references * to the sg array so that we can have one page ref cleanup path. * * For now we have no flag that tells us whether the mapping is * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to * the zero page. */ ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1); if (ret < 0) goto out; nents = ret; sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL); if (!sg) { ret = -ENOMEM; goto out; } WARN_ON(!nents); sg_init_table(sg, nents); /* Stick all pages into the scatterlist */ for (i = 0 ; i < nents; i++) sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0); rdsdebug("RDS: trans_private nents is %u\n", nents); /* Obtain a transport specific MR. If this succeeds, the * s/g list is now owned by the MR. * Note that dma_map() implies that pending writes are * flushed to RAM, so no dma_sync is needed here. */ trans_private = rs->rs_transport->get_mr(sg, nents, rs, &mr->r_key); if (IS_ERR(trans_private)) { for (i = 0 ; i < nents; i++) put_page(sg_page(&sg[i])); kfree(sg); ret = PTR_ERR(trans_private); goto out; } mr->r_trans_private = trans_private; rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n", mr->r_key, (void *)(unsigned long) args->cookie_addr); /* The user may pass us an unaligned address, but we can only * map page aligned regions. 
So we keep the offset, and build * a 64bit cookie containing <R_Key, offset> and pass that * around. */ cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK); if (cookie_ret) *cookie_ret = cookie; if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) { ret = -EFAULT; goto out; } /* Inserting the new MR into the rbtree bumps its * reference count. */ spin_lock_irqsave(&rs->rs_rdma_lock, flags); found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr); spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); BUG_ON(found && found != mr); rdsdebug("RDS: get_mr key is %x\n", mr->r_key); if (mr_ret) { atomic_inc(&mr->r_refcount); *mr_ret = mr; } ret = 0; out: kfree(pages); if (mr) rds_mr_put(mr); return ret; } int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen) { struct rds_get_mr_args args; if (optlen != sizeof(struct rds_get_mr_args)) return -EINVAL; if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval, sizeof(struct rds_get_mr_args))) return -EFAULT; return __rds_rdma_map(rs, &args, NULL, NULL); } int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen) { struct rds_get_mr_for_dest_args args; struct rds_get_mr_args new_args; if (optlen != sizeof(struct rds_get_mr_for_dest_args)) return -EINVAL; if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval, sizeof(struct rds_get_mr_for_dest_args))) return -EFAULT; /* * Initially, just behave like get_mr(). * TODO: Implement get_mr as wrapper around this * and deprecate it. 
*/ new_args.vec = args.vec; new_args.cookie_addr = args.cookie_addr; new_args.flags = args.flags; return __rds_rdma_map(rs, &new_args, NULL, NULL); } /* * Free the MR indicated by the given R_Key */ int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen) { struct rds_free_mr_args args; struct rds_mr *mr; unsigned long flags; if (optlen != sizeof(struct rds_free_mr_args)) return -EINVAL; if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval, sizeof(struct rds_free_mr_args))) return -EFAULT; /* Special case - a null cookie means flush all unused MRs */ if (args.cookie == 0) { if (!rs->rs_transport || !rs->rs_transport->flush_mrs) return -EINVAL; rs->rs_transport->flush_mrs(); return 0; } /* Look up the MR given its R_key and remove it from the rbtree * so nobody else finds it. * This should also prevent races with rds_rdma_unuse. */ spin_lock_irqsave(&rs->rs_rdma_lock, flags); mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL); if (mr) { rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys); RB_CLEAR_NODE(&mr->r_rb_node); if (args.flags & RDS_RDMA_INVALIDATE) mr->r_invalidate = 1; } spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); if (!mr) return -EINVAL; /* * call rds_destroy_mr() ourselves so that we're sure it's done by the time * we return. If we let rds_mr_put() do it it might not happen until * someone else drops their ref. */ rds_destroy_mr(mr); rds_mr_put(mr); return 0; } /* * This is called when we receive an extension header that * tells us this MR was used. 
It allows us to implement * use_once semantics */ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force) { struct rds_mr *mr; unsigned long flags; int zot_me = 0; spin_lock_irqsave(&rs->rs_rdma_lock, flags); mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL); if (!mr) { printk(KERN_ERR "rds: trying to unuse MR with unknown r_key %u!\n", r_key); spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); return; } if (mr->r_use_once || force) { rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys); RB_CLEAR_NODE(&mr->r_rb_node); zot_me = 1; } spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); /* May have to issue a dma_sync on this memory region. * Note we could avoid this if the operation was a RDMA READ, * but at this point we can't tell. */ if (mr->r_trans->sync_mr) mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE); /* If the MR was marked as invalidate, this will * trigger an async flush. */ if (zot_me) rds_destroy_mr(mr); rds_mr_put(mr); } void rds_rdma_free_op(struct rm_rdma_op *ro) { unsigned int i; for (i = 0; i < ro->op_nents; i++) { struct page *page = sg_page(&ro->op_sg[i]); /* Mark page dirty if it was possibly modified, which * is the case for a RDMA_READ which copies from remote * to local memory */ if (!ro->op_write) { BUG_ON(irqs_disabled()); set_page_dirty(page); } put_page(page); } kfree(ro->op_notifier); ro->op_notifier = NULL; ro->op_active = 0; } void rds_atomic_free_op(struct rm_atomic_op *ao) { struct page *page = sg_page(ao->op_sg); /* Mark page dirty if it was possibly modified, which * is the case for a RDMA_READ which copies from remote * to local memory */ set_page_dirty(page); put_page(page); kfree(ao->op_notifier); ao->op_notifier = NULL; ao->op_active = 0; } /* * Count the number of pages needed to describe an incoming iovec array. 
*/ static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs) { int tot_pages = 0; unsigned int nr_pages; unsigned int i; /* figure out the number of pages in the vector */ for (i = 0; i < nr_iovecs; i++) { nr_pages = rds_pages_in_vec(&iov[i]); if (nr_pages == 0) return -EINVAL; tot_pages += nr_pages; /* * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1, * so tot_pages cannot overflow without first going negative. */ if (tot_pages < 0) return -EINVAL; } return tot_pages; } int rds_rdma_extra_size(struct rds_rdma_args *args) { struct rds_iovec vec; struct rds_iovec __user *local_vec; int tot_pages = 0; unsigned int nr_pages; unsigned int i; local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr; /* figure out the number of pages in the vector */ for (i = 0; i < args->nr_local; i++) { if (copy_from_user(&vec, &local_vec[i], sizeof(struct rds_iovec))) return -EFAULT; nr_pages = rds_pages_in_vec(&vec); if (nr_pages == 0) return -EINVAL; tot_pages += nr_pages; /* * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1, * so tot_pages cannot overflow without first going negative. */ if (tot_pages < 0) return -EINVAL; } return tot_pages * sizeof(struct scatterlist); } /* * The application asks for a RDMA transfer. 
* Extract all arguments and set up the rdma_op */ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, struct cmsghdr *cmsg) { struct rds_rdma_args *args; struct rm_rdma_op *op = &rm->rdma; int nr_pages; unsigned int nr_bytes; struct page **pages = NULL; struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack; int iov_size; unsigned int i, j; int ret = 0; if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args)) || rm->rdma.op_active) return -EINVAL; args = CMSG_DATA(cmsg); if (rs->rs_bound_addr == 0) { ret = -ENOTCONN; /* XXX not a great errno */ goto out_ret; } if (args->nr_local > UIO_MAXIOV) { ret = -EMSGSIZE; goto out_ret; } /* Check whether to allocate the iovec area */ iov_size = args->nr_local * sizeof(struct rds_iovec); if (args->nr_local > UIO_FASTIOV) { iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL); if (!iovs) { ret = -ENOMEM; goto out_ret; } } if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) { ret = -EFAULT; goto out; } nr_pages = rds_rdma_pages(iovs, args->nr_local); if (nr_pages < 0) { ret = -EINVAL; goto out; } pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); if (!pages) { ret = -ENOMEM; goto out; } op->op_write = !!(args->flags & RDS_RDMA_READWRITE); op->op_fence = !!(args->flags & RDS_RDMA_FENCE); op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME); op->op_silent = !!(args->flags & RDS_RDMA_SILENT); op->op_active = 1; op->op_recverr = rs->rs_recverr; WARN_ON(!nr_pages); op->op_sg = rds_message_alloc_sgs(rm, nr_pages); if (!op->op_sg) { ret = -ENOMEM; goto out; } if (op->op_notify || op->op_recverr) { /* We allocate an uninitialized notifier here, because * we don't want to do that in the completion handler. We * would have to use GFP_ATOMIC there, and don't want to deal * with failed allocations. 
*/ op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL); if (!op->op_notifier) { ret = -ENOMEM; goto out; } op->op_notifier->n_user_token = args->user_token; op->op_notifier->n_status = RDS_RDMA_SUCCESS; } /* The cookie contains the R_Key of the remote memory region, and * optionally an offset into it. This is how we implement RDMA into * unaligned memory. * When setting up the RDMA, we need to add that offset to the * destination address (which is really an offset into the MR) * FIXME: We may want to move this into ib_rdma.c */ op->op_rkey = rds_rdma_cookie_key(args->cookie); op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie); nr_bytes = 0; rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n", (unsigned long long)args->nr_local, (unsigned long long)args->remote_vec.addr, op->op_rkey); for (i = 0; i < args->nr_local; i++) { struct rds_iovec *iov = &iovs[i]; /* don't need to check, rds_rdma_pages() verified nr will be +nonzero */ unsigned int nr = rds_pages_in_vec(iov); rs->rs_user_addr = iov->addr; rs->rs_user_bytes = iov->bytes; /* If it's a WRITE operation, we want to pin the pages for reading. * If it's a READ operation, we need to pin the pages for writing. 
*/ ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write); if (ret < 0) goto out; rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n", nr_bytes, nr, iov->bytes, iov->addr); nr_bytes += iov->bytes; for (j = 0; j < nr; j++) { unsigned int offset = iov->addr & ~PAGE_MASK; struct scatterlist *sg; sg = &op->op_sg[op->op_nents + j]; sg_set_page(sg, pages[j], min_t(unsigned int, iov->bytes, PAGE_SIZE - offset), offset); rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n", sg->offset, sg->length, iov->addr, iov->bytes); iov->addr += sg->length; iov->bytes -= sg->length; } op->op_nents += nr; } if (nr_bytes > args->remote_vec.bytes) { rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n", nr_bytes, (unsigned int) args->remote_vec.bytes); ret = -EINVAL; goto out; } op->op_bytes = nr_bytes; out: if (iovs != iovstack) sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size); kfree(pages); out_ret: if (ret) rds_rdma_free_op(op); else rds_stats_inc(s_send_rdma); return ret; } /* * The application wants us to pass an RDMA destination (aka MR) * to the remote */ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm, struct cmsghdr *cmsg) { unsigned long flags; struct rds_mr *mr; u32 r_key; int err = 0; if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) || rm->m_rdma_cookie != 0) return -EINVAL; memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie)); /* We are reusing a previously mapped MR here. Most likely, the * application has written to the buffer, so we need to explicitly * flush those writes to RAM. Otherwise the HCA may not see them * when doing a DMA from that buffer. 
*/ r_key = rds_rdma_cookie_key(rm->m_rdma_cookie); spin_lock_irqsave(&rs->rs_rdma_lock, flags); mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL); if (!mr) err = -EINVAL; /* invalid r_key */ else atomic_inc(&mr->r_refcount); spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); if (mr) { mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE); rm->rdma.op_rdma_mr = mr; } return err; } /* * The application passes us an address range it wants to enable RDMA * to/from. We map the area, and save the <R_Key,offset> pair * in rm->m_rdma_cookie. This causes it to be sent along to the peer * in an extension header. */ int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm, struct cmsghdr *cmsg) { if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) || rm->m_rdma_cookie != 0) return -EINVAL; return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr); } /* * Fill in rds_message for an atomic request. */ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm, struct cmsghdr *cmsg) { struct page *page = NULL; struct rds_atomic_args *args; int ret = 0; if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args)) || rm->atomic.op_active) return -EINVAL; args = CMSG_DATA(cmsg); /* Nonmasked & masked cmsg ops converted to masked hw ops */ switch (cmsg->cmsg_type) { case RDS_CMSG_ATOMIC_FADD: rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD; rm->atomic.op_m_fadd.add = args->fadd.add; rm->atomic.op_m_fadd.nocarry_mask = 0; break; case RDS_CMSG_MASKED_ATOMIC_FADD: rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD; rm->atomic.op_m_fadd.add = args->m_fadd.add; rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask; break; case RDS_CMSG_ATOMIC_CSWP: rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP; rm->atomic.op_m_cswp.compare = args->cswp.compare; rm->atomic.op_m_cswp.swap = args->cswp.swap; rm->atomic.op_m_cswp.compare_mask = ~0; rm->atomic.op_m_cswp.swap_mask = ~0; break; case RDS_CMSG_MASKED_ATOMIC_CSWP: rm->atomic.op_type = 
RDS_ATOMIC_TYPE_CSWP; rm->atomic.op_m_cswp.compare = args->m_cswp.compare; rm->atomic.op_m_cswp.swap = args->m_cswp.swap; rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask; rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask; break; default: BUG(); /* should never happen */ } rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME); rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT); rm->atomic.op_active = 1; rm->atomic.op_recverr = rs->rs_recverr; rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1); if (!rm->atomic.op_sg) { ret = -ENOMEM; goto err; } /* verify 8 byte-aligned */ if (args->local_addr & 0x7) { ret = -EFAULT; goto err; } ret = rds_pin_pages(args->local_addr, 1, &page, 1); if (ret != 1) goto err; ret = 0; sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr)); if (rm->atomic.op_notify || rm->atomic.op_recverr) { /* We allocate an uninitialized notifier here, because * we don't want to do that in the completion handler. We * would have to use GFP_ATOMIC there, and don't want to deal * with failed allocations. */ rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL); if (!rm->atomic.op_notifier) { ret = -ENOMEM; goto err; } rm->atomic.op_notifier->n_user_token = args->user_token; rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS; } rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie); rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie); return ret; err: if (page) put_page(page); kfree(rm->atomic.op_notifier); return ret; }
gpl-2.0
gokhanmoral/siyahkernel3
sound/soc/pxa/zylonite.c
2718
7019
/* * zylonite.c -- SoC audio for Zylonite * * Copyright 2008 Wolfson Microelectronics PLC. * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <linux/clk.h> #include <linux/i2c.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include "../codecs/wm9713.h" #include "pxa2xx-ac97.h" #include "pxa-ssp.h" /* * There is a physical switch SW15 on the board which changes the MCLK * for the WM9713 between the standard AC97 master clock and the * output of the CLK_POUT signal from the PXA. */ static int clk_pout; module_param(clk_pout, int, 0); MODULE_PARM_DESC(clk_pout, "Use CLK_POUT as WM9713 MCLK (SW15 on board)."); static struct clk *pout; static struct snd_soc_card zylonite; static const struct snd_soc_dapm_widget zylonite_dapm_widgets[] = { SND_SOC_DAPM_HP("Headphone", NULL), SND_SOC_DAPM_MIC("Headset Microphone", NULL), SND_SOC_DAPM_MIC("Handset Microphone", NULL), SND_SOC_DAPM_SPK("Multiactor", NULL), SND_SOC_DAPM_SPK("Headset Earpiece", NULL), }; /* Currently supported audio map */ static const struct snd_soc_dapm_route audio_map[] = { /* Headphone output connected to HPL/HPR */ { "Headphone", NULL, "HPL" }, { "Headphone", NULL, "HPR" }, /* On-board earpiece */ { "Headset Earpiece", NULL, "OUT3" }, /* Headphone mic */ { "MIC2A", NULL, "Mic Bias" }, { "Mic Bias", NULL, "Headset Microphone" }, /* On-board mic */ { "MIC1", NULL, "Mic Bias" }, { "Mic Bias", NULL, "Handset Microphone" }, /* Multiactor differentially connected over SPKL/SPKR */ { "Multiactor", NULL, "SPKL" }, { "Multiactor", NULL, "SPKR" }, }; static int zylonite_wm9713_init(struct snd_soc_pcm_runtime 
*rtd) { struct snd_soc_codec *codec = rtd->codec; struct snd_soc_dapm_context *dapm = &codec->dapm; if (clk_pout) snd_soc_dai_set_pll(rtd->codec_dai, 0, 0, clk_get_rate(pout), 0); snd_soc_dapm_new_controls(dapm, zylonite_dapm_widgets, ARRAY_SIZE(zylonite_dapm_widgets)); snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); /* Static setup for now */ snd_soc_dapm_enable_pin(dapm, "Headphone"); snd_soc_dapm_enable_pin(dapm, "Headset Earpiece"); snd_soc_dapm_sync(dapm); return 0; } static int zylonite_voice_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; unsigned int pll_out = 0; unsigned int wm9713_div = 0; int ret = 0; int rate = params_rate(params); int width = snd_pcm_format_physical_width(params_format(params)); /* Only support ratios that we can generate neatly from the AC97 * based master clock - in particular, this excludes 44.1kHz. * In most applications the voice DAC will be used for telephony * data so multiples of 8kHz will be the common case. 
*/ switch (rate) { case 8000: wm9713_div = 12; break; case 16000: wm9713_div = 6; break; case 48000: wm9713_div = 2; break; default: /* Don't support OSS emulation */ return -EINVAL; } /* Add 1 to the width for the leading clock cycle */ pll_out = rate * (width + 1) * 8; ret = snd_soc_dai_set_sysclk(cpu_dai, PXA_SSP_CLK_AUDIO, 0, 1); if (ret < 0) return ret; ret = snd_soc_dai_set_pll(cpu_dai, 0, 0, 0, pll_out); if (ret < 0) return ret; if (clk_pout) ret = snd_soc_dai_set_clkdiv(codec_dai, WM9713_PCMCLK_PLL_DIV, WM9713_PCMDIV(wm9713_div)); else ret = snd_soc_dai_set_clkdiv(codec_dai, WM9713_PCMCLK_DIV, WM9713_PCMDIV(wm9713_div)); if (ret < 0) return ret; ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS); if (ret < 0) return ret; ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS); if (ret < 0) return ret; return 0; } static struct snd_soc_ops zylonite_voice_ops = { .hw_params = zylonite_voice_hw_params, }; static struct snd_soc_dai_link zylonite_dai[] = { { .name = "AC97", .stream_name = "AC97 HiFi", .codec_name = "wm9713-codec", .platform_name = "pxa-pcm-audio", .cpu_dai_name = "pxa2xx-ac97", .codec_dai_name = "wm9713-hifi", .init = zylonite_wm9713_init, }, { .name = "AC97 Aux", .stream_name = "AC97 Aux", .codec_name = "wm9713-codec", .platform_name = "pxa-pcm-audio", .cpu_dai_name = "pxa2xx-ac97-aux", .codec_dai_name = "wm9713-aux", }, { .name = "WM9713 Voice", .stream_name = "WM9713 Voice", .codec_name = "wm9713-codec", .platform_name = "pxa-pcm-audio", .cpu_dai_name = "pxa-ssp-dai.2", .codec_dai_name = "wm9713-voice", .ops = &zylonite_voice_ops, }, }; static int zylonite_probe(struct snd_soc_card *card) { int ret; if (clk_pout) { pout = clk_get(NULL, "CLK_POUT"); if (IS_ERR(pout)) { dev_err(card->dev, "Unable to obtain CLK_POUT: %ld\n", PTR_ERR(pout)); return PTR_ERR(pout); } ret = clk_enable(pout); if (ret != 0) { dev_err(card->dev, "Unable to enable 
CLK_POUT: %d\n", ret); clk_put(pout); return ret; } dev_dbg(card->dev, "MCLK enabled at %luHz\n", clk_get_rate(pout)); } return 0; } static int zylonite_remove(struct snd_soc_card *card) { if (clk_pout) { clk_disable(pout); clk_put(pout); } return 0; } static int zylonite_suspend_post(struct snd_soc_card *card) { if (clk_pout) clk_disable(pout); return 0; } static int zylonite_resume_pre(struct snd_soc_card *card) { int ret = 0; if (clk_pout) { ret = clk_enable(pout); if (ret != 0) dev_err(card->dev, "Unable to enable CLK_POUT: %d\n", ret); } return ret; } static struct snd_soc_card zylonite = { .name = "Zylonite", .probe = &zylonite_probe, .remove = &zylonite_remove, .suspend_post = &zylonite_suspend_post, .resume_pre = &zylonite_resume_pre, .dai_link = zylonite_dai, .num_links = ARRAY_SIZE(zylonite_dai), .owner = THIS_MODULE, }; static struct platform_device *zylonite_snd_ac97_device; static int __init zylonite_init(void) { int ret; zylonite_snd_ac97_device = platform_device_alloc("soc-audio", -1); if (!zylonite_snd_ac97_device) return -ENOMEM; platform_set_drvdata(zylonite_snd_ac97_device, &zylonite); ret = platform_device_add(zylonite_snd_ac97_device); if (ret != 0) platform_device_put(zylonite_snd_ac97_device); return ret; } static void __exit zylonite_exit(void) { platform_device_unregister(zylonite_snd_ac97_device); } module_init(zylonite_init); module_exit(zylonite_exit); MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); MODULE_DESCRIPTION("ALSA SoC WM9713 Zylonite"); MODULE_LICENSE("GPL");
gpl-2.0
munjeni/kernel_htc_golfu
drivers/scsi/libsas/sas_task.c
3230
1057
#include <linux/kernel.h> #include <scsi/sas.h> #include <scsi/libsas.h> /* fill task_status_struct based on SSP response frame */ void sas_ssp_task_response(struct device *dev, struct sas_task *task, struct ssp_response_iu *iu) { struct task_status_struct *tstat = &task->task_status; tstat->resp = SAS_TASK_COMPLETE; if (iu->datapres == 0) tstat->stat = iu->status; else if (iu->datapres == 1) tstat->stat = iu->resp_data[3]; else if (iu->datapres == 2) { tstat->stat = SAM_STAT_CHECK_CONDITION; tstat->buf_valid_size = min_t(int, SAS_STATUS_BUF_SIZE, be32_to_cpu(iu->sense_data_len)); memcpy(tstat->buf, iu->sense_data, tstat->buf_valid_size); if (iu->status != SAM_STAT_CHECK_CONDITION) dev_printk(KERN_WARNING, dev, "dev %llx sent sense data, but " "stat(%x) is not CHECK CONDITION\n", SAS_ADDR(task->dev->sas_addr), iu->status); } else /* when datapres contains corrupt/unknown value... */ tstat->stat = SAM_STAT_CHECK_CONDITION; } EXPORT_SYMBOL_GPL(sas_ssp_task_response);
gpl-2.0
MinimalOS/android_kernel_moto_shamu
drivers/macintosh/windfarm_core.c
3486
11318
/* * Windfarm PowerMac thermal control. Core * * (c) Copyright 2005 Benjamin Herrenschmidt, IBM Corp. * <benh@kernel.crashing.org> * * Released under the term of the GNU GPL v2. * * This core code tracks the list of sensors & controls, register * clients, and holds the kernel thread used for control. * * TODO: * * Add some information about sensor/control type and data format to * sensors/controls, and have the sysfs attribute stuff be moved * generically here instead of hard coded in the platform specific * driver as it us currently * * This however requires solving some annoying lifetime issues with * sysfs which doesn't seem to have lifetime rules for struct attribute, * I may have to create full features kobjects for every sensor/control * instead which is a bit of an overkill imho */ #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/jiffies.h> #include <linux/reboot.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/freezer.h> #include <asm/prom.h> #include "windfarm.h" #define VERSION "0.2" #undef DEBUG #ifdef DEBUG #define DBG(args...) printk(args) #else #define DBG(args...) 
do { } while(0) #endif static LIST_HEAD(wf_controls); static LIST_HEAD(wf_sensors); static DEFINE_MUTEX(wf_lock); static BLOCKING_NOTIFIER_HEAD(wf_client_list); static int wf_client_count; static unsigned int wf_overtemp; static unsigned int wf_overtemp_counter; struct task_struct *wf_thread; static struct platform_device wf_platform_device = { .name = "windfarm", }; /* * Utilities & tick thread */ static inline void wf_notify(int event, void *param) { blocking_notifier_call_chain(&wf_client_list, event, param); } int wf_critical_overtemp(void) { static char * critical_overtemp_path = "/sbin/critical_overtemp"; char *argv[] = { critical_overtemp_path, NULL }; static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; return call_usermodehelper(critical_overtemp_path, argv, envp, UMH_WAIT_EXEC); } EXPORT_SYMBOL_GPL(wf_critical_overtemp); static int wf_thread_func(void *data) { unsigned long next, delay; next = jiffies; DBG("wf: thread started\n"); set_freezable(); while (!kthread_should_stop()) { try_to_freeze(); if (time_after_eq(jiffies, next)) { wf_notify(WF_EVENT_TICK, NULL); if (wf_overtemp) { wf_overtemp_counter++; /* 10 seconds overtemp, notify userland */ if (wf_overtemp_counter > 10) wf_critical_overtemp(); /* 30 seconds, shutdown */ if (wf_overtemp_counter > 30) { printk(KERN_ERR "windfarm: Overtemp " "for more than 30" " seconds, shutting down\n"); machine_power_off(); } } next += HZ; } delay = next - jiffies; if (delay <= HZ) schedule_timeout_interruptible(delay); } DBG("wf: thread stopped\n"); return 0; } static void wf_start_thread(void) { wf_thread = kthread_run(wf_thread_func, NULL, "kwindfarm"); if (IS_ERR(wf_thread)) { printk(KERN_ERR "windfarm: failed to create thread,err %ld\n", PTR_ERR(wf_thread)); wf_thread = NULL; } } static void wf_stop_thread(void) { if (wf_thread) kthread_stop(wf_thread); wf_thread = NULL; } /* * Controls */ static void wf_control_release(struct kref *kref) { struct wf_control *ct = 
container_of(kref, struct wf_control, ref); DBG("wf: Deleting control %s\n", ct->name); if (ct->ops && ct->ops->release) ct->ops->release(ct); else kfree(ct); } static ssize_t wf_show_control(struct device *dev, struct device_attribute *attr, char *buf) { struct wf_control *ctrl = container_of(attr, struct wf_control, attr); const char *typestr; s32 val = 0; int err; err = ctrl->ops->get_value(ctrl, &val); if (err < 0) { if (err == -EFAULT) return sprintf(buf, "<HW FAULT>\n"); return err; } switch(ctrl->type) { case WF_CONTROL_RPM_FAN: typestr = " RPM"; break; case WF_CONTROL_PWM_FAN: typestr = " %"; break; default: typestr = ""; } return sprintf(buf, "%d%s\n", val, typestr); } /* This is really only for debugging... */ static ssize_t wf_store_control(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct wf_control *ctrl = container_of(attr, struct wf_control, attr); int val; int err; char *endp; val = simple_strtoul(buf, &endp, 0); while (endp < buf + count && (*endp == ' ' || *endp == '\n')) ++endp; if (endp - buf < count) return -EINVAL; err = ctrl->ops->set_value(ctrl, val); if (err < 0) return err; return count; } int wf_register_control(struct wf_control *new_ct) { struct wf_control *ct; mutex_lock(&wf_lock); list_for_each_entry(ct, &wf_controls, link) { if (!strcmp(ct->name, new_ct->name)) { printk(KERN_WARNING "windfarm: trying to register" " duplicate control %s\n", ct->name); mutex_unlock(&wf_lock); return -EEXIST; } } kref_init(&new_ct->ref); list_add(&new_ct->link, &wf_controls); sysfs_attr_init(&new_ct->attr.attr); new_ct->attr.attr.name = new_ct->name; new_ct->attr.attr.mode = 0644; new_ct->attr.show = wf_show_control; new_ct->attr.store = wf_store_control; if (device_create_file(&wf_platform_device.dev, &new_ct->attr)) printk(KERN_WARNING "windfarm: device_create_file failed" " for %s\n", new_ct->name); /* the subsystem still does useful work without the file */ DBG("wf: Registered control %s\n", new_ct->name); 
wf_notify(WF_EVENT_NEW_CONTROL, new_ct); mutex_unlock(&wf_lock); return 0; } EXPORT_SYMBOL_GPL(wf_register_control); void wf_unregister_control(struct wf_control *ct) { mutex_lock(&wf_lock); list_del(&ct->link); mutex_unlock(&wf_lock); DBG("wf: Unregistered control %s\n", ct->name); kref_put(&ct->ref, wf_control_release); } EXPORT_SYMBOL_GPL(wf_unregister_control); struct wf_control * wf_find_control(const char *name) { struct wf_control *ct; mutex_lock(&wf_lock); list_for_each_entry(ct, &wf_controls, link) { if (!strcmp(ct->name, name)) { if (wf_get_control(ct)) ct = NULL; mutex_unlock(&wf_lock); return ct; } } mutex_unlock(&wf_lock); return NULL; } EXPORT_SYMBOL_GPL(wf_find_control); int wf_get_control(struct wf_control *ct) { if (!try_module_get(ct->ops->owner)) return -ENODEV; kref_get(&ct->ref); return 0; } EXPORT_SYMBOL_GPL(wf_get_control); void wf_put_control(struct wf_control *ct) { struct module *mod = ct->ops->owner; kref_put(&ct->ref, wf_control_release); module_put(mod); } EXPORT_SYMBOL_GPL(wf_put_control); /* * Sensors */ static void wf_sensor_release(struct kref *kref) { struct wf_sensor *sr = container_of(kref, struct wf_sensor, ref); DBG("wf: Deleting sensor %s\n", sr->name); if (sr->ops && sr->ops->release) sr->ops->release(sr); else kfree(sr); } static ssize_t wf_show_sensor(struct device *dev, struct device_attribute *attr, char *buf) { struct wf_sensor *sens = container_of(attr, struct wf_sensor, attr); s32 val = 0; int err; err = sens->ops->get_value(sens, &val); if (err < 0) return err; return sprintf(buf, "%d.%03d\n", FIX32TOPRINT(val)); } int wf_register_sensor(struct wf_sensor *new_sr) { struct wf_sensor *sr; mutex_lock(&wf_lock); list_for_each_entry(sr, &wf_sensors, link) { if (!strcmp(sr->name, new_sr->name)) { printk(KERN_WARNING "windfarm: trying to register" " duplicate sensor %s\n", sr->name); mutex_unlock(&wf_lock); return -EEXIST; } } kref_init(&new_sr->ref); list_add(&new_sr->link, &wf_sensors); sysfs_attr_init(&new_sr->attr.attr); 
new_sr->attr.attr.name = new_sr->name; new_sr->attr.attr.mode = 0444; new_sr->attr.show = wf_show_sensor; new_sr->attr.store = NULL; if (device_create_file(&wf_platform_device.dev, &new_sr->attr)) printk(KERN_WARNING "windfarm: device_create_file failed" " for %s\n", new_sr->name); /* the subsystem still does useful work without the file */ DBG("wf: Registered sensor %s\n", new_sr->name); wf_notify(WF_EVENT_NEW_SENSOR, new_sr); mutex_unlock(&wf_lock); return 0; } EXPORT_SYMBOL_GPL(wf_register_sensor); void wf_unregister_sensor(struct wf_sensor *sr) { mutex_lock(&wf_lock); list_del(&sr->link); mutex_unlock(&wf_lock); DBG("wf: Unregistered sensor %s\n", sr->name); wf_put_sensor(sr); } EXPORT_SYMBOL_GPL(wf_unregister_sensor); struct wf_sensor * wf_find_sensor(const char *name) { struct wf_sensor *sr; mutex_lock(&wf_lock); list_for_each_entry(sr, &wf_sensors, link) { if (!strcmp(sr->name, name)) { if (wf_get_sensor(sr)) sr = NULL; mutex_unlock(&wf_lock); return sr; } } mutex_unlock(&wf_lock); return NULL; } EXPORT_SYMBOL_GPL(wf_find_sensor); int wf_get_sensor(struct wf_sensor *sr) { if (!try_module_get(sr->ops->owner)) return -ENODEV; kref_get(&sr->ref); return 0; } EXPORT_SYMBOL_GPL(wf_get_sensor); void wf_put_sensor(struct wf_sensor *sr) { struct module *mod = sr->ops->owner; kref_put(&sr->ref, wf_sensor_release); module_put(mod); } EXPORT_SYMBOL_GPL(wf_put_sensor); /* * Client & notification */ int wf_register_client(struct notifier_block *nb) { int rc; struct wf_control *ct; struct wf_sensor *sr; mutex_lock(&wf_lock); rc = blocking_notifier_chain_register(&wf_client_list, nb); if (rc != 0) goto bail; wf_client_count++; list_for_each_entry(ct, &wf_controls, link) wf_notify(WF_EVENT_NEW_CONTROL, ct); list_for_each_entry(sr, &wf_sensors, link) wf_notify(WF_EVENT_NEW_SENSOR, sr); if (wf_client_count == 1) wf_start_thread(); bail: mutex_unlock(&wf_lock); return rc; } EXPORT_SYMBOL_GPL(wf_register_client); int wf_unregister_client(struct notifier_block *nb) { 
mutex_lock(&wf_lock); blocking_notifier_chain_unregister(&wf_client_list, nb); wf_client_count++; if (wf_client_count == 0) wf_stop_thread(); mutex_unlock(&wf_lock); return 0; } EXPORT_SYMBOL_GPL(wf_unregister_client); void wf_set_overtemp(void) { mutex_lock(&wf_lock); wf_overtemp++; if (wf_overtemp == 1) { printk(KERN_WARNING "windfarm: Overtemp condition detected !\n"); wf_overtemp_counter = 0; wf_notify(WF_EVENT_OVERTEMP, NULL); } mutex_unlock(&wf_lock); } EXPORT_SYMBOL_GPL(wf_set_overtemp); void wf_clear_overtemp(void) { mutex_lock(&wf_lock); WARN_ON(wf_overtemp == 0); if (wf_overtemp == 0) { mutex_unlock(&wf_lock); return; } wf_overtemp--; if (wf_overtemp == 0) { printk(KERN_WARNING "windfarm: Overtemp condition cleared !\n"); wf_notify(WF_EVENT_NORMALTEMP, NULL); } mutex_unlock(&wf_lock); } EXPORT_SYMBOL_GPL(wf_clear_overtemp); int wf_is_overtemp(void) { return (wf_overtemp != 0); } EXPORT_SYMBOL_GPL(wf_is_overtemp); static int __init windfarm_core_init(void) { DBG("wf: core loaded\n"); platform_device_register(&wf_platform_device); return 0; } static void __exit windfarm_core_exit(void) { BUG_ON(wf_client_count != 0); DBG("wf: core unloaded\n"); platform_device_unregister(&wf_platform_device); } module_init(windfarm_core_init); module_exit(windfarm_core_exit); MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>"); MODULE_DESCRIPTION("Core component of PowerMac thermal control"); MODULE_LICENSE("GPL");
gpl-2.0
TeamHackDroid/samsung-kernel-msm7x30
kernel/events/ring_buffer.c
3742
8039
/* * Performance events ring-buffer code: * * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> * * For licensing details see kernel-base/COPYING */ #include <linux/perf_event.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include "internal.h" static bool perf_output_space(struct ring_buffer *rb, unsigned long tail, unsigned long offset, unsigned long head) { unsigned long mask; if (!rb->writable) return true; mask = perf_data_size(rb) - 1; offset = (offset - tail) & mask; head = (head - tail) & mask; if ((int)(head - offset) < 0) return false; return true; } static void perf_output_wakeup(struct perf_output_handle *handle) { atomic_set(&handle->rb->poll, POLL_IN); handle->event->pending_wakeup = 1; irq_work_queue(&handle->event->pending); } /* * We need to ensure a later event_id doesn't publish a head when a former * event isn't done writing. However since we need to deal with NMIs we * cannot fully serialize things. * * We only publish the head (and generate a wakeup) when the outer-most * event completes. */ static void perf_output_get_handle(struct perf_output_handle *handle) { struct ring_buffer *rb = handle->rb; preempt_disable(); local_inc(&rb->nest); handle->wakeup = local_read(&rb->wakeup); } static void perf_output_put_handle(struct perf_output_handle *handle) { struct ring_buffer *rb = handle->rb; unsigned long head; again: head = local_read(&rb->head); /* * IRQ/NMI can happen here, which means we can miss a head update. */ if (!local_dec_and_test(&rb->nest)) goto out; /* * Publish the known good head. Rely on the full barrier implied * by atomic_dec_and_test() order the rb->head read and this * write. 
*/ rb->user_page->data_head = head; /* * Now check if we missed an update, rely on the (compiler) * barrier in atomic_dec_and_test() to re-read rb->head. */ if (unlikely(head != local_read(&rb->head))) { local_inc(&rb->nest); goto again; } if (handle->wakeup != local_read(&rb->wakeup)) perf_output_wakeup(handle); out: preempt_enable(); } int perf_output_begin(struct perf_output_handle *handle, struct perf_event *event, unsigned int size) { struct ring_buffer *rb; unsigned long tail, offset, head; int have_lost; struct perf_sample_data sample_data; struct { struct perf_event_header header; u64 id; u64 lost; } lost_event; rcu_read_lock(); /* * For inherited events we send all the output towards the parent. */ if (event->parent) event = event->parent; rb = rcu_dereference(event->rb); if (!rb) goto out; handle->rb = rb; handle->event = event; if (!rb->nr_pages) goto out; have_lost = local_read(&rb->lost); if (have_lost) { lost_event.header.size = sizeof(lost_event); perf_event_header__init_id(&lost_event.header, &sample_data, event); size += lost_event.header.size; } perf_output_get_handle(handle); do { /* * Userspace could choose to issue a mb() before updating the * tail pointer. So that all reads will be completed before the * write is issued. 
*/ tail = ACCESS_ONCE(rb->user_page->data_tail); smp_rmb(); offset = head = local_read(&rb->head); head += size; if (unlikely(!perf_output_space(rb, tail, offset, head))) goto fail; } while (local_cmpxchg(&rb->head, offset, head) != offset); if (head - local_read(&rb->wakeup) > rb->watermark) local_add(rb->watermark, &rb->wakeup); handle->page = offset >> (PAGE_SHIFT + page_order(rb)); handle->page &= rb->nr_pages - 1; handle->size = offset & ((PAGE_SIZE << page_order(rb)) - 1); handle->addr = rb->data_pages[handle->page]; handle->addr += handle->size; handle->size = (PAGE_SIZE << page_order(rb)) - handle->size; if (have_lost) { lost_event.header.type = PERF_RECORD_LOST; lost_event.header.misc = 0; lost_event.id = event->id; lost_event.lost = local_xchg(&rb->lost, 0); perf_output_put(handle, lost_event); perf_event__output_id_sample(event, handle, &sample_data); } return 0; fail: local_inc(&rb->lost); perf_output_put_handle(handle); out: rcu_read_unlock(); return -ENOSPC; } void perf_output_copy(struct perf_output_handle *handle, const void *buf, unsigned int len) { __output_copy(handle, buf, len); } void perf_output_end(struct perf_output_handle *handle) { perf_output_put_handle(handle); rcu_read_unlock(); } static void ring_buffer_init(struct ring_buffer *rb, long watermark, int flags) { long max_size = perf_data_size(rb); if (watermark) rb->watermark = min(max_size, watermark); if (!rb->watermark) rb->watermark = max_size / 2; if (flags & RING_BUFFER_WRITABLE) rb->writable = 1; atomic_set(&rb->refcount, 1); INIT_LIST_HEAD(&rb->event_list); spin_lock_init(&rb->event_lock); } #ifndef CONFIG_PERF_USE_VMALLOC /* * Back perf_mmap() with regular GFP_KERNEL-0 pages. 
*/ struct page * perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff) { if (pgoff > rb->nr_pages) return NULL; if (pgoff == 0) return virt_to_page(rb->user_page); return virt_to_page(rb->data_pages[pgoff - 1]); } static void *perf_mmap_alloc_page(int cpu) { struct page *page; int node; node = (cpu == -1) ? cpu : cpu_to_node(cpu); page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); if (!page) return NULL; return page_address(page); } struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags) { struct ring_buffer *rb; unsigned long size; int i; size = sizeof(struct ring_buffer); size += nr_pages * sizeof(void *); rb = kzalloc(size, GFP_KERNEL); if (!rb) goto fail; rb->user_page = perf_mmap_alloc_page(cpu); if (!rb->user_page) goto fail_user_page; for (i = 0; i < nr_pages; i++) { rb->data_pages[i] = perf_mmap_alloc_page(cpu); if (!rb->data_pages[i]) goto fail_data_pages; } rb->nr_pages = nr_pages; ring_buffer_init(rb, watermark, flags); return rb; fail_data_pages: for (i--; i >= 0; i--) free_page((unsigned long)rb->data_pages[i]); free_page((unsigned long)rb->user_page); fail_user_page: kfree(rb); fail: return NULL; } static void perf_mmap_free_page(unsigned long addr) { struct page *page = virt_to_page((void *)addr); page->mapping = NULL; __free_page(page); } void rb_free(struct ring_buffer *rb) { int i; perf_mmap_free_page((unsigned long)rb->user_page); for (i = 0; i < rb->nr_pages; i++) perf_mmap_free_page((unsigned long)rb->data_pages[i]); kfree(rb); } #else struct page * perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff) { if (pgoff > (1UL << page_order(rb))) return NULL; return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE); } static void perf_mmap_unmark_page(void *addr) { struct page *page = vmalloc_to_page(addr); page->mapping = NULL; } static void rb_free_work(struct work_struct *work) { struct ring_buffer *rb; void *base; int i, nr; rb = container_of(work, struct ring_buffer, work); nr = 1 << 
page_order(rb); base = rb->user_page; for (i = 0; i < nr + 1; i++) perf_mmap_unmark_page(base + (i * PAGE_SIZE)); vfree(base); kfree(rb); } void rb_free(struct ring_buffer *rb) { schedule_work(&rb->work); } struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags) { struct ring_buffer *rb; unsigned long size; void *all_buf; size = sizeof(struct ring_buffer); size += sizeof(void *); rb = kzalloc(size, GFP_KERNEL); if (!rb) goto fail; INIT_WORK(&rb->work, rb_free_work); all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE); if (!all_buf) goto fail_all_buf; rb->user_page = all_buf; rb->data_pages[0] = all_buf + PAGE_SIZE; rb->page_order = ilog2(nr_pages); rb->nr_pages = 1; ring_buffer_init(rb, watermark, flags); return rb; fail_all_buf: kfree(rb); fail: return NULL; } #endif
gpl-2.0
ztemt/NX505J_5.1_kernel
arch/tile/kernel/machine_kexec.c
4510
7275
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. * * based on machine_kexec.c from other architectures in linux-2.6.18 */ #include <linux/mm.h> #include <linux/kexec.h> #include <linux/delay.h> #include <linux/reboot.h> #include <linux/errno.h> #include <linux/vmalloc.h> #include <linux/cpumask.h> #include <linux/kernel.h> #include <linux/elf.h> #include <linux/highmem.h> #include <linux/mmu_context.h> #include <linux/io.h> #include <linux/timex.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/cacheflush.h> #include <asm/checksum.h> #include <hv/hypervisor.h> /* * This stuff is not in elf.h and is not in any other kernel include. * This stuff is needed below in the little boot notes parser to * extract the command line so we can pass it to the hypervisor. */ struct Elf32_Bhdr { Elf32_Word b_signature; Elf32_Word b_size; Elf32_Half b_checksum; Elf32_Half b_records; }; #define ELF_BOOT_MAGIC 0x0E1FB007 #define EBN_COMMAND_LINE 0x00000004 #define roundupsz(X) (((X) + 3) & ~3) /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ void machine_shutdown(void) { /* * Normally we would stop all the other processors here, but * the check in machine_kexec_prepare below ensures we'll only * get this far if we've been booted with "nosmp" on the * command line or without CONFIG_SMP so there's nothing to do * here (for now). */ } void machine_crash_shutdown(struct pt_regs *regs) { /* * Cannot happen. 
This type of kexec is disabled on this * architecture (and enforced in machine_kexec_prepare below). */ } int machine_kexec_prepare(struct kimage *image) { if (num_online_cpus() > 1) { pr_warning("%s: detected attempt to kexec " "with num_online_cpus() > 1\n", __func__); return -ENOSYS; } if (image->type != KEXEC_TYPE_DEFAULT) { pr_warning("%s: detected attempt to kexec " "with unsupported type: %d\n", __func__, image->type); return -ENOSYS; } return 0; } void machine_kexec_cleanup(struct kimage *image) { /* * We did nothing in machine_kexec_prepare, * so we have nothing to do here. */ } /* * If we can find elf boot notes on this page, return the command * line. Otherwise, silently return null. Somewhat kludgy, but no * good way to do this without significantly rearchitecting the * architecture-independent kexec code. */ static unsigned char *kexec_bn2cl(void *pg) { struct Elf32_Bhdr *bhdrp; Elf32_Nhdr *nhdrp; unsigned char *desc; unsigned char *command_line; __sum16 csum; bhdrp = (struct Elf32_Bhdr *) pg; /* * This routine is invoked for every source page, so make * sure to quietly ignore every impossible page. */ if (bhdrp->b_signature != ELF_BOOT_MAGIC || bhdrp->b_size > PAGE_SIZE) return 0; /* * If we get a checksum mismatch, warn with the checksum * so we can diagnose better. */ csum = ip_compute_csum(pg, bhdrp->b_size); if (csum != 0) { pr_warning("%s: bad checksum %#x (size %d)\n", __func__, csum, bhdrp->b_size); return 0; } nhdrp = (Elf32_Nhdr *) (bhdrp + 1); while (nhdrp->n_type != EBN_COMMAND_LINE) { desc = (unsigned char *) (nhdrp + 1); desc += roundupsz(nhdrp->n_descsz); nhdrp = (Elf32_Nhdr *) desc; /* still in bounds? 
*/ if ((unsigned char *) (nhdrp + 1) > ((unsigned char *) pg) + bhdrp->b_size) { pr_info("%s: out of bounds\n", __func__); return 0; } } command_line = (unsigned char *) (nhdrp + 1); desc = command_line; while (*desc != '\0') { desc++; if (((unsigned long)desc & PAGE_MASK) != (unsigned long)pg) { pr_info("%s: ran off end of page\n", __func__); return 0; } } return command_line; } static void kexec_find_and_set_command_line(struct kimage *image) { kimage_entry_t *ptr, entry; unsigned char *command_line = 0; unsigned char *r; HV_Errno hverr; for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); ptr = (entry & IND_INDIRECTION) ? phys_to_virt((entry & PAGE_MASK)) : ptr + 1) { if ((entry & IND_SOURCE)) { void *va = kmap_atomic_pfn(entry >> PAGE_SHIFT); r = kexec_bn2cl(va); if (r) { command_line = r; break; } kunmap_atomic(va); } } if (command_line != 0) { pr_info("setting new command line to \"%s\"\n", command_line); hverr = hv_set_command_line( (HV_VirtAddr) command_line, strlen(command_line)); kunmap_atomic(command_line); } else { pr_info("%s: no command line found; making empty\n", __func__); hverr = hv_set_command_line((HV_VirtAddr) command_line, 0); } if (hverr) pr_warning("%s: hv_set_command_line returned error: %d\n", __func__, hverr); } /* * The kexec code range-checks all its PAs, so to avoid having it run * amok and allocate memory and then sequester it from every other * controller, we force it to come from controller zero. We also * disable the oom-killer since if we do end up running out of memory, * that almost certainly won't help. */ struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order) { gfp_mask |= __GFP_THISNODE | __GFP_NORETRY; return alloc_pages_node(0, gfp_mask, order); } static void setup_quasi_va_is_pa(void) { HV_PTE *pgtable; HV_PTE pte; int i; /* * Flush our TLB to prevent conflicts between the previous contents * and the new stuff we're about to add. 
*/ local_flush_tlb_all(); /* setup VA is PA, at least up to PAGE_OFFSET */ pgtable = (HV_PTE *)current->mm->pgd; pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE); pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3); for (i = 0; i < pgd_index(PAGE_OFFSET); i++) { unsigned long pfn = i << (HPAGE_SHIFT - PAGE_SHIFT); if (pfn_valid(pfn)) __set_pte(&pgtable[i], pfn_pte(pfn, pte)); } } void machine_kexec(struct kimage *image) { void *reboot_code_buffer; void (*rnk)(unsigned long, void *, unsigned long) __noreturn; /* Mask all interrupts before starting to reboot. */ interrupt_mask_set_mask(~0ULL); kexec_find_and_set_command_line(image); /* * Adjust the home caching of the control page to be cached on * this cpu, and copy the assembly helper into the control * code page, which we map in the vmalloc area. */ homecache_change_page_home(image->control_code_page, 0, smp_processor_id()); reboot_code_buffer = vmap(&image->control_code_page, 1, 0, __pgprot(_PAGE_KERNEL | _PAGE_EXECUTABLE)); memcpy(reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size); __flush_icache_range( (unsigned long) reboot_code_buffer, (unsigned long) reboot_code_buffer + relocate_new_kernel_size); setup_quasi_va_is_pa(); /* now call it */ rnk = reboot_code_buffer; (*rnk)(image->head, reboot_code_buffer, image->start); }
gpl-2.0
Vegaviet-Dev/android_kernel_pantech_ef63-common
arch/arm/mach-s3c64xx/clock.c
4766
23619
/* linux/arch/arm/plat-s3c64xx/clock.c * * Copyright 2008 Openmoko, Inc. * Copyright 2008 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ * * S3C64XX Base clock support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <mach/hardware.h> #include <mach/map.h> #include <mach/regs-sys.h> #include <mach/regs-clock.h> #include <plat/cpu.h> #include <plat/devs.h> #include <plat/cpu-freq.h> #include <plat/clock.h> #include <plat/clock-clksrc.h> #include <plat/pll.h> /* fin_apll, fin_mpll and fin_epll are all the same clock, which we call * ext_xtal_mux for want of an actual name from the manual. */ static struct clk clk_ext_xtal_mux = { .name = "ext_xtal", }; #define clk_fin_apll clk_ext_xtal_mux #define clk_fin_mpll clk_ext_xtal_mux #define clk_fin_epll clk_ext_xtal_mux #define clk_fout_mpll clk_mpll #define clk_fout_epll clk_epll struct clk clk_h2 = { .name = "hclk2", .rate = 0, }; struct clk clk_27m = { .name = "clk_27m", .rate = 27000000, }; static int clk_48m_ctrl(struct clk *clk, int enable) { unsigned long flags; u32 val; /* can't rely on clock lock, this register has other usages */ local_irq_save(flags); val = __raw_readl(S3C64XX_OTHERS); if (enable) val |= S3C64XX_OTHERS_USBMASK; else val &= ~S3C64XX_OTHERS_USBMASK; __raw_writel(val, S3C64XX_OTHERS); local_irq_restore(flags); return 0; } struct clk clk_48m = { .name = "clk_48m", .rate = 48000000, .enable = clk_48m_ctrl, }; struct clk clk_xusbxti = { .name = "xusbxti", .rate = 48000000, }; static int inline s3c64xx_gate(void __iomem *reg, struct clk *clk, int enable) { unsigned int ctrlbit = clk->ctrlbit; u32 con; con = __raw_readl(reg); if (enable) con |= ctrlbit; 
else con &= ~ctrlbit; __raw_writel(con, reg); return 0; } static int s3c64xx_pclk_ctrl(struct clk *clk, int enable) { return s3c64xx_gate(S3C_PCLK_GATE, clk, enable); } static int s3c64xx_hclk_ctrl(struct clk *clk, int enable) { return s3c64xx_gate(S3C_HCLK_GATE, clk, enable); } int s3c64xx_sclk_ctrl(struct clk *clk, int enable) { return s3c64xx_gate(S3C_SCLK_GATE, clk, enable); } static struct clk init_clocks_off[] = { { .name = "nand", .parent = &clk_h, }, { .name = "rtc", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_RTC, }, { .name = "adc", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_TSADC, }, { .name = "i2c", #ifdef CONFIG_S3C_DEV_I2C1 .devname = "s3c2440-i2c.0", #else .devname = "s3c2440-i2c", #endif .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_IIC, }, { .name = "i2c", .devname = "s3c2440-i2c.1", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C6410_CLKCON_PCLK_I2C1, }, { .name = "iis", .devname = "samsung-i2s.0", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_IIS0, }, { .name = "iis", .devname = "samsung-i2s.1", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_IIS1, }, { #ifdef CONFIG_CPU_S3C6410 .name = "iis", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C6410_CLKCON_PCLK_IIS2, }, { #endif .name = "keypad", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_KEYPAD, }, { .name = "spi", .devname = "s3c64xx-spi.0", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_SPI0, }, { .name = "spi", .devname = "s3c64xx-spi.1", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_SPI1, }, { .name = "48m", .devname = "s3c-sdhci.0", .parent = &clk_48m, .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_MMC0_48, }, { .name = "48m", .devname = "s3c-sdhci.1", .parent = &clk_48m, .enable = s3c64xx_sclk_ctrl, .ctrlbit = 
S3C_CLKCON_SCLK_MMC1_48, }, { .name = "48m", .devname = "s3c-sdhci.2", .parent = &clk_48m, .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_MMC2_48, }, { .name = "ac97", .parent = &clk_p, .ctrlbit = S3C_CLKCON_PCLK_AC97, }, { .name = "cfcon", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_IHOST, }, { .name = "dma0", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_DMA0, }, { .name = "dma1", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_DMA1, }, { .name = "3dse", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_3DSE, }, { .name = "hclk_secur", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_SECUR, }, { .name = "sdma1", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_SDMA1, }, { .name = "sdma0", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_SDMA0, }, { .name = "hclk_jpeg", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_JPEG, }, { .name = "camif", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_CAMIF, }, { .name = "hclk_scaler", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_SCALER, }, { .name = "2d", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_2D, }, { .name = "tv", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_TV, }, { .name = "post0", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_POST0, }, { .name = "rot", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_ROT, }, { .name = "hclk_mfc", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_MFC, }, { .name = "pclk_mfc", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_MFC, }, { .name = "dac27", .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_DAC27, }, { .name = "tv27", .enable = 
s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_TV27, }, { .name = "scaler27", .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_SCALER27, }, { .name = "sclk_scaler", .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_SCALER, }, { .name = "post0_27", .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_POST0_27, }, { .name = "secur", .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_SECUR, }, { .name = "sclk_mfc", .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_MFC, }, { .name = "cam", .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_CAM, }, { .name = "sclk_jpeg", .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_JPEG, }, }; static struct clk clk_48m_spi0 = { .name = "spi_48m", .devname = "s3c64xx-spi.0", .parent = &clk_48m, .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_SPI0_48, }; static struct clk clk_48m_spi1 = { .name = "spi_48m", .devname = "s3c64xx-spi.1", .parent = &clk_48m, .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_SPI1_48, }; static struct clk init_clocks[] = { { .name = "lcd", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_LCD, }, { .name = "gpio", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_GPIO, }, { .name = "usb-host", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_UHOST, }, { .name = "otg", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_USB, }, { .name = "timers", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_PWM, }, { .name = "uart", .devname = "s3c6400-uart.0", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_UART0, }, { .name = "uart", .devname = "s3c6400-uart.1", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_UART1, }, { .name = "uart", .devname = "s3c6400-uart.2", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_UART2, }, { .name = "uart", .devname = "s3c6400-uart.3", 
.parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_UART3, }, { .name = "watchdog", .parent = &clk_p, .ctrlbit = S3C_CLKCON_PCLK_WDT, }, }; static struct clk clk_hsmmc0 = { .name = "hsmmc", .devname = "s3c-sdhci.0", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_HSMMC0, }; static struct clk clk_hsmmc1 = { .name = "hsmmc", .devname = "s3c-sdhci.1", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_HSMMC1, }; static struct clk clk_hsmmc2 = { .name = "hsmmc", .devname = "s3c-sdhci.2", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_HSMMC2, }; static struct clk clk_fout_apll = { .name = "fout_apll", }; static struct clk *clk_src_apll_list[] = { [0] = &clk_fin_apll, [1] = &clk_fout_apll, }; static struct clksrc_sources clk_src_apll = { .sources = clk_src_apll_list, .nr_sources = ARRAY_SIZE(clk_src_apll_list), }; static struct clksrc_clk clk_mout_apll = { .clk = { .name = "mout_apll", }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 0, .size = 1 }, .sources = &clk_src_apll, }; static struct clk *clk_src_epll_list[] = { [0] = &clk_fin_epll, [1] = &clk_fout_epll, }; static struct clksrc_sources clk_src_epll = { .sources = clk_src_epll_list, .nr_sources = ARRAY_SIZE(clk_src_epll_list), }; static struct clksrc_clk clk_mout_epll = { .clk = { .name = "mout_epll", }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 2, .size = 1 }, .sources = &clk_src_epll, }; static struct clk *clk_src_mpll_list[] = { [0] = &clk_fin_mpll, [1] = &clk_fout_mpll, }; static struct clksrc_sources clk_src_mpll = { .sources = clk_src_mpll_list, .nr_sources = ARRAY_SIZE(clk_src_mpll_list), }; static struct clksrc_clk clk_mout_mpll = { .clk = { .name = "mout_mpll", }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 1, .size = 1 }, .sources = &clk_src_mpll, }; static unsigned int armclk_mask; static unsigned long s3c64xx_clk_arm_get_rate(struct clk *clk) { unsigned long rate = clk_get_rate(clk->parent); u32 clkdiv; /* 
divisor mask starts at bit0, so no need to shift */ clkdiv = __raw_readl(S3C_CLK_DIV0) & armclk_mask; return rate / (clkdiv + 1); } static unsigned long s3c64xx_clk_arm_round_rate(struct clk *clk, unsigned long rate) { unsigned long parent = clk_get_rate(clk->parent); u32 div; if (parent < rate) return parent; div = (parent / rate) - 1; if (div > armclk_mask) div = armclk_mask; return parent / (div + 1); } static int s3c64xx_clk_arm_set_rate(struct clk *clk, unsigned long rate) { unsigned long parent = clk_get_rate(clk->parent); u32 div; u32 val; if (rate < parent / (armclk_mask + 1)) return -EINVAL; rate = clk_round_rate(clk, rate); div = clk_get_rate(clk->parent) / rate; val = __raw_readl(S3C_CLK_DIV0); val &= ~armclk_mask; val |= (div - 1); __raw_writel(val, S3C_CLK_DIV0); return 0; } static struct clk clk_arm = { .name = "armclk", .parent = &clk_mout_apll.clk, .ops = &(struct clk_ops) { .get_rate = s3c64xx_clk_arm_get_rate, .set_rate = s3c64xx_clk_arm_set_rate, .round_rate = s3c64xx_clk_arm_round_rate, }, }; static unsigned long s3c64xx_clk_doutmpll_get_rate(struct clk *clk) { unsigned long rate = clk_get_rate(clk->parent); printk(KERN_DEBUG "%s: parent is %ld\n", __func__, rate); if (__raw_readl(S3C_CLK_DIV0) & S3C6400_CLKDIV0_MPLL_MASK) rate /= 2; return rate; } static struct clk_ops clk_dout_ops = { .get_rate = s3c64xx_clk_doutmpll_get_rate, }; static struct clk clk_dout_mpll = { .name = "dout_mpll", .parent = &clk_mout_mpll.clk, .ops = &clk_dout_ops, }; static struct clk *clkset_spi_mmc_list[] = { &clk_mout_epll.clk, &clk_dout_mpll, &clk_fin_epll, &clk_27m, }; static struct clksrc_sources clkset_spi_mmc = { .sources = clkset_spi_mmc_list, .nr_sources = ARRAY_SIZE(clkset_spi_mmc_list), }; static struct clk *clkset_irda_list[] = { &clk_mout_epll.clk, &clk_dout_mpll, NULL, &clk_27m, }; static struct clksrc_sources clkset_irda = { .sources = clkset_irda_list, .nr_sources = ARRAY_SIZE(clkset_irda_list), }; static struct clk *clkset_uart_list[] = { 
&clk_mout_epll.clk, &clk_dout_mpll, NULL, NULL }; static struct clksrc_sources clkset_uart = { .sources = clkset_uart_list, .nr_sources = ARRAY_SIZE(clkset_uart_list), }; static struct clk *clkset_uhost_list[] = { &clk_48m, &clk_mout_epll.clk, &clk_dout_mpll, &clk_fin_epll, }; static struct clksrc_sources clkset_uhost = { .sources = clkset_uhost_list, .nr_sources = ARRAY_SIZE(clkset_uhost_list), }; /* The peripheral clocks are all controlled via clocksource followed * by an optional divider and gate stage. We currently roll this into * one clock which hides the intermediate clock from the mux. * * Note, the JPEG clock can only be an even divider... * * The scaler and LCD clocks depend on the S3C64XX version, and also * have a common parent divisor so are not included here. */ /* clocks that feed other parts of the clock source tree */ static struct clk clk_iis_cd0 = { .name = "iis_cdclk0", }; static struct clk clk_iis_cd1 = { .name = "iis_cdclk1", }; static struct clk clk_iisv4_cd = { .name = "iis_cdclk_v4", }; static struct clk clk_pcm_cd = { .name = "pcm_cdclk", }; static struct clk *clkset_audio0_list[] = { [0] = &clk_mout_epll.clk, [1] = &clk_dout_mpll, [2] = &clk_fin_epll, [3] = &clk_iis_cd0, [4] = &clk_pcm_cd, }; static struct clksrc_sources clkset_audio0 = { .sources = clkset_audio0_list, .nr_sources = ARRAY_SIZE(clkset_audio0_list), }; static struct clk *clkset_audio1_list[] = { [0] = &clk_mout_epll.clk, [1] = &clk_dout_mpll, [2] = &clk_fin_epll, [3] = &clk_iis_cd1, [4] = &clk_pcm_cd, }; static struct clksrc_sources clkset_audio1 = { .sources = clkset_audio1_list, .nr_sources = ARRAY_SIZE(clkset_audio1_list), }; static struct clk *clkset_audio2_list[] = { [0] = &clk_mout_epll.clk, [1] = &clk_dout_mpll, [2] = &clk_fin_epll, [3] = &clk_iisv4_cd, [4] = &clk_pcm_cd, }; static struct clksrc_sources clkset_audio2 = { .sources = clkset_audio2_list, .nr_sources = ARRAY_SIZE(clkset_audio2_list), }; static struct clk *clkset_camif_list[] = { &clk_h2, }; static struct 
clksrc_sources clkset_camif = { .sources = clkset_camif_list, .nr_sources = ARRAY_SIZE(clkset_camif_list), }; static struct clksrc_clk clksrcs[] = { { .clk = { .name = "usb-bus-host", .ctrlbit = S3C_CLKCON_SCLK_UHOST, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 5, .size = 2 }, .reg_div = { .reg = S3C_CLK_DIV1, .shift = 20, .size = 4 }, .sources = &clkset_uhost, }, { .clk = { .name = "audio-bus", .devname = "samsung-i2s.0", .ctrlbit = S3C_CLKCON_SCLK_AUDIO0, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 7, .size = 3 }, .reg_div = { .reg = S3C_CLK_DIV2, .shift = 8, .size = 4 }, .sources = &clkset_audio0, }, { .clk = { .name = "audio-bus", .devname = "samsung-i2s.1", .ctrlbit = S3C_CLKCON_SCLK_AUDIO1, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 10, .size = 3 }, .reg_div = { .reg = S3C_CLK_DIV2, .shift = 12, .size = 4 }, .sources = &clkset_audio1, }, { .clk = { .name = "audio-bus", .devname = "samsung-i2s.2", .ctrlbit = S3C6410_CLKCON_SCLK_AUDIO2, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C6410_CLK_SRC2, .shift = 0, .size = 3 }, .reg_div = { .reg = S3C_CLK_DIV2, .shift = 24, .size = 4 }, .sources = &clkset_audio2, }, { .clk = { .name = "irda-bus", .ctrlbit = S3C_CLKCON_SCLK_IRDA, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 24, .size = 2 }, .reg_div = { .reg = S3C_CLK_DIV2, .shift = 20, .size = 4 }, .sources = &clkset_irda, }, { .clk = { .name = "camera", .ctrlbit = S3C_CLKCON_SCLK_CAM, .enable = s3c64xx_sclk_ctrl, }, .reg_div = { .reg = S3C_CLK_DIV0, .shift = 20, .size = 4 }, .reg_src = { .reg = NULL, .shift = 0, .size = 0 }, .sources = &clkset_camif, }, }; /* Where does UCLK0 come from? 
*/ static struct clksrc_clk clk_sclk_uclk = { .clk = { .name = "uclk1", .ctrlbit = S3C_CLKCON_SCLK_UART, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 13, .size = 1 }, .reg_div = { .reg = S3C_CLK_DIV2, .shift = 16, .size = 4 }, .sources = &clkset_uart, }; static struct clksrc_clk clk_sclk_mmc0 = { .clk = { .name = "mmc_bus", .devname = "s3c-sdhci.0", .ctrlbit = S3C_CLKCON_SCLK_MMC0, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 18, .size = 2 }, .reg_div = { .reg = S3C_CLK_DIV1, .shift = 0, .size = 4 }, .sources = &clkset_spi_mmc, }; static struct clksrc_clk clk_sclk_mmc1 = { .clk = { .name = "mmc_bus", .devname = "s3c-sdhci.1", .ctrlbit = S3C_CLKCON_SCLK_MMC1, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 20, .size = 2 }, .reg_div = { .reg = S3C_CLK_DIV1, .shift = 4, .size = 4 }, .sources = &clkset_spi_mmc, }; static struct clksrc_clk clk_sclk_mmc2 = { .clk = { .name = "mmc_bus", .devname = "s3c-sdhci.2", .ctrlbit = S3C_CLKCON_SCLK_MMC2, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 22, .size = 2 }, .reg_div = { .reg = S3C_CLK_DIV1, .shift = 8, .size = 4 }, .sources = &clkset_spi_mmc, }; static struct clksrc_clk clk_sclk_spi0 = { .clk = { .name = "spi-bus", .devname = "s3c64xx-spi.0", .ctrlbit = S3C_CLKCON_SCLK_SPI0, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 14, .size = 2 }, .reg_div = { .reg = S3C_CLK_DIV2, .shift = 0, .size = 4 }, .sources = &clkset_spi_mmc, }; static struct clksrc_clk clk_sclk_spi1 = { .clk = { .name = "spi-bus", .devname = "s3c64xx-spi.1", .ctrlbit = S3C_CLKCON_SCLK_SPI1, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 16, .size = 2 }, .reg_div = { .reg = S3C_CLK_DIV2, .shift = 4, .size = 4 }, .sources = &clkset_spi_mmc, }; /* Clock initialisation code */ static struct clksrc_clk *init_parents[] = { &clk_mout_apll, &clk_mout_epll, &clk_mout_mpll, }; static struct clksrc_clk 
*clksrc_cdev[] = { &clk_sclk_uclk, &clk_sclk_mmc0, &clk_sclk_mmc1, &clk_sclk_mmc2, &clk_sclk_spi0, &clk_sclk_spi1, }; static struct clk *clk_cdev[] = { &clk_hsmmc0, &clk_hsmmc1, &clk_hsmmc2, &clk_48m_spi0, &clk_48m_spi1, }; static struct clk_lookup s3c64xx_clk_lookup[] = { CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p), CLKDEV_INIT(NULL, "clk_uart_baud3", &clk_sclk_uclk.clk), CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.0", &clk_hsmmc0), CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.0", &clk_hsmmc1), CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.0", &clk_hsmmc2), CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &clk_sclk_mmc0.clk), CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_sclk_mmc1.clk), CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &clk_sclk_mmc2.clk), CLKDEV_INIT(NULL, "spi_busclk0", &clk_p), CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk1", &clk_sclk_spi0.clk), CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk2", &clk_48m_spi0), CLKDEV_INIT("s3c64xx-spi.1", "spi_busclk1", &clk_sclk_spi1.clk), CLKDEV_INIT("s3c64xx-spi.1", "spi_busclk2", &clk_48m_spi1), }; #define GET_DIV(clk, field) ((((clk) & field##_MASK) >> field##_SHIFT) + 1) void __init_or_cpufreq s3c64xx_setup_clocks(void) { struct clk *xtal_clk; unsigned long xtal; unsigned long fclk; unsigned long hclk; unsigned long hclk2; unsigned long pclk; unsigned long epll; unsigned long apll; unsigned long mpll; unsigned int ptr; u32 clkdiv0; printk(KERN_DEBUG "%s: registering clocks\n", __func__); clkdiv0 = __raw_readl(S3C_CLK_DIV0); printk(KERN_DEBUG "%s: clkdiv0 = %08x\n", __func__, clkdiv0); xtal_clk = clk_get(NULL, "xtal"); BUG_ON(IS_ERR(xtal_clk)); xtal = clk_get_rate(xtal_clk); clk_put(xtal_clk); printk(KERN_DEBUG "%s: xtal is %ld\n", __func__, xtal); /* For now assume the mux always selects the crystal */ clk_ext_xtal_mux.parent = xtal_clk; epll = s3c_get_pll6553x(xtal, __raw_readl(S3C_EPLL_CON0), __raw_readl(S3C_EPLL_CON1)); mpll = s3c6400_get_pll(xtal, __raw_readl(S3C_MPLL_CON)); apll = s3c6400_get_pll(xtal, __raw_readl(S3C_APLL_CON)); fclk = 
mpll; printk(KERN_INFO "S3C64XX: PLL settings, A=%ld, M=%ld, E=%ld\n", apll, mpll, epll); if(__raw_readl(S3C64XX_OTHERS) & S3C64XX_OTHERS_SYNCMUXSEL) /* Synchronous mode */ hclk2 = apll / GET_DIV(clkdiv0, S3C6400_CLKDIV0_HCLK2); else /* Asynchronous mode */ hclk2 = mpll / GET_DIV(clkdiv0, S3C6400_CLKDIV0_HCLK2); hclk = hclk2 / GET_DIV(clkdiv0, S3C6400_CLKDIV0_HCLK); pclk = hclk2 / GET_DIV(clkdiv0, S3C6400_CLKDIV0_PCLK); printk(KERN_INFO "S3C64XX: HCLK2=%ld, HCLK=%ld, PCLK=%ld\n", hclk2, hclk, pclk); clk_fout_mpll.rate = mpll; clk_fout_epll.rate = epll; clk_fout_apll.rate = apll; clk_h2.rate = hclk2; clk_h.rate = hclk; clk_p.rate = pclk; clk_f.rate = fclk; for (ptr = 0; ptr < ARRAY_SIZE(init_parents); ptr++) s3c_set_clksrc(init_parents[ptr], true); for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++) s3c_set_clksrc(&clksrcs[ptr], true); } static struct clk *clks1[] __initdata = { &clk_ext_xtal_mux, &clk_iis_cd0, &clk_iis_cd1, &clk_iisv4_cd, &clk_pcm_cd, &clk_mout_epll.clk, &clk_mout_mpll.clk, &clk_dout_mpll, &clk_arm, }; static struct clk *clks[] __initdata = { &clk_ext, &clk_epll, &clk_27m, &clk_48m, &clk_h2, &clk_xusbxti, }; /** * s3c64xx_register_clocks - register clocks for s3c6400 and s3c6410 * @xtal: The rate for the clock crystal feeding the PLLs. * @armclk_divlimit: Divisor mask for ARMCLK. * * Register the clocks for the S3C6400 and S3C6410 SoC range, such * as ARMCLK as well as the necessary parent clocks. * * This call does not setup the clocks, which is left to the * s3c64xx_setup_clocks() call which may be needed by the cpufreq * or resume code to re-set the clocks if the bootloader has changed * them. 
*/ void __init s3c64xx_register_clocks(unsigned long xtal, unsigned armclk_divlimit) { unsigned int cnt; armclk_mask = armclk_divlimit; s3c24xx_register_baseclocks(xtal); s3c24xx_register_clocks(clks, ARRAY_SIZE(clks)); s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks)); s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); s3c24xx_register_clocks(clk_cdev, ARRAY_SIZE(clk_cdev)); for (cnt = 0; cnt < ARRAY_SIZE(clk_cdev); cnt++) s3c_disable_clocks(clk_cdev[cnt], 1); s3c24xx_register_clocks(clks1, ARRAY_SIZE(clks1)); s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs)); for (cnt = 0; cnt < ARRAY_SIZE(clksrc_cdev); cnt++) s3c_register_clksrc(clksrc_cdev[cnt], 1); clkdev_add_table(s3c64xx_clk_lookup, ARRAY_SIZE(s3c64xx_clk_lookup)); s3c_pwmclk_init(); }
gpl-2.0
Tesla-Redux-Devices/android_kernel_samsung_lt02ltespr
drivers/xen/privcmd.c
5022
9030
/****************************************************************************** * privcmd.c * * Interface to privileged domain-0 commands. * * Copyright (c) 2002-2004, K A Fraser, B Dragovic */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/uaccess.h> #include <linux/swap.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/seq_file.h> #include <linux/miscdevice.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/tlb.h> #include <asm/xen/hypervisor.h> #include <asm/xen/hypercall.h> #include <xen/xen.h> #include <xen/privcmd.h> #include <xen/interface/xen.h> #include <xen/features.h> #include <xen/page.h> #include <xen/xen-ops.h> #include "privcmd.h" MODULE_LICENSE("GPL"); #ifndef HAVE_ARCH_PRIVCMD_MMAP static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma); #endif static long privcmd_ioctl_hypercall(void __user *udata) { struct privcmd_hypercall hypercall; long ret; if (copy_from_user(&hypercall, udata, sizeof(hypercall))) return -EFAULT; ret = privcmd_call(hypercall.op, hypercall.arg[0], hypercall.arg[1], hypercall.arg[2], hypercall.arg[3], hypercall.arg[4]); return ret; } static void free_page_list(struct list_head *pages) { struct page *p, *n; list_for_each_entry_safe(p, n, pages, lru) __free_page(p); INIT_LIST_HEAD(pages); } /* * Given an array of items in userspace, return a list of pages * containing the data. If copying fails, either because of memory * allocation failure or a problem reading user memory, return an * error code; its up to the caller to dispose of any partial list. 
*/ static int gather_array(struct list_head *pagelist, unsigned nelem, size_t size, void __user *data) { unsigned pageidx; void *pagedata; int ret; if (size > PAGE_SIZE) return 0; pageidx = PAGE_SIZE; pagedata = NULL; /* quiet, gcc */ while (nelem--) { if (pageidx > PAGE_SIZE-size) { struct page *page = alloc_page(GFP_KERNEL); ret = -ENOMEM; if (page == NULL) goto fail; pagedata = page_address(page); list_add_tail(&page->lru, pagelist); pageidx = 0; } ret = -EFAULT; if (copy_from_user(pagedata + pageidx, data, size)) goto fail; data += size; pageidx += size; } ret = 0; fail: return ret; } /* * Call function "fn" on each element of the array fragmented * over a list of pages. */ static int traverse_pages(unsigned nelem, size_t size, struct list_head *pos, int (*fn)(void *data, void *state), void *state) { void *pagedata; unsigned pageidx; int ret = 0; BUG_ON(size > PAGE_SIZE); pageidx = PAGE_SIZE; pagedata = NULL; /* hush, gcc */ while (nelem--) { if (pageidx > PAGE_SIZE-size) { struct page *page; pos = pos->next; page = list_entry(pos, struct page, lru); pagedata = page_address(page); pageidx = 0; } ret = (*fn)(pagedata + pageidx, state); if (ret) break; pageidx += size; } return ret; } struct mmap_mfn_state { unsigned long va; struct vm_area_struct *vma; domid_t domain; }; static int mmap_mfn_range(void *data, void *state) { struct privcmd_mmap_entry *msg = data; struct mmap_mfn_state *st = state; struct vm_area_struct *vma = st->vma; int rc; /* Do not allow range to wrap the address space. */ if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) || ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va)) return -EINVAL; /* Range chunks must be contiguous in va space. 
*/ if ((msg->va != st->va) || ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end)) return -EINVAL; rc = xen_remap_domain_mfn_range(vma, msg->va & PAGE_MASK, msg->mfn, msg->npages, vma->vm_page_prot, st->domain); if (rc < 0) return rc; st->va += msg->npages << PAGE_SHIFT; return 0; } static long privcmd_ioctl_mmap(void __user *udata) { struct privcmd_mmap mmapcmd; struct mm_struct *mm = current->mm; struct vm_area_struct *vma; int rc; LIST_HEAD(pagelist); struct mmap_mfn_state state; if (!xen_initial_domain()) return -EPERM; if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd))) return -EFAULT; rc = gather_array(&pagelist, mmapcmd.num, sizeof(struct privcmd_mmap_entry), mmapcmd.entry); if (rc || list_empty(&pagelist)) goto out; down_write(&mm->mmap_sem); { struct page *page = list_first_entry(&pagelist, struct page, lru); struct privcmd_mmap_entry *msg = page_address(page); vma = find_vma(mm, msg->va); rc = -EINVAL; if (!vma || (msg->va != vma->vm_start) || !privcmd_enforce_singleshot_mapping(vma)) goto out_up; } state.va = vma->vm_start; state.vma = vma; state.domain = mmapcmd.dom; rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry), &pagelist, mmap_mfn_range, &state); out_up: up_write(&mm->mmap_sem); out: free_page_list(&pagelist); return rc; } struct mmap_batch_state { domid_t domain; unsigned long va; struct vm_area_struct *vma; int err; xen_pfn_t __user *user; }; static int mmap_batch_fn(void *data, void *state) { xen_pfn_t *mfnp = data; struct mmap_batch_state *st = state; if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1, st->vma->vm_page_prot, st->domain) < 0) { *mfnp |= 0xf0000000U; st->err++; } st->va += PAGE_SIZE; return 0; } static int mmap_return_errors(void *data, void *state) { xen_pfn_t *mfnp = data; struct mmap_batch_state *st = state; return put_user(*mfnp, st->user++); } static struct vm_operations_struct privcmd_vm_ops; static long privcmd_ioctl_mmap_batch(void __user *udata) { int ret; struct privcmd_mmapbatch m; 
struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long nr_pages; LIST_HEAD(pagelist); struct mmap_batch_state state; if (!xen_initial_domain()) return -EPERM; if (copy_from_user(&m, udata, sizeof(m))) return -EFAULT; nr_pages = m.num; if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT))) return -EINVAL; ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr); if (ret || list_empty(&pagelist)) goto out; down_write(&mm->mmap_sem); vma = find_vma(mm, m.addr); ret = -EINVAL; if (!vma || vma->vm_ops != &privcmd_vm_ops || (m.addr != vma->vm_start) || ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) || !privcmd_enforce_singleshot_mapping(vma)) { up_write(&mm->mmap_sem); goto out; } state.domain = m.dom; state.vma = vma; state.va = m.addr; state.err = 0; ret = traverse_pages(m.num, sizeof(xen_pfn_t), &pagelist, mmap_batch_fn, &state); up_write(&mm->mmap_sem); if (state.err > 0) { state.user = m.arr; ret = traverse_pages(m.num, sizeof(xen_pfn_t), &pagelist, mmap_return_errors, &state); } out: free_page_list(&pagelist); return ret; } static long privcmd_ioctl(struct file *file, unsigned int cmd, unsigned long data) { int ret = -ENOSYS; void __user *udata = (void __user *) data; switch (cmd) { case IOCTL_PRIVCMD_HYPERCALL: ret = privcmd_ioctl_hypercall(udata); break; case IOCTL_PRIVCMD_MMAP: ret = privcmd_ioctl_mmap(udata); break; case IOCTL_PRIVCMD_MMAPBATCH: ret = privcmd_ioctl_mmap_batch(udata); break; default: ret = -EINVAL; break; } return ret; } static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n", vma, vma->vm_start, vma->vm_end, vmf->pgoff, vmf->virtual_address); return VM_FAULT_SIGBUS; } static struct vm_operations_struct privcmd_vm_ops = { .fault = privcmd_fault }; static int privcmd_mmap(struct file *file, struct vm_area_struct *vma) { /* Unsupported for auto-translate guests. 
*/ if (xen_feature(XENFEAT_auto_translated_physmap)) return -ENOSYS; /* DONTCOPY is essential for Xen because copy_page_range doesn't know * how to recreate these mappings */ vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP; vma->vm_ops = &privcmd_vm_ops; vma->vm_private_data = NULL; return 0; } static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma) { return (xchg(&vma->vm_private_data, (void *)1) == NULL); } const struct file_operations xen_privcmd_fops = { .owner = THIS_MODULE, .unlocked_ioctl = privcmd_ioctl, .mmap = privcmd_mmap, }; EXPORT_SYMBOL_GPL(xen_privcmd_fops); static struct miscdevice privcmd_dev = { .minor = MISC_DYNAMIC_MINOR, .name = "xen/privcmd", .fops = &xen_privcmd_fops, }; static int __init privcmd_init(void) { int err; if (!xen_domain()) return -ENODEV; err = misc_register(&privcmd_dev); if (err != 0) { printk(KERN_ERR "Could not register Xen privcmd device\n"); return err; } return 0; } static void __exit privcmd_exit(void) { misc_deregister(&privcmd_dev); } module_init(privcmd_init); module_exit(privcmd_exit);
gpl-2.0
showp1984/bricked-pyramid-3.0
arch/mips/math-emu/sp_sub.c
7838
4811
/* IEEE754 floating point arithmetic * single precision */ /* * MIPS floating point support * Copyright (C) 1994-2000 Algorithmics Ltd. * * ######################################################################## * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * ######################################################################## */ #include "ieee754sp.h" ieee754sp ieee754sp_sub(ieee754sp x, ieee754sp y) { COMPXSP; COMPYSP; EXPLODEXSP; EXPLODEYSP; CLEARCX; FLUSHXSP; FLUSHYSP; switch (CLPAIR(xc, yc)) { case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): SETCX(IEEE754_INVALID_OPERATION); return ieee754sp_nanxcpt(ieee754sp_indef(), "sub", x, y); case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): return y; case 
CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF): return x; /* Infinity handling */ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): if (xs != ys) return x; SETCX(IEEE754_INVALID_OPERATION); return ieee754sp_xcpt(ieee754sp_indef(), "sub", x, y); case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): return ieee754sp_inf(ys ^ 1); case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): return x; /* Zero handling */ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): if (xs != ys) return x; else return ieee754sp_zero(ieee754_csr.rm == IEEE754_RD); case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): return x; case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): /* quick fix up */ DPSIGN(y) ^= 1; return y; case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): SPDNORMX; case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM): SPDNORMY; break; case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM): SPDNORMX; break; case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM): break; } /* flip sign of y and handle as add */ ys ^= 1; assert(xm & SP_HIDDEN_BIT); assert(ym & SP_HIDDEN_BIT); /* provide guard,round and stick bit space */ xm <<= 3; ym <<= 3; if (xe > ye) { /* have to shift y fraction right to align */ int s = xe - ye; SPXSRSYn(s); } else if (ye > xe) { /* have to shift x fraction right to align */ int s = ye - xe; SPXSRSXn(s); } assert(xe == ye); assert(xe <= SP_EMAX); if (xs == ys) { /* generate 28 bit result of adding two 27 bit numbers */ xm = xm + ym; xe = 
xe; xs = xs; if (xm >> (SP_MBITS + 1 + 3)) { /* carry out */ SPXSRSX1(); /* shift preserving sticky */ } } else { if (xm >= ym) { xm = xm - ym; xe = xe; xs = xs; } else { xm = ym - xm; xe = xe; xs = ys; } if (xm == 0) { if (ieee754_csr.rm == IEEE754_RD) return ieee754sp_zero(1); /* round negative inf. => sign = -1 */ else return ieee754sp_zero(0); /* other round modes => sign = 1 */ } /* normalize to rounding precision */ while ((xm >> (SP_MBITS + 3)) == 0) { xm <<= 1; xe--; } } SPNORMRET2(xs, xe, xm, "sub", x, y); }
gpl-2.0
TEAM-RAZOR-DEVICES/kernel_lge_v500
arch/mn10300/lib/bitops.c
9118
1087
/* MN10300 Non-trivial bit operations * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/module.h> #include <asm/bitops.h> /* * try flipping a bit using BSET and BCLR */ void change_bit(unsigned long nr, volatile void *addr) { if (test_bit(nr, addr)) goto try_clear_bit; try_set_bit: if (!test_and_set_bit(nr, addr)) return; try_clear_bit: if (test_and_clear_bit(nr, addr)) return; goto try_set_bit; } /* * try flipping a bit using BSET and BCLR and returning the old value */ int test_and_change_bit(unsigned long nr, volatile void *addr) { if (test_bit(nr, addr)) goto try_clear_bit; try_set_bit: if (!test_and_set_bit(nr, addr)) return 0; try_clear_bit: if (test_and_clear_bit(nr, addr)) return 1; goto try_set_bit; }
gpl-2.0
pbystrup/CHIP-linux
drivers/input/gameport/ns558.c
13982
7301
/* * Copyright (c) 1999-2001 Vojtech Pavlik * Copyright (c) 1999 Brian Gerst */ /* * NS558 based standard IBM game port driver for Linux */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <asm/io.h> #include <linux/module.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/gameport.h> #include <linux/slab.h> #include <linux/pnp.h> MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION("Classic gameport (ISA/PnP) driver"); MODULE_LICENSE("GPL"); static int ns558_isa_portlist[] = { 0x201, 0x200, 0x202, 0x203, 0x204, 0x205, 0x207, 0x209, 0x20b, 0x20c, 0x20e, 0x20f, 0x211, 0x219, 0x101, 0 }; struct ns558 { int type; int io; int size; struct pnp_dev *dev; struct gameport *gameport; struct list_head node; }; static LIST_HEAD(ns558_list); /* * ns558_isa_probe() tries to find an isa gameport at the * specified address, and also checks for mirrors. * A joystick must be attached for this to work. */ static int ns558_isa_probe(int io) { int i, j, b; unsigned char c, u, v; struct ns558 *ns558; struct gameport *port; /* * No one should be using this address. 
*/ if (!request_region(io, 1, "ns558-isa")) return -EBUSY; /* * We must not be able to write arbitrary values to the port. * The lower two axis bits must be 1 after a write. */ c = inb(io); outb(~c & ~3, io); if (~(u = v = inb(io)) & 3) { outb(c, io); release_region(io, 1); return -ENODEV; } /* * After a trigger, there must be at least some bits changing. */ for (i = 0; i < 1000; i++) v &= inb(io); if (u == v) { outb(c, io); release_region(io, 1); return -ENODEV; } msleep(3); /* * After some time (4ms) the axes shouldn't change anymore. */ u = inb(io); for (i = 0; i < 1000; i++) if ((u ^ inb(io)) & 0xf) { outb(c, io); release_region(io, 1); return -ENODEV; } /* * And now find the number of mirrors of the port. */ for (i = 1; i < 5; i++) { release_region(io & (-1 << (i - 1)), (1 << (i - 1))); if (!request_region(io & (-1 << i), (1 << i), "ns558-isa")) break; /* Don't disturb anyone */ outb(0xff, io & (-1 << i)); for (j = b = 0; j < 1000; j++) if (inb(io & (-1 << i)) != inb((io & (-1 << i)) + (1 << i) - 1)) b++; msleep(3); if (b > 300) { /* We allow 30% difference */ release_region(io & (-1 << i), (1 << i)); break; } } i--; if (i != 4) { if (!request_region(io & (-1 << i), (1 << i), "ns558-isa")) return -EBUSY; } ns558 = kzalloc(sizeof(struct ns558), GFP_KERNEL); port = gameport_allocate_port(); if (!ns558 || !port) { printk(KERN_ERR "ns558: Memory allocation failed.\n"); release_region(io & (-1 << i), (1 << i)); kfree(ns558); gameport_free_port(port); return -ENOMEM; } ns558->io = io; ns558->size = 1 << i; ns558->gameport = port; port->io = io; gameport_set_name(port, "NS558 ISA Gameport"); gameport_set_phys(port, "isa%04x/gameport0", io & (-1 << i)); gameport_register_port(port); list_add(&ns558->node, &ns558_list); return 0; } #ifdef CONFIG_PNP static const struct pnp_device_id pnp_devids[] = { { .id = "@P@0001", .driver_data = 0 }, /* ALS 100 */ { .id = "@P@0020", .driver_data = 0 }, /* ALS 200 */ { .id = "@P@1001", .driver_data = 0 }, /* ALS 100+ */ { .id = 
"@P@2001", .driver_data = 0 }, /* ALS 120 */ { .id = "ASB16fd", .driver_data = 0 }, /* AdLib NSC16 */ { .id = "AZT3001", .driver_data = 0 }, /* AZT1008 */ { .id = "CDC0001", .driver_data = 0 }, /* Opl3-SAx */ { .id = "CSC0001", .driver_data = 0 }, /* CS4232 */ { .id = "CSC000f", .driver_data = 0 }, /* CS4236 */ { .id = "CSC0101", .driver_data = 0 }, /* CS4327 */ { .id = "CTL7001", .driver_data = 0 }, /* SB16 */ { .id = "CTL7002", .driver_data = 0 }, /* AWE64 */ { .id = "CTL7005", .driver_data = 0 }, /* Vibra16 */ { .id = "ENS2020", .driver_data = 0 }, /* SoundscapeVIVO */ { .id = "ESS0001", .driver_data = 0 }, /* ES1869 */ { .id = "ESS0005", .driver_data = 0 }, /* ES1878 */ { .id = "ESS6880", .driver_data = 0 }, /* ES688 */ { .id = "IBM0012", .driver_data = 0 }, /* CS4232 */ { .id = "OPT0001", .driver_data = 0 }, /* OPTi Audio16 */ { .id = "YMH0006", .driver_data = 0 }, /* Opl3-SA */ { .id = "YMH0022", .driver_data = 0 }, /* Opl3-SAx */ { .id = "PNPb02f", .driver_data = 0 }, /* Generic */ { .id = "", }, }; MODULE_DEVICE_TABLE(pnp, pnp_devids); static int ns558_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *did) { int ioport, iolen; struct ns558 *ns558; struct gameport *port; if (!pnp_port_valid(dev, 0)) { printk(KERN_WARNING "ns558: No i/o ports on a gameport? 
Weird\n"); return -ENODEV; } ioport = pnp_port_start(dev, 0); iolen = pnp_port_len(dev, 0); if (!request_region(ioport, iolen, "ns558-pnp")) return -EBUSY; ns558 = kzalloc(sizeof(struct ns558), GFP_KERNEL); port = gameport_allocate_port(); if (!ns558 || !port) { printk(KERN_ERR "ns558: Memory allocation failed\n"); kfree(ns558); gameport_free_port(port); return -ENOMEM; } ns558->io = ioport; ns558->size = iolen; ns558->dev = dev; ns558->gameport = port; gameport_set_name(port, "NS558 PnP Gameport"); gameport_set_phys(port, "pnp%s/gameport0", dev_name(&dev->dev)); port->dev.parent = &dev->dev; port->io = ioport; gameport_register_port(port); list_add_tail(&ns558->node, &ns558_list); return 0; } static struct pnp_driver ns558_pnp_driver = { .name = "ns558", .id_table = pnp_devids, .probe = ns558_pnp_probe, }; #else static struct pnp_driver ns558_pnp_driver; #endif static int __init ns558_init(void) { int i = 0; int error; error = pnp_register_driver(&ns558_pnp_driver); if (error && error != -ENODEV) /* should be ENOSYS really */ return error; /* * Probe ISA ports after PnP, so that PnP ports that are already * enabled get detected as PnP. This may be suboptimal in multi-device * configurations, but saves hassle with simple setups. */ while (ns558_isa_portlist[i]) ns558_isa_probe(ns558_isa_portlist[i++]); return list_empty(&ns558_list) && error ? -ENODEV : 0; } static void __exit ns558_exit(void) { struct ns558 *ns558, *safe; list_for_each_entry_safe(ns558, safe, &ns558_list, node) { gameport_unregister_port(ns558->gameport); release_region(ns558->io & ~(ns558->size - 1), ns558->size); kfree(ns558); } pnp_unregister_driver(&ns558_pnp_driver); } module_init(ns558_init); module_exit(ns558_exit);
gpl-2.0
eoghan2t9/kernel_qcom_cfx
drivers/input/gameport/ns558.c
13982
7301
/* * Copyright (c) 1999-2001 Vojtech Pavlik * Copyright (c) 1999 Brian Gerst */ /* * NS558 based standard IBM game port driver for Linux */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <asm/io.h> #include <linux/module.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/gameport.h> #include <linux/slab.h> #include <linux/pnp.h> MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION("Classic gameport (ISA/PnP) driver"); MODULE_LICENSE("GPL"); static int ns558_isa_portlist[] = { 0x201, 0x200, 0x202, 0x203, 0x204, 0x205, 0x207, 0x209, 0x20b, 0x20c, 0x20e, 0x20f, 0x211, 0x219, 0x101, 0 }; struct ns558 { int type; int io; int size; struct pnp_dev *dev; struct gameport *gameport; struct list_head node; }; static LIST_HEAD(ns558_list); /* * ns558_isa_probe() tries to find an isa gameport at the * specified address, and also checks for mirrors. * A joystick must be attached for this to work. */ static int ns558_isa_probe(int io) { int i, j, b; unsigned char c, u, v; struct ns558 *ns558; struct gameport *port; /* * No one should be using this address. 
*/ if (!request_region(io, 1, "ns558-isa")) return -EBUSY; /* * We must not be able to write arbitrary values to the port. * The lower two axis bits must be 1 after a write. */ c = inb(io); outb(~c & ~3, io); if (~(u = v = inb(io)) & 3) { outb(c, io); release_region(io, 1); return -ENODEV; } /* * After a trigger, there must be at least some bits changing. */ for (i = 0; i < 1000; i++) v &= inb(io); if (u == v) { outb(c, io); release_region(io, 1); return -ENODEV; } msleep(3); /* * After some time (4ms) the axes shouldn't change anymore. */ u = inb(io); for (i = 0; i < 1000; i++) if ((u ^ inb(io)) & 0xf) { outb(c, io); release_region(io, 1); return -ENODEV; } /* * And now find the number of mirrors of the port. */ for (i = 1; i < 5; i++) { release_region(io & (-1 << (i - 1)), (1 << (i - 1))); if (!request_region(io & (-1 << i), (1 << i), "ns558-isa")) break; /* Don't disturb anyone */ outb(0xff, io & (-1 << i)); for (j = b = 0; j < 1000; j++) if (inb(io & (-1 << i)) != inb((io & (-1 << i)) + (1 << i) - 1)) b++; msleep(3); if (b > 300) { /* We allow 30% difference */ release_region(io & (-1 << i), (1 << i)); break; } } i--; if (i != 4) { if (!request_region(io & (-1 << i), (1 << i), "ns558-isa")) return -EBUSY; } ns558 = kzalloc(sizeof(struct ns558), GFP_KERNEL); port = gameport_allocate_port(); if (!ns558 || !port) { printk(KERN_ERR "ns558: Memory allocation failed.\n"); release_region(io & (-1 << i), (1 << i)); kfree(ns558); gameport_free_port(port); return -ENOMEM; } ns558->io = io; ns558->size = 1 << i; ns558->gameport = port; port->io = io; gameport_set_name(port, "NS558 ISA Gameport"); gameport_set_phys(port, "isa%04x/gameport0", io & (-1 << i)); gameport_register_port(port); list_add(&ns558->node, &ns558_list); return 0; } #ifdef CONFIG_PNP static const struct pnp_device_id pnp_devids[] = { { .id = "@P@0001", .driver_data = 0 }, /* ALS 100 */ { .id = "@P@0020", .driver_data = 0 }, /* ALS 200 */ { .id = "@P@1001", .driver_data = 0 }, /* ALS 100+ */ { .id = 
"@P@2001", .driver_data = 0 }, /* ALS 120 */ { .id = "ASB16fd", .driver_data = 0 }, /* AdLib NSC16 */ { .id = "AZT3001", .driver_data = 0 }, /* AZT1008 */ { .id = "CDC0001", .driver_data = 0 }, /* Opl3-SAx */ { .id = "CSC0001", .driver_data = 0 }, /* CS4232 */ { .id = "CSC000f", .driver_data = 0 }, /* CS4236 */ { .id = "CSC0101", .driver_data = 0 }, /* CS4327 */ { .id = "CTL7001", .driver_data = 0 }, /* SB16 */ { .id = "CTL7002", .driver_data = 0 }, /* AWE64 */ { .id = "CTL7005", .driver_data = 0 }, /* Vibra16 */ { .id = "ENS2020", .driver_data = 0 }, /* SoundscapeVIVO */ { .id = "ESS0001", .driver_data = 0 }, /* ES1869 */ { .id = "ESS0005", .driver_data = 0 }, /* ES1878 */ { .id = "ESS6880", .driver_data = 0 }, /* ES688 */ { .id = "IBM0012", .driver_data = 0 }, /* CS4232 */ { .id = "OPT0001", .driver_data = 0 }, /* OPTi Audio16 */ { .id = "YMH0006", .driver_data = 0 }, /* Opl3-SA */ { .id = "YMH0022", .driver_data = 0 }, /* Opl3-SAx */ { .id = "PNPb02f", .driver_data = 0 }, /* Generic */ { .id = "", }, }; MODULE_DEVICE_TABLE(pnp, pnp_devids); static int ns558_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *did) { int ioport, iolen; struct ns558 *ns558; struct gameport *port; if (!pnp_port_valid(dev, 0)) { printk(KERN_WARNING "ns558: No i/o ports on a gameport? 
Weird\n"); return -ENODEV; } ioport = pnp_port_start(dev, 0); iolen = pnp_port_len(dev, 0); if (!request_region(ioport, iolen, "ns558-pnp")) return -EBUSY; ns558 = kzalloc(sizeof(struct ns558), GFP_KERNEL); port = gameport_allocate_port(); if (!ns558 || !port) { printk(KERN_ERR "ns558: Memory allocation failed\n"); kfree(ns558); gameport_free_port(port); return -ENOMEM; } ns558->io = ioport; ns558->size = iolen; ns558->dev = dev; ns558->gameport = port; gameport_set_name(port, "NS558 PnP Gameport"); gameport_set_phys(port, "pnp%s/gameport0", dev_name(&dev->dev)); port->dev.parent = &dev->dev; port->io = ioport; gameport_register_port(port); list_add_tail(&ns558->node, &ns558_list); return 0; } static struct pnp_driver ns558_pnp_driver = { .name = "ns558", .id_table = pnp_devids, .probe = ns558_pnp_probe, }; #else static struct pnp_driver ns558_pnp_driver; #endif static int __init ns558_init(void) { int i = 0; int error; error = pnp_register_driver(&ns558_pnp_driver); if (error && error != -ENODEV) /* should be ENOSYS really */ return error; /* * Probe ISA ports after PnP, so that PnP ports that are already * enabled get detected as PnP. This may be suboptimal in multi-device * configurations, but saves hassle with simple setups. */ while (ns558_isa_portlist[i]) ns558_isa_probe(ns558_isa_portlist[i++]); return list_empty(&ns558_list) && error ? -ENODEV : 0; } static void __exit ns558_exit(void) { struct ns558 *ns558, *safe; list_for_each_entry_safe(ns558, safe, &ns558_list, node) { gameport_unregister_port(ns558->gameport); release_region(ns558->io & ~(ns558->size - 1), ns558->size); kfree(ns558); } pnp_unregister_driver(&ns558_pnp_driver); } module_init(ns558_init); module_exit(ns558_exit);
gpl-2.0
xiyuansun/linux
drivers/iio/industrialio-event.c
159
13856
/* Industrial I/O event handling * * Copyright (c) 2008 Jonathan Cameron * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * Based on elements of hwmon and input subsystems. */ #include <linux/anon_inodes.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/kfifo.h> #include <linux/module.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/wait.h> #include <linux/iio/iio.h> #include "iio_core.h" #include <linux/iio/sysfs.h> #include <linux/iio/events.h> /** * struct iio_event_interface - chrdev interface for an event line * @wait: wait queue to allow blocking reads of events * @det_events: list of detected events * @dev_attr_list: list of event interface sysfs attribute * @flags: file operations related flags including busy flag. * @group: event interface sysfs attribute group */ struct iio_event_interface { wait_queue_head_t wait; DECLARE_KFIFO(det_events, struct iio_event_data, 16); struct list_head dev_attr_list; unsigned long flags; struct attribute_group group; struct mutex read_lock; }; /** * iio_push_event() - try to add event to the list for userspace reading * @indio_dev: IIO device structure * @ev_code: What event * @timestamp: When the event occurred * * Note: The caller must make sure that this function is not running * concurrently for the same indio_dev more than once. **/ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp) { struct iio_event_interface *ev_int = indio_dev->event_interface; struct iio_event_data ev; int copied; /* Does anyone care? 
*/ if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) { ev.id = ev_code; ev.timestamp = timestamp; copied = kfifo_put(&ev_int->det_events, ev); if (copied != 0) wake_up_poll(&ev_int->wait, POLLIN); } return 0; } EXPORT_SYMBOL(iio_push_event); /** * iio_event_poll() - poll the event queue to find out if it has data */ static unsigned int iio_event_poll(struct file *filep, struct poll_table_struct *wait) { struct iio_dev *indio_dev = filep->private_data; struct iio_event_interface *ev_int = indio_dev->event_interface; unsigned int events = 0; if (!indio_dev->info) return -ENODEV; poll_wait(filep, &ev_int->wait, wait); if (!kfifo_is_empty(&ev_int->det_events)) events = POLLIN | POLLRDNORM; return events; } static ssize_t iio_event_chrdev_read(struct file *filep, char __user *buf, size_t count, loff_t *f_ps) { struct iio_dev *indio_dev = filep->private_data; struct iio_event_interface *ev_int = indio_dev->event_interface; unsigned int copied; int ret; if (!indio_dev->info) return -ENODEV; if (count < sizeof(struct iio_event_data)) return -EINVAL; do { if (kfifo_is_empty(&ev_int->det_events)) { if (filep->f_flags & O_NONBLOCK) return -EAGAIN; ret = wait_event_interruptible(ev_int->wait, !kfifo_is_empty(&ev_int->det_events) || indio_dev->info == NULL); if (ret) return ret; if (indio_dev->info == NULL) return -ENODEV; } if (mutex_lock_interruptible(&ev_int->read_lock)) return -ERESTARTSYS; ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied); mutex_unlock(&ev_int->read_lock); if (ret) return ret; /* * If we couldn't read anything from the fifo (a different * thread might have been faster) we either return -EAGAIN if * the file descriptor is non-blocking, otherwise we go back to * sleep and wait for more data to arrive. 
*/ if (copied == 0 && (filep->f_flags & O_NONBLOCK)) return -EAGAIN; } while (copied == 0); return copied; } static int iio_event_chrdev_release(struct inode *inode, struct file *filep) { struct iio_dev *indio_dev = filep->private_data; struct iio_event_interface *ev_int = indio_dev->event_interface; clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags); iio_device_put(indio_dev); return 0; } static const struct file_operations iio_event_chrdev_fileops = { .read = iio_event_chrdev_read, .poll = iio_event_poll, .release = iio_event_chrdev_release, .owner = THIS_MODULE, .llseek = noop_llseek, }; int iio_event_getfd(struct iio_dev *indio_dev) { struct iio_event_interface *ev_int = indio_dev->event_interface; int fd; if (ev_int == NULL) return -ENODEV; if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) return -EBUSY; iio_device_get(indio_dev); fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops, indio_dev, O_RDONLY | O_CLOEXEC); if (fd < 0) { clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags); iio_device_put(indio_dev); } else { kfifo_reset_out(&ev_int->det_events); } return fd; } static const char * const iio_ev_type_text[] = { [IIO_EV_TYPE_THRESH] = "thresh", [IIO_EV_TYPE_MAG] = "mag", [IIO_EV_TYPE_ROC] = "roc", [IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive", [IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive", [IIO_EV_TYPE_CHANGE] = "change", }; static const char * const iio_ev_dir_text[] = { [IIO_EV_DIR_EITHER] = "either", [IIO_EV_DIR_RISING] = "rising", [IIO_EV_DIR_FALLING] = "falling" }; static const char * const iio_ev_info_text[] = { [IIO_EV_INFO_ENABLE] = "en", [IIO_EV_INFO_VALUE] = "value", [IIO_EV_INFO_HYSTERESIS] = "hysteresis", [IIO_EV_INFO_PERIOD] = "period", [IIO_EV_INFO_HIGH_PASS_FILTER_3DB] = "high_pass_filter_3db", [IIO_EV_INFO_LOW_PASS_FILTER_3DB] = "low_pass_filter_3db", }; static enum iio_event_direction iio_ev_attr_dir(struct iio_dev_attr *attr) { return attr->c->event_spec[attr->address & 0xffff].dir; } static enum iio_event_type iio_ev_attr_type(struct 
iio_dev_attr *attr) { return attr->c->event_spec[attr->address & 0xffff].type; } static enum iio_event_info iio_ev_attr_info(struct iio_dev_attr *attr) { return (attr->address >> 16) & 0xffff; } static ssize_t iio_ev_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); int ret; bool val; ret = strtobool(buf, &val); if (ret < 0) return ret; ret = indio_dev->info->write_event_config(indio_dev, this_attr->c, iio_ev_attr_type(this_attr), iio_ev_attr_dir(this_attr), val); return (ret < 0) ? ret : len; } static ssize_t iio_ev_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); int val; val = indio_dev->info->read_event_config(indio_dev, this_attr->c, iio_ev_attr_type(this_attr), iio_ev_attr_dir(this_attr)); if (val < 0) return val; else return sprintf(buf, "%d\n", val); } static ssize_t iio_ev_value_show(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); int val, val2, val_arr[2]; int ret; ret = indio_dev->info->read_event_value(indio_dev, this_attr->c, iio_ev_attr_type(this_attr), iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr), &val, &val2); if (ret < 0) return ret; val_arr[0] = val; val_arr[1] = val2; return iio_format_value(buf, ret, 2, val_arr); } static ssize_t iio_ev_value_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); int val, val2; int ret; if (!indio_dev->info->write_event_value) return -EINVAL; ret = iio_str_to_fixpoint(buf, 100000, &val, &val2); if (ret) return ret; ret = indio_dev->info->write_event_value(indio_dev, 
this_attr->c, iio_ev_attr_type(this_attr), iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr), val, val2); if (ret < 0) return ret; return len; } static int iio_device_add_event(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, unsigned int spec_index, enum iio_event_type type, enum iio_event_direction dir, enum iio_shared_by shared_by, const unsigned long *mask) { ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t); unsigned int attrcount = 0; unsigned int i; char *postfix; int ret; for_each_set_bit(i, mask, sizeof(*mask)*8) { if (i >= ARRAY_SIZE(iio_ev_info_text)) return -EINVAL; if (dir != IIO_EV_DIR_NONE) postfix = kasprintf(GFP_KERNEL, "%s_%s_%s", iio_ev_type_text[type], iio_ev_dir_text[dir], iio_ev_info_text[i]); else postfix = kasprintf(GFP_KERNEL, "%s_%s", iio_ev_type_text[type], iio_ev_info_text[i]); if (postfix == NULL) return -ENOMEM; if (i == IIO_EV_INFO_ENABLE) { show = iio_ev_state_show; store = iio_ev_state_store; } else { show = iio_ev_value_show; store = iio_ev_value_store; } ret = __iio_add_chan_devattr(postfix, chan, show, store, (i << 16) | spec_index, shared_by, &indio_dev->dev, &indio_dev->event_interface->dev_attr_list); kfree(postfix); if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE)) continue; if (ret) return ret; attrcount++; } return attrcount; } static int iio_device_add_event_sysfs(struct iio_dev *indio_dev, struct iio_chan_spec const *chan) { int ret = 0, i, attrcount = 0; enum iio_event_direction dir; enum iio_event_type type; for (i = 0; i < chan->num_event_specs; i++) { type = chan->event_spec[i].type; dir = chan->event_spec[i].dir; ret = iio_device_add_event(indio_dev, chan, i, type, dir, IIO_SEPARATE, &chan->event_spec[i].mask_separate); if (ret < 0) return ret; attrcount += ret; ret = iio_device_add_event(indio_dev, chan, i, type, dir, IIO_SHARED_BY_TYPE, &chan->event_spec[i].mask_shared_by_type); if (ret < 0) 
return ret; attrcount += ret; ret = iio_device_add_event(indio_dev, chan, i, type, dir, IIO_SHARED_BY_DIR, &chan->event_spec[i].mask_shared_by_dir); if (ret < 0) return ret; attrcount += ret; ret = iio_device_add_event(indio_dev, chan, i, type, dir, IIO_SHARED_BY_ALL, &chan->event_spec[i].mask_shared_by_all); if (ret < 0) return ret; attrcount += ret; } ret = attrcount; return ret; } static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev) { int j, ret, attrcount = 0; /* Dynamically created from the channels array */ for (j = 0; j < indio_dev->num_channels; j++) { ret = iio_device_add_event_sysfs(indio_dev, &indio_dev->channels[j]); if (ret < 0) return ret; attrcount += ret; } return attrcount; } static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev) { int j; for (j = 0; j < indio_dev->num_channels; j++) { if (indio_dev->channels[j].num_event_specs != 0) return true; } return false; } static void iio_setup_ev_int(struct iio_event_interface *ev_int) { INIT_KFIFO(ev_int->det_events); init_waitqueue_head(&ev_int->wait); mutex_init(&ev_int->read_lock); } static const char *iio_event_group_name = "events"; int iio_device_register_eventset(struct iio_dev *indio_dev) { struct iio_dev_attr *p; int ret = 0, attrcount_orig = 0, attrcount, attrn; struct attribute **attr; if (!(indio_dev->info->event_attrs || iio_check_for_dynamic_events(indio_dev))) return 0; indio_dev->event_interface = kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL); if (indio_dev->event_interface == NULL) return -ENOMEM; INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list); iio_setup_ev_int(indio_dev->event_interface); if (indio_dev->info->event_attrs != NULL) { attr = indio_dev->info->event_attrs->attrs; while (*attr++ != NULL) attrcount_orig++; } attrcount = attrcount_orig; if (indio_dev->channels) { ret = __iio_add_event_config_attrs(indio_dev); if (ret < 0) goto error_free_setup_event_lines; attrcount += ret; } indio_dev->event_interface->group.name = 
iio_event_group_name; indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1, sizeof(indio_dev->event_interface->group.attrs[0]), GFP_KERNEL); if (indio_dev->event_interface->group.attrs == NULL) { ret = -ENOMEM; goto error_free_setup_event_lines; } if (indio_dev->info->event_attrs) memcpy(indio_dev->event_interface->group.attrs, indio_dev->info->event_attrs->attrs, sizeof(indio_dev->event_interface->group.attrs[0]) *attrcount_orig); attrn = attrcount_orig; /* Add all elements from the list. */ list_for_each_entry(p, &indio_dev->event_interface->dev_attr_list, l) indio_dev->event_interface->group.attrs[attrn++] = &p->dev_attr.attr; indio_dev->groups[indio_dev->groupcounter++] = &indio_dev->event_interface->group; return 0; error_free_setup_event_lines: iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list); kfree(indio_dev->event_interface); indio_dev->event_interface = NULL; return ret; } /** * iio_device_wakeup_eventset - Wakes up the event waitqueue * @indio_dev: The IIO device * * Wakes up the event waitqueue used for poll() and blocking read(). * Should usually be called when the device is unregistered. */ void iio_device_wakeup_eventset(struct iio_dev *indio_dev) { if (indio_dev->event_interface == NULL) return; wake_up(&indio_dev->event_interface->wait); } void iio_device_unregister_eventset(struct iio_dev *indio_dev) { if (indio_dev->event_interface == NULL) return; iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list); kfree(indio_dev->event_interface->group.attrs); kfree(indio_dev->event_interface); }
gpl-2.0
miiicmueller/android_kernel_raspberryPi_rpiv2
drivers/input/touchscreen/ipaq-micro-ts.c
415
3942
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * h3600 atmel micro companion support, touchscreen subdevice * Author : Alessandro Gardich <gremlin@gremlin.it> * Author : Dmitry Artamonow <mad_soft@inbox.ru> * Author : Linus Walleij <linus.walleij@linaro.org> * */ #include <asm/byteorder.h> #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pm.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/input.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/mfd/ipaq-micro.h> struct touchscreen_data { struct input_dev *input; struct ipaq_micro *micro; }; static void micro_ts_receive(void *data, int len, unsigned char *msg) { struct touchscreen_data *ts = data; if (len == 4) { input_report_abs(ts->input, ABS_X, be16_to_cpup((__be16 *) &msg[2])); input_report_abs(ts->input, ABS_Y, be16_to_cpup((__be16 *) &msg[0])); input_report_key(ts->input, BTN_TOUCH, 1); input_sync(ts->input); } else if (len == 0) { input_report_abs(ts->input, ABS_X, 0); input_report_abs(ts->input, ABS_Y, 0); input_report_key(ts->input, BTN_TOUCH, 0); input_sync(ts->input); } } static void micro_ts_toggle_receive(struct touchscreen_data *ts, bool enable) { struct ipaq_micro *micro = ts->micro; spin_lock_irq(&micro->lock); if (enable) { micro->ts = micro_ts_receive; micro->ts_data = ts; } else { micro->ts = NULL; micro->ts_data = NULL; } spin_unlock_irq(&ts->micro->lock); } static int micro_ts_open(struct input_dev *input) { struct touchscreen_data *ts = input_get_drvdata(input); micro_ts_toggle_receive(ts, true); return 0; } static void micro_ts_close(struct input_dev *input) { struct touchscreen_data *ts = input_get_drvdata(input); micro_ts_toggle_receive(ts, false); } static int micro_ts_probe(struct platform_device *pdev) { struct ipaq_micro *micro = 
dev_get_drvdata(pdev->dev.parent); struct touchscreen_data *ts; int error; ts = devm_kzalloc(&pdev->dev, sizeof(*ts), GFP_KERNEL); if (!ts) return -ENOMEM; ts->micro = micro; ts->input = devm_input_allocate_device(&pdev->dev); if (!ts->input) { dev_err(&pdev->dev, "failed to allocate input device\n"); return -ENOMEM; } ts->input->name = "ipaq micro ts"; ts->input->open = micro_ts_open; ts->input->close = micro_ts_close; input_set_drvdata(ts->input, ts); input_set_capability(ts->input, EV_KEY, BTN_TOUCH); input_set_capability(ts->input, EV_ABS, ABS_X); input_set_capability(ts->input, EV_ABS, ABS_Y); input_set_abs_params(ts->input, ABS_X, 0, 1023, 0, 0); input_set_abs_params(ts->input, ABS_Y, 0, 1023, 0, 0); error = input_register_device(ts->input); if (error) { dev_err(&pdev->dev, "error registering touch input\n"); return error; } platform_set_drvdata(pdev, ts); dev_info(&pdev->dev, "iPAQ micro touchscreen\n"); return 0; } #ifdef CONFIG_PM_SLEEP static int micro_ts_suspend(struct device *dev) { struct touchscreen_data *ts = dev_get_drvdata(dev); micro_ts_toggle_receive(ts, false); return 0; } static int micro_ts_resume(struct device *dev) { struct touchscreen_data *ts = dev_get_drvdata(dev); struct input_dev *input = ts->input; mutex_lock(&input->mutex); if (input->users) micro_ts_toggle_receive(ts, true); mutex_unlock(&input->mutex); return 0; } #endif static const struct dev_pm_ops micro_ts_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(micro_ts_suspend, micro_ts_resume) }; static struct platform_driver micro_ts_device_driver = { .driver = { .name = "ipaq-micro-ts", .pm = &micro_ts_dev_pm_ops, }, .probe = micro_ts_probe, }; module_platform_driver(micro_ts_device_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("driver for iPAQ Atmel micro touchscreen"); MODULE_ALIAS("platform:ipaq-micro-ts");
gpl-2.0
pranav01/Gods_kernel_YU
drivers/power/qpnp-smbcharger.c
415
99686
/* Copyright (c) 2014 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "SMBCHG: %s: " fmt, __func__ #include <linux/spmi.h> #include <linux/spinlock.h> #include <linux/gpio.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/power_supply.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/bitops.h> #include <linux/regulator/driver.h> #include <linux/regulator/of_regulator.h> #include <linux/regulator/machine.h> #include <linux/spmi.h> #include <linux/printk.h> #include <linux/ratelimit.h> /* Mask/Bit helpers */ #define _SMB_MASK(BITS, POS) \ ((unsigned char)(((1 << (BITS)) - 1) << (POS))) #define SMB_MASK(LEFT_BIT_POS, RIGHT_BIT_POS) \ _SMB_MASK((LEFT_BIT_POS) - (RIGHT_BIT_POS) + 1, \ (RIGHT_BIT_POS)) /* Config registers */ struct smbchg_regulator { struct regulator_desc rdesc; struct regulator_dev *rdev; }; struct parallel_usb_cfg { struct power_supply *psy; int min_current_thr_ma; int min_9v_current_thr_ma; int allowed_lowering_ma; int current_max_ma; bool avail; struct mutex lock; int initial_aicl_ma; }; struct smbchg_chip { struct device *dev; struct spmi_device *spmi; /* peripheral register address bases */ u16 chgr_base; u16 bat_if_base; u16 usb_chgpth_base; u16 dc_chgpth_base; u16 otg_base; u16 misc_base; int fake_battery_soc; u8 revision[4]; /* configuration parameters */ int iterm_ma; int usb_max_current_ma; int dc_max_current_ma; int usb_target_current_ma; int 
usb_tl_current_ma; int dc_target_current_ma; int target_fastchg_current_ma; int fastchg_current_ma; int vfloat_mv; int resume_delta_mv; int safety_time; int prechg_safety_time; int bmd_pin_src; bool use_vfloat_adjustments; bool iterm_disabled; bool bmd_algo_disabled; bool soft_vfloat_comp_disabled; bool chg_enabled; bool low_icl_wa_on; bool battery_unknown; bool charge_unknown_battery; bool chg_inhibit_source_fg; u8 original_usbin_allowance; struct parallel_usb_cfg parallel; struct delayed_work parallel_en_work; /* flash current prediction */ int rpara_uohm; int rslow_uohm; /* status variables */ int usb_suspended; int dc_suspended; int wake_reasons; bool usb_online; bool dc_present; bool usb_present; bool batt_present; bool otg_retries; /* jeita and temperature */ bool batt_hot; bool batt_cold; bool batt_warm; bool batt_cool; unsigned int thermal_levels; unsigned int therm_lvl_sel; unsigned int *thermal_mitigation; /* irqs */ int batt_hot_irq; int batt_warm_irq; int batt_cool_irq; int batt_cold_irq; int batt_missing_irq; int vbat_low_irq; int chg_hot_irq; int chg_term_irq; int taper_irq; bool taper_irq_enabled; struct mutex taper_irq_lock; int recharge_irq; int fastchg_irq; int safety_timeout_irq; int power_ok_irq; int dcin_uv_irq; int usbin_uv_irq; int src_detect_irq; int otg_fail_irq; int otg_oc_irq; int aicl_done_irq; int usbid_change_irq; int chg_error_irq; bool enable_aicl_wake; /* psy */ struct power_supply *usb_psy; struct power_supply batt_psy; struct power_supply dc_psy; struct power_supply *bms_psy; int dc_psy_type; const char *bms_psy_name; const char *battery_psy_name; bool psy_registered; struct smbchg_regulator otg_vreg; struct smbchg_regulator ext_otg_vreg; struct work_struct usb_set_online_work; spinlock_t sec_access_lock; struct mutex current_change_lock; struct mutex usb_set_online_lock; struct mutex usb_en_lock; struct mutex dc_en_lock; struct mutex pm_lock; }; enum print_reason { PR_REGISTER = BIT(0), PR_INTERRUPT = BIT(1), PR_STATUS = BIT(2), 
PR_DUMP = BIT(3), PR_PM = BIT(4), PR_MISC = BIT(5), }; enum wake_reason { PM_PARALLEL_CHECK = BIT(0), }; static int smbchg_debug_mask; module_param_named( debug_mask, smbchg_debug_mask, int, S_IRUSR | S_IWUSR ); static int smbchg_parallel_en; module_param_named( parallel_en, smbchg_parallel_en, int, S_IRUSR | S_IWUSR ); #define pr_smb(reason, fmt, ...) \ do { \ if (smbchg_debug_mask & (reason)) \ pr_info(fmt, ##__VA_ARGS__); \ else \ pr_debug(fmt, ##__VA_ARGS__); \ } while (0) #define pr_smb_rt(reason, fmt, ...) \ do { \ if (smbchg_debug_mask & (reason)) \ pr_info_ratelimited(fmt, ##__VA_ARGS__); \ else \ pr_debug_ratelimited(fmt, ##__VA_ARGS__); \ } while (0) static int smbchg_read(struct smbchg_chip *chip, u8 *val, u16 addr, int count) { int rc = 0; struct spmi_device *spmi = chip->spmi; if (addr == 0) { dev_err(chip->dev, "addr cannot be zero addr=0x%02x sid=0x%02x rc=%d\n", addr, spmi->sid, rc); return -EINVAL; } rc = spmi_ext_register_readl(spmi->ctrl, spmi->sid, addr, val, count); if (rc) { dev_err(chip->dev, "spmi read failed addr=0x%02x sid=0x%02x rc=%d\n", addr, spmi->sid, rc); return rc; } return 0; } /* * Writes an arbitrary number of bytes to a specified register * * Do not use this function for register writes if possible. Instead use the * smbchg_masked_write function. * * The sec_access_lock must be held for all register writes and this function * does not do that. If this function is used, please hold the spinlock or * random secure access writes may fail. 
*/
/*
 * smbchg_write() - write @count bytes to SPMI register @addr.
 *
 * Raw write path; rejects @addr == 0 as invalid. Returns 0 on success or a
 * negative errno from the SPMI bus. See the locking note below: callers must
 * hold sec_access_lock around register writes.
 */
static int smbchg_write(struct smbchg_chip *chip, u8 *val, u16 addr, int count)
{
	int rc = 0;
	struct spmi_device *spmi = chip->spmi;

	/* address 0 is never a valid peripheral register; reject early */
	if (addr == 0) {
		dev_err(chip->dev,
			"addr cannot be zero addr=0x%02x sid=0x%02x rc=%d\n",
			addr, spmi->sid, rc);
		return -EINVAL;
	}

	rc = spmi_ext_register_writel(spmi->ctrl, spmi->sid, addr, val, count);
	if (rc) {
		dev_err(chip->dev,
			"write failed addr=0x%02x sid=0x%02x rc=%d\n",
			addr, spmi->sid, rc);
		return rc;
	}

	return 0;
}

/*
 * Read-modify-write of the register at @base, changing only the bits in @mask.
 *
 * Do not use this function for register writes if possible. Instead use the
 * smbchg_masked_write function.
 *
 * The sec_access_lock must be held for all register writes and this function
 * does not take it. If this function is used, please hold the spinlock or
 * random secure access writes may fail.
 */
static int smbchg_masked_write_raw(struct smbchg_chip *chip, u16 base, u8 mask,
								u8 val)
{
	int rc;
	u8 reg;

	rc = smbchg_read(chip, &reg, base, 1);
	if (rc) {
		dev_err(chip->dev, "spmi read failed: addr=%03X, rc=%d\n",
				base, rc);
		return rc;
	}

	/* clear the masked bits, then merge in the (masked) new value */
	reg &= ~mask;
	reg |= val & mask;

	pr_smb(PR_REGISTER, "addr = 0x%x writing 0x%x\n", base, reg);

	rc = smbchg_write(chip, &reg, base, 1);
	if (rc) {
		dev_err(chip->dev, "spmi write failed: addr=%03X, rc=%d\n",
				base, rc);
		return rc;
	}

	return 0;
}

/*
 * Masked write of the register at @base, limited to the bits in @mask.
 *
 * This function holds a spin lock to ensure secure access register writes go
 * through. If the secure access unlock register is armed, any old register
 * write can unarm the secure access unlock, causing the next write to fail.
 *
 * Note: do not use this for sec_access registers.
Instead use the function * below: smbchg_sec_masked_write */ static int smbchg_masked_write(struct smbchg_chip *chip, u16 base, u8 mask, u8 val) { unsigned long flags; int rc; spin_lock_irqsave(&chip->sec_access_lock, flags); rc = smbchg_masked_write_raw(chip, base, mask, val); spin_unlock_irqrestore(&chip->sec_access_lock, flags); return rc; } /* * Unlocks sec access and writes to the register specified. * * This function holds a spin lock to exclude other register writes while * the two writes are taking place. */ #define SEC_ACCESS_OFFSET 0xD0 #define SEC_ACCESS_VALUE 0xA5 #define PERIPHERAL_MASK 0xFF static int smbchg_sec_masked_write(struct smbchg_chip *chip, u16 base, u8 mask, u8 val) { unsigned long flags; int rc; u16 peripheral_base = base & (~PERIPHERAL_MASK); spin_lock_irqsave(&chip->sec_access_lock, flags); rc = smbchg_masked_write_raw(chip, peripheral_base + SEC_ACCESS_OFFSET, SEC_ACCESS_VALUE, SEC_ACCESS_VALUE); if (rc) { dev_err(chip->dev, "Unable to unlock sec_access: %d", rc); goto out; } rc = smbchg_masked_write_raw(chip, base, mask, val); out: spin_unlock_irqrestore(&chip->sec_access_lock, flags); return rc; } static void smbchg_stay_awake(struct smbchg_chip *chip, int reason) { int reasons; mutex_lock(&chip->pm_lock); reasons = chip->wake_reasons | reason; if (reasons != 0 && chip->wake_reasons == 0) { pr_smb(PR_PM, "staying awake: 0x%02x (bit %d)\n", reasons, reason); pm_stay_awake(chip->dev); } chip->wake_reasons = reasons; mutex_unlock(&chip->pm_lock); } static void smbchg_relax(struct smbchg_chip *chip, int reason) { int reasons; mutex_lock(&chip->pm_lock); reasons = chip->wake_reasons & (~reason); if (reasons == 0 && chip->wake_reasons != 0) { pr_smb(PR_PM, "relaxing: 0x%02x (bit %d)\n", reasons, reason); pm_relax(chip->dev); } chip->wake_reasons = reasons; mutex_unlock(&chip->pm_lock); }; enum pwr_path_type { UNKNOWN = 0, PWR_PATH_BATTERY = 1, PWR_PATH_USB = 2, PWR_PATH_DC = 3, }; #define PWR_PATH 0x08 #define PWR_PATH_MASK 0x03 static enum 
pwr_path_type smbchg_get_pwr_path(struct smbchg_chip *chip) { int rc; u8 reg; rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + PWR_PATH, 1); if (rc < 0) { dev_err(chip->dev, "Couldn't read PWR_PATH rc = %d\n", rc); return PWR_PATH_BATTERY; } return reg & PWR_PATH_MASK; } #define RID_STS 0xB #define RID_MASK 0xF #define IDEV_STS 0x8 #define RT_STS 0x10 #define USBID_MSB 0xE #define USBIN_UV_BIT 0x0 #define USBIN_OV_BIT 0x1 #define FMB_STS_MASK SMB_MASK(3, 0) #define USBID_GND_THRESHOLD 0x495 static bool is_otg_present(struct smbchg_chip *chip) { int rc; u8 reg; u8 usbid_reg[2]; u16 usbid_val; /* * There is a problem with USBID conversions on PMI8994 revisions * 2.0.0. As a workaround, check that the cable is not * detected as factory test before enabling OTG. */ rc = smbchg_read(chip, &reg, chip->misc_base + IDEV_STS, 1); if (rc < 0) { dev_err(chip->dev, "Couldn't read IDEV_STS rc = %d\n", rc); return false; } if ((reg & FMB_STS_MASK) != 0) { pr_smb(PR_STATUS, "IDEV_STS = %02x, not ground\n", reg); return false; } rc = smbchg_read(chip, usbid_reg, chip->usb_chgpth_base + USBID_MSB, 2); if (rc < 0) { dev_err(chip->dev, "Couldn't read USBID rc = %d\n", rc); return false; } usbid_val = (usbid_reg[0] << 8) | usbid_reg[1]; if (usbid_val > USBID_GND_THRESHOLD) { pr_smb(PR_STATUS, "USBID = 0x%04x, too high to be ground\n", usbid_val); return false; } rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RID_STS, 1); if (rc < 0) { dev_err(chip->dev, "Couldn't read usb rid status rc = %d\n", rc); return false; } pr_smb(PR_STATUS, "RID_STS = %02x\n", reg); return (reg & RID_MASK) == 0; } #define USBIN_9V BIT(5) #define USBIN_UNREG BIT(4) #define USBIN_LV BIT(3) #define DCIN_9V BIT(2) #define DCIN_UNREG BIT(1) #define DCIN_LV BIT(0) #define INPUT_STS 0x0D #define DCIN_UV_BIT 0x0 #define DCIN_OV_BIT 0x1 static bool is_dc_present(struct smbchg_chip *chip) { int rc; u8 reg; rc = smbchg_read(chip, &reg, chip->dc_chgpth_base + RT_STS, 1); if (rc < 0) { dev_err(chip->dev, 
"Couldn't read dc status rc = %d\n", rc);
		return false;
	}

	/* UV or OV means DCIN is outside its valid voltage window */
	if ((reg & DCIN_UV_BIT) || (reg & DCIN_OV_BIT))
		return false;

	return true;
}

/*
 * is_usb_present() - true when USBIN is in a usable voltage range.
 *
 * First rules out under/over-voltage via RT_STS, then requires one of the
 * 9V / unregulated / low-voltage input states in INPUT_STS.
 */
static bool is_usb_present(struct smbchg_chip *chip)
{
	int rc;
	u8 reg;

	rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
	if (rc < 0) {
		dev_err(chip->dev,
			"Couldn't read usb rt status rc = %d\n", rc);
		return false;
	}
	if ((reg & USBIN_UV_BIT) || (reg & USBIN_OV_BIT))
		return false;

	rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + INPUT_STS, 1);
	if (rc < 0) {
		dev_err(chip->dev,
			"Couldn't read usb status rc = %d\n", rc);
		return false;
	}

	return !!(reg & (USBIN_9V | USBIN_UNREG | USBIN_LV));
}

/* APSD source-detect result names, indexed by detected type bit */
static char *usb_type_str[] = {
	"SDP",		/* bit 0 */
	"OTHER",	/* bit 1 */
	"DCP",		/* bit 2 */
	"CDP",		/* bit 3 */
	"NONE",		/* bit 4 error case */
};

#define N_TYPE_BITS	4
#define TYPE_BITS_OFFSET	4

/* helper to return the string of USB type */
static char *get_usb_type_name(u8 type_reg)
{
	unsigned long type = type_reg;

	/* type bits live in the high nibble of the status register;
	 * if none is set, find_first_bit returns N_TYPE_BITS and we
	 * fall through to the "NONE" error entry */
	type >>= TYPE_BITS_OFFSET;
	return usb_type_str[find_first_bit(&type, N_TYPE_BITS)];
}

static enum power_supply_type usb_type_enum[] = {
	POWER_SUPPLY_TYPE_USB,		/* bit 0 */
	POWER_SUPPLY_TYPE_UNKNOWN,	/* bit 1 */
	POWER_SUPPLY_TYPE_USB_DCP,	/* bit 2 */
	POWER_SUPPLY_TYPE_USB_CDP,	/* bit 3 */
	POWER_SUPPLY_TYPE_USB,		/* bit 4 error case, report SDP */
};

/* helper to return enum power_supply_type of USB type */
static enum power_supply_type get_usb_supply_type(u8 type_reg)
{
	unsigned long type = type_reg;

	type >>= TYPE_BITS_OFFSET;
	return usb_type_enum[find_first_bit(&type, N_TYPE_BITS)];
}

/* properties exposed on the battery power_supply */
static enum power_supply_property smbchg_battery_properties[] = {
	POWER_SUPPLY_PROP_STATUS,
	POWER_SUPPLY_PROP_PRESENT,
	POWER_SUPPLY_PROP_CHARGING_ENABLED,
	POWER_SUPPLY_PROP_CHARGE_TYPE,
	POWER_SUPPLY_PROP_CAPACITY,
	POWER_SUPPLY_PROP_HEALTH,
	POWER_SUPPLY_PROP_TECHNOLOGY,
	POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
	POWER_SUPPLY_PROP_FLASH_CURRENT_MAX,
	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
	POWER_SUPPLY_PROP_CURRENT_NOW,
	POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_VOLTAGE_NOW, }; #define CHGR_STS 0x0E #define BATT_LESS_THAN_2V BIT(4) #define CHG_HOLD_OFF_BIT BIT(3) #define CHG_TYPE_MASK SMB_MASK(2, 1) #define CHG_TYPE_SHIFT 1 #define BATT_NOT_CHG_VAL 0x0 #define BATT_PRE_CHG_VAL 0x1 #define BATT_FAST_CHG_VAL 0x2 #define BATT_TAPER_CHG_VAL 0x3 #define CHG_EN_BIT BIT(0) #define CHG_INHIBIT_BIT BIT(1) #define BAT_TCC_REACHED_BIT BIT(7) static int get_prop_batt_status(struct smbchg_chip *chip) { int rc, status = POWER_SUPPLY_STATUS_DISCHARGING; u8 reg = 0, chg_type; bool charger_present, chg_inhibit; rc = smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1); if (rc < 0) { dev_err(chip->dev, "Unable to read RT_STS rc = %d\n", rc); return POWER_SUPPLY_STATUS_UNKNOWN; } if (reg & BAT_TCC_REACHED_BIT) return POWER_SUPPLY_STATUS_FULL; charger_present = is_usb_present(chip) | is_dc_present(chip); if (!charger_present) return POWER_SUPPLY_STATUS_DISCHARGING; chg_inhibit = reg & CHG_INHIBIT_BIT; if (chg_inhibit) return POWER_SUPPLY_STATUS_FULL; rc = smbchg_read(chip, &reg, chip->chgr_base + CHGR_STS, 1); if (rc < 0) { dev_err(chip->dev, "Unable to read CHGR_STS rc = %d\n", rc); return POWER_SUPPLY_STATUS_UNKNOWN; } if (reg & CHG_HOLD_OFF_BIT) { /* * when chg hold off happens the battery is * not charging */ status = POWER_SUPPLY_STATUS_NOT_CHARGING; goto out; } chg_type = (reg & CHG_TYPE_MASK) >> CHG_TYPE_SHIFT; if (chg_type == BATT_NOT_CHG_VAL) status = POWER_SUPPLY_STATUS_DISCHARGING; else status = POWER_SUPPLY_STATUS_CHARGING; out: pr_smb_rt(PR_MISC, "CHGR_STS = 0x%02x\n", reg); return status; } #define BAT_PRES_STATUS 0x08 #define BAT_PRES_BIT BIT(7) static int get_prop_batt_present(struct smbchg_chip *chip) { int rc; u8 reg; rc = smbchg_read(chip, &reg, chip->bat_if_base + BAT_PRES_STATUS, 1); if (rc < 0) { dev_err(chip->dev, "Unable to read CHGR_STS rc = %d\n", rc); return 0; } return !!(reg & BAT_PRES_BIT); } static int get_prop_charge_type(struct smbchg_chip *chip) { int rc; u8 reg, chg_type; rc = 
smbchg_read(chip, &reg, chip->chgr_base + CHGR_STS, 1); if (rc < 0) { dev_err(chip->dev, "Unable to read CHGR_STS rc = %d\n", rc); return 0; } chg_type = (reg & CHG_TYPE_MASK) >> CHG_TYPE_SHIFT; if (chg_type == BATT_NOT_CHG_VAL) return POWER_SUPPLY_CHARGE_TYPE_NONE; else if (chg_type == BATT_TAPER_CHG_VAL) return POWER_SUPPLY_CHARGE_TYPE_TAPER; else if (chg_type == BATT_FAST_CHG_VAL) return POWER_SUPPLY_CHARGE_TYPE_FAST; else if (chg_type == BATT_PRE_CHG_VAL) return POWER_SUPPLY_CHARGE_TYPE_TRICKLE; return POWER_SUPPLY_CHARGE_TYPE_NONE; } #define DEFAULT_BATT_CAPACITY 50 static int get_prop_batt_capacity(struct smbchg_chip *chip) { union power_supply_propval ret = {0, }; if (chip->fake_battery_soc >= 0) return chip->fake_battery_soc; if (!chip->bms_psy && chip->bms_psy_name) chip->bms_psy = power_supply_get_by_name((char *)chip->bms_psy_name); if (chip->bms_psy) { chip->bms_psy->get_property(chip->bms_psy, POWER_SUPPLY_PROP_CAPACITY, &ret); return ret.intval; } return DEFAULT_BATT_CAPACITY; } #define DEFAULT_BATT_TEMP 200 static int get_prop_batt_temp(struct smbchg_chip *chip) { union power_supply_propval ret = {0, }; if (!chip->bms_psy && chip->bms_psy_name) chip->bms_psy = power_supply_get_by_name((char *)chip->bms_psy_name); if (chip->bms_psy) { chip->bms_psy->get_property(chip->bms_psy, POWER_SUPPLY_PROP_TEMP, &ret); return ret.intval; } return DEFAULT_BATT_TEMP; } #define DEFAULT_BATT_CURRENT_NOW 0 static int get_prop_batt_current_now(struct smbchg_chip *chip) { union power_supply_propval ret = {0, }; if (!chip->bms_psy && chip->bms_psy_name) chip->bms_psy = power_supply_get_by_name((char *)chip->bms_psy_name); if (chip->bms_psy) { chip->bms_psy->get_property(chip->bms_psy, POWER_SUPPLY_PROP_CURRENT_NOW, &ret); return ret.intval; } return DEFAULT_BATT_CURRENT_NOW; } #define DEFAULT_BATT_VOLTAGE_NOW 0 static int get_prop_batt_voltage_now(struct smbchg_chip *chip) { union power_supply_propval ret = {0, }; if (!chip->bms_psy && chip->bms_psy_name) chip->bms_psy = 
power_supply_get_by_name((char *)chip->bms_psy_name); if (chip->bms_psy) { chip->bms_psy->get_property(chip->bms_psy, POWER_SUPPLY_PROP_VOLTAGE_NOW, &ret); return ret.intval; } return DEFAULT_BATT_VOLTAGE_NOW; } static int get_prop_batt_health(struct smbchg_chip *chip) { if (chip->batt_hot) return POWER_SUPPLY_HEALTH_OVERHEAT; else if (chip->batt_cold) return POWER_SUPPLY_HEALTH_COLD; else if (chip->batt_warm) return POWER_SUPPLY_HEALTH_WARM; else if (chip->batt_cool) return POWER_SUPPLY_HEALTH_COOL; else return POWER_SUPPLY_HEALTH_GOOD; } int usb_current_table[] = { 300, 400, 450, 475, 500, 550, 600, 650, 700, 900, 950, 1000, 1100, 1200, 1400, 1450, 1500, 1600, 1800, 1850, 1880, 1910, 1930, 1950, 1970, 2000, 2050, 2100, 2300, 2400, 2500, 3000 }; int dc_current_table[] = { 300, 400, 450, 475, 500, 550, 600, 650, 700, 900, 950, 1000, 1100, 1200, 1400, 1450, 1500, 1600, 1800, 1850, 1880, 1910, 1930, 1950, 1970, 2000, }; static int calc_thermal_limited_current(struct smbchg_chip *chip, int current_ma) { int therm_ma; if (chip->therm_lvl_sel > 0 && chip->therm_lvl_sel < (chip->thermal_levels - 1)) { /* * consider thermal limit only when it is active and not at * the highest level */ therm_ma = (int)chip->thermal_mitigation[chip->therm_lvl_sel]; if (therm_ma < current_ma) { pr_smb(PR_STATUS, "Limiting current due to thermal: %d mA", therm_ma); return therm_ma; } } return current_ma; } #define CMD_IL 0x40 #define USBIN_SUSPEND_BIT BIT(4) #define CURRENT_100_MA 100 #define CURRENT_150_MA 150 #define CURRENT_500_MA 500 #define CURRENT_900_MA 900 #define SUSPEND_CURRENT_MA 2 static int smbchg_usb_suspend(struct smbchg_chip *chip, bool suspend) { int rc; rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL, USBIN_SUSPEND_BIT, suspend ? 
USBIN_SUSPEND_BIT : 0);
	if (rc < 0)
		dev_err(chip->dev, "Couldn't set usb suspend rc = %d\n", rc);
	return rc;
}

#define DCIN_SUSPEND_BIT	BIT(3)
/*
 * Suspend or resume the DC input path.
 * Note: DCIN_SUSPEND_BIT is written via the USB charge path peripheral's
 * CMD_IL register (usb_chgpth_base), not the DC peripheral.
 */
static int smbchg_dc_suspend(struct smbchg_chip *chip, bool suspend)
{
	int rc = 0;

	rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
			DCIN_SUSPEND_BIT, suspend ? DCIN_SUSPEND_BIT : 0);
	if (rc < 0)
		dev_err(chip->dev, "Couldn't set dc suspend rc = %d\n", rc);
	return rc;
}

#define IL_CFG			0xF2
#define DCIN_INPUT_MASK	SMB_MASK(4, 0)
/*
 * smbchg_set_dc_current_max() - program the DC input current limit.
 *
 * Rounds @current_ma DOWN to the nearest supported step in dc_current_table
 * and writes its index into the DCIN IL_CFG field. Returns -EINVAL when
 * @current_ma is below the smallest table entry.
 */
static int smbchg_set_dc_current_max(struct smbchg_chip *chip, int current_ma)
{
	int i;
	u8 dc_cur_val;

	/* scan from the top: pick the largest step <= current_ma */
	for (i = ARRAY_SIZE(dc_current_table) - 1; i >= 0; i--) {
		if (current_ma >= dc_current_table[i])
			break;
	}

	if (i < 0) {
		dev_err(chip->dev, "Cannot find %dma current_table\n",
				current_ma);
		return -EINVAL;
	}

	chip->dc_max_current_ma = dc_current_table[i];
	dc_cur_val = i & DCIN_INPUT_MASK;

	pr_smb(PR_STATUS, "dc current set to %d mA\n",
			chip->dc_max_current_ma);
	return smbchg_sec_masked_write(chip, chip->dc_chgpth_base + IL_CFG,
				DCIN_INPUT_MASK, dc_cur_val);
}

/* one bit per independent reason a charge path may be suspended */
enum enable_reason {
	/* userspace has suspended charging altogether */
	REASON_USER = BIT(0),
	/*
	 * this specific path has been suspended through the power supply
	 * framework
	 */
	REASON_POWER_SUPPLY = BIT(1),
	/*
	 * the usb driver has suspended this path by setting a current limit
	 * of < 2MA
	 */
	REASON_USB = BIT(2),
	/*
	 * when a wireless charger comes online,
	 * the dc path is suspended for a second
	 */
	REASON_WIRELESS = BIT(3),
	/*
	 * the thermal daemon can suspend a charge path when the system
	 * temperature levels rise
	 */
	REASON_THERMAL = BIT(4),
	/*
	 * an external OTG supply is being used, suspend charge path so the
	 * charger does not accidentally try to charge from the external supply.
*/ REASON_OTG = BIT(5), }; static struct power_supply *get_parallel_psy(struct smbchg_chip *chip) { if (!chip->parallel.avail) return NULL; if (chip->parallel.psy) return chip->parallel.psy; chip->parallel.psy = power_supply_get_by_name("usb-parallel"); if (!chip->parallel.psy) pr_smb(PR_STATUS, "parallel charger not found\n"); return chip->parallel.psy; } static void smbchg_usb_update_online_work(struct work_struct *work) { struct smbchg_chip *chip = container_of(work, struct smbchg_chip, usb_set_online_work); bool user_enabled = (chip->usb_suspended & REASON_USER) == 0; int online = user_enabled && chip->usb_present; mutex_lock(&chip->usb_set_online_lock); if (chip->usb_online != online) { power_supply_set_online(chip->usb_psy, online); chip->usb_online = online; } mutex_unlock(&chip->usb_set_online_lock); } static bool smbchg_primary_usb_is_en(struct smbchg_chip *chip, enum enable_reason reason) { bool enabled; mutex_lock(&chip->usb_en_lock); enabled = (chip->usb_suspended & reason) == 0; mutex_unlock(&chip->usb_en_lock); return enabled; } static int smbchg_primary_usb_en(struct smbchg_chip *chip, bool enable, enum enable_reason reason, bool *changed) { int rc = 0, suspended; pr_smb(PR_STATUS, "usb %s, susp = %02x, en? = %d, reason = %02x\n", chip->usb_suspended == 0 ? "enabled" : "suspended", chip->usb_suspended, enable, reason); mutex_lock(&chip->usb_en_lock); if (!enable) suspended = chip->usb_suspended | reason; else suspended = chip->usb_suspended & (~reason); /* avoid unnecessary spmi interactions if nothing changed */ if (!!suspended == !!chip->usb_suspended) { *changed = false; goto out; } *changed = true; rc = smbchg_usb_suspend(chip, suspended != 0); if (rc < 0) { dev_err(chip->dev, "Couldn't set usb suspend: %d rc = %d\n", suspended, rc); goto out; } pr_smb(PR_STATUS, "usb charging %s, suspended = %02x\n", suspended == 0 ? 
"enabled" : "suspended", suspended); out: chip->usb_suspended = suspended; mutex_unlock(&chip->usb_en_lock); return rc; } static int smbchg_dc_en(struct smbchg_chip *chip, bool enable, enum enable_reason reason) { int rc = 0, suspended; pr_smb(PR_STATUS, "dc %s, susp = %02x, en? = %d, reason = %02x\n", chip->dc_suspended == 0 ? "enabled" : "suspended", chip->dc_suspended, enable, reason); mutex_lock(&chip->dc_en_lock); if (!enable) suspended = chip->dc_suspended | reason; else suspended = chip->dc_suspended & ~reason; /* avoid unnecessary spmi interactions if nothing changed */ if (!!suspended == !!chip->dc_suspended) goto out; rc = smbchg_dc_suspend(chip, suspended != 0); if (rc < 0) { dev_err(chip->dev, "Couldn't set dc suspend: %d rc = %d\n", suspended, rc); goto out; } if (chip->psy_registered) power_supply_changed(&chip->dc_psy); pr_smb(PR_STATUS, "dc charging %s, suspended = %02x\n", suspended == 0 ? "enabled" : "suspended", suspended); out: chip->dc_suspended = suspended; mutex_unlock(&chip->dc_en_lock); return rc; } #define CHGPTH_CFG 0xF4 #define CFG_USB_2_3_SEL_BIT BIT(7) #define CFG_USB_2 0 #define CFG_USB_3 BIT(7) #define USBIN_INPUT_MASK SMB_MASK(4, 0) #define USBIN_MODE_CHG_BIT BIT(0) #define USBIN_LIMITED_MODE 0 #define USBIN_HC_MODE BIT(0) #define USB51_MODE_BIT BIT(1) #define USB51_100MA 0 #define USB51_500MA BIT(1) static int smbchg_set_high_usb_chg_current(struct smbchg_chip *chip, int current_ma) { int i, rc; u8 usb_cur_val; for (i = ARRAY_SIZE(usb_current_table) - 1; i >= 0; i--) { if (current_ma >= usb_current_table[i]) break; } if (i < 0) { dev_err(chip->dev, "Cannot find %dma current_table using %d\n", current_ma, CURRENT_150_MA); rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG, CFG_USB_2_3_SEL_BIT, CFG_USB_2); rc |= smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL, USBIN_MODE_CHG_BIT | USB51_MODE_BIT, USBIN_LIMITED_MODE | USB51_100MA); if (rc < 0) dev_err(chip->dev, "Couldn't set %dmA rc=%d\n", CURRENT_150_MA, 
rc); else chip->usb_max_current_ma = 150; return rc; } usb_cur_val = i & USBIN_INPUT_MASK; rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + IL_CFG, USBIN_INPUT_MASK, usb_cur_val); if (rc < 0) { dev_err(chip->dev, "cannot write to config c rc = %d\n", rc); return rc; } rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL, USBIN_MODE_CHG_BIT, USBIN_HC_MODE); if (rc < 0) dev_err(chip->dev, "Couldn't write cfg 5 rc = %d\n", rc); chip->usb_max_current_ma = usb_current_table[i]; return rc; } /* if APSD results are used * if SDP is detected it will look at 500mA setting * if set it will draw 500mA * if unset it will draw 100mA * if CDP/DCP it will look at 0x0C setting * i.e. values in 0x41[1, 0] does not matter */ static int smbchg_set_usb_current_max(struct smbchg_chip *chip, int current_ma) { int rc; bool changed; if (!chip->batt_present) { pr_info_ratelimited("Ignoring usb current->%d, battery is absent\n", current_ma); return 0; } pr_smb(PR_STATUS, "USB current_ma = %d\n", current_ma); if (current_ma == SUSPEND_CURRENT_MA) { /* suspend the usb if current set to 2mA */ rc = smbchg_primary_usb_en(chip, false, REASON_USB, &changed); chip->usb_max_current_ma = 0; goto out; } else { rc = smbchg_primary_usb_en(chip, true, REASON_USB, &changed); } if (chip->low_icl_wa_on) { chip->usb_max_current_ma = current_ma; pr_smb(PR_STATUS, "low_icl_wa on, ignoring the usb current setting\n"); goto out; } if (current_ma < CURRENT_150_MA) { /* force 100mA */ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG, CFG_USB_2_3_SEL_BIT, CFG_USB_2); rc |= smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL, USBIN_MODE_CHG_BIT | USB51_MODE_BIT, USBIN_LIMITED_MODE | USB51_100MA); chip->usb_max_current_ma = 100; goto out; } /* specific current values */ if (current_ma == CURRENT_150_MA) { rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG, CFG_USB_2_3_SEL_BIT, CFG_USB_3); rc |= smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL, 
USBIN_MODE_CHG_BIT | USB51_MODE_BIT, USBIN_LIMITED_MODE | USB51_100MA); chip->usb_max_current_ma = 150; goto out; } if (current_ma == CURRENT_500_MA) { rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG, CFG_USB_2_3_SEL_BIT, CFG_USB_2); rc |= smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL, USBIN_MODE_CHG_BIT | USB51_MODE_BIT, USBIN_LIMITED_MODE | USB51_500MA); chip->usb_max_current_ma = 500; goto out; } if (current_ma == CURRENT_900_MA) { rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG, CFG_USB_2_3_SEL_BIT, CFG_USB_3); rc |= smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL, USBIN_MODE_CHG_BIT | USB51_MODE_BIT, USBIN_LIMITED_MODE | USB51_500MA); chip->usb_max_current_ma = 900; goto out; } rc = smbchg_set_high_usb_chg_current(chip, current_ma); out: pr_smb(PR_STATUS, "usb current set to %d mA\n", chip->usb_max_current_ma); if (rc < 0) dev_err(chip->dev, "Couldn't set %dmA rc = %d\n", current_ma, rc); return rc; } #define USBIN_HVDCP_STS 0x0C #define USBIN_HVDCP_SEL_BIT BIT(4) #define USBIN_HVDCP_SEL_9V_BIT BIT(1) static int smbchg_get_min_parallel_current_ma(struct smbchg_chip *chip) { int rc; u8 reg; rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + USBIN_HVDCP_STS, 1); if (rc < 0) { dev_err(chip->dev, "Couldn't read usb status rc = %d\n", rc); return 0; } if ((reg & USBIN_HVDCP_SEL_BIT) && (reg & USBIN_HVDCP_SEL_9V_BIT)) return chip->parallel.min_9v_current_thr_ma; return chip->parallel.min_current_thr_ma; } #define ICL_STS_1_REG 0x7 #define ICL_STS_2_REG 0x9 #define ICL_STS_MASK 0x1F #define AICL_STS_BIT BIT(5) #define USBIN_SUSPEND_STS_BIT BIT(3) #define USBIN_ACTIVE_PWR_SRC_BIT BIT(1) static bool smbchg_is_parallel_usb_ok(struct smbchg_chip *chip) { int min_current_thr_ma, rc; u8 reg; if (!smbchg_parallel_en) { pr_smb(PR_STATUS, "Parallel charging not enabled\n"); return false; } if (get_prop_charge_type(chip) != POWER_SUPPLY_CHARGE_TYPE_FAST) { pr_smb(PR_STATUS, "Not in fast charge, skipping\n"); 
return false; } if (get_prop_batt_health(chip) != POWER_SUPPLY_HEALTH_GOOD) { pr_smb(PR_STATUS, "JEITA active, skipping\n"); return false; } rc = smbchg_read(chip, &reg, chip->misc_base + IDEV_STS, 1); if (rc < 0) { dev_err(chip->dev, "Couldn't read status 5 rc = %d\n", rc); return false; } if (get_usb_supply_type(reg) == POWER_SUPPLY_TYPE_USB_CDP) { pr_smb(PR_STATUS, "CDP adapter, skipping\n"); return false; } if (get_usb_supply_type(reg) == POWER_SUPPLY_TYPE_USB) { pr_smb(PR_STATUS, "SDP adapter, skipping\n"); return false; } rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + ICL_STS_2_REG, 1); if (rc) { dev_err(chip->dev, "Could not read usb icl sts 2: %d\n", rc); return false; } /* * If USBIN is suspended or not the active power source, do not enable * parallel charging. The device may be charging off of DCIN. */ if (!!(reg & USBIN_SUSPEND_STS_BIT) || !(reg & USBIN_ACTIVE_PWR_SRC_BIT)) { pr_smb(PR_STATUS, "USB not active power source: %02x\n", reg); return false; } min_current_thr_ma = smbchg_get_min_parallel_current_ma(chip); if (min_current_thr_ma <= 0) { pr_smb(PR_STATUS, "parallel charger unavailable for thr: %d\n", min_current_thr_ma); return false; } if (chip->usb_tl_current_ma < min_current_thr_ma) { pr_smb(PR_STATUS, "Weak USB chg skip enable: %d < %d\n", chip->usb_tl_current_ma, min_current_thr_ma); return false; } return true; } #define FCC_CFG 0xF2 #define FCC_500MA_VAL 0x4 #define FCC_MASK SMB_MASK(4, 0) static int smbchg_set_fastchg_current(struct smbchg_chip *chip, int current_ma) { int i, rc; u8 cur_val; /* the fcc enumerations are the same as the usb currents */ for (i = ARRAY_SIZE(usb_current_table) - 1; i >= 0; i--) { if (current_ma >= usb_current_table[i]) break; } if (i < 0) { dev_err(chip->dev, "Cannot find %dma current_table using %d\n", current_ma, CURRENT_500_MA); rc = smbchg_sec_masked_write(chip, chip->chgr_base + FCC_CFG, FCC_MASK, FCC_500MA_VAL); if (rc < 0) dev_err(chip->dev, "Couldn't set %dmA rc=%d\n", CURRENT_500_MA, rc); else 
chip->fastchg_current_ma = 500; return rc; } cur_val = i & FCC_MASK; rc = smbchg_sec_masked_write(chip, chip->chgr_base + FCC_CFG, FCC_MASK, cur_val); if (rc < 0) { dev_err(chip->dev, "cannot write to fcc cfg rc = %d\n", rc); return rc; } chip->fastchg_current_ma = usb_current_table[i]; pr_smb(PR_STATUS, "fastcharge current set to %d\n", chip->fastchg_current_ma); return rc; } #define USB_AICL_CFG 0xF3 #define AICL_EN_BIT BIT(2) static void smbchg_rerun_aicl(struct smbchg_chip *chip) { smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG, AICL_EN_BIT, 0); /* Add a delay so that AICL successfully clears */ msleep(50); smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG, AICL_EN_BIT, AICL_EN_BIT); } static void taper_irq_en(struct smbchg_chip *chip, bool en) { mutex_lock(&chip->taper_irq_lock); if (en != chip->taper_irq_enabled) { if (en) { enable_irq(chip->taper_irq); enable_irq_wake(chip->taper_irq); } else { disable_irq_wake(chip->taper_irq); disable_irq_nosync(chip->taper_irq); } chip->taper_irq_enabled = en; } mutex_unlock(&chip->taper_irq_lock); } static void smbchg_parallel_usb_disable(struct smbchg_chip *chip) { struct power_supply *parallel_psy = get_parallel_psy(chip); if (!parallel_psy) return; pr_smb(PR_STATUS, "disabling parallel charger\n"); taper_irq_en(chip, false); chip->parallel.initial_aicl_ma = 0; chip->parallel.current_max_ma = 0; power_supply_set_current_limit(parallel_psy, SUSPEND_CURRENT_MA * 1000); power_supply_set_present(parallel_psy, false); smbchg_set_fastchg_current(chip, chip->target_fastchg_current_ma); chip->usb_tl_current_ma = calc_thermal_limited_current(chip, chip->usb_target_current_ma); smbchg_set_usb_current_max(chip, chip->usb_tl_current_ma); smbchg_rerun_aicl(chip); } #define PARALLEL_TAPER_MAX_TRIES 3 #define PARALLEL_FCC_PERCENT_REDUCTION 75 #define MINIMUM_PARALLEL_FCC_MA 500 #define CHG_ERROR_BIT BIT(0) #define BAT_TAPER_MODE_BIT BIT(6) static void smbchg_parallel_usb_taper(struct smbchg_chip 
*chip)
{
	struct power_supply *parallel_psy = get_parallel_psy(chip);
	union power_supply_propval pval = {0, };
	int parallel_fcc_ma, tries = 0;
	u8 reg = 0;

	if (!parallel_psy)
		return;

try_again:
	mutex_lock(&chip->parallel.lock);
	if (chip->parallel.current_max_ma == 0) {
		pr_smb(PR_STATUS, "Not parallel charging, skipping\n");
		goto done;
	}
	parallel_psy->get_property(parallel_psy,
			POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
	tries += 1;
	parallel_fcc_ma = pval.intval / 1000;
	pr_smb(PR_STATUS, "try #%d parallel charger fcc = %d\n",
			tries, parallel_fcc_ma);
	/* give up and fall back to single-charger mode once the parallel
	 * FCC has been tapered to the floor or too many tries elapsed */
	if (parallel_fcc_ma < MINIMUM_PARALLEL_FCC_MA
			|| tries > PARALLEL_TAPER_MAX_TRIES) {
		smbchg_parallel_usb_disable(chip);
		goto done;
	}
	/* reduce the parallel FCC by PARALLEL_FCC_PERCENT_REDUCTION % */
	pval.intval = 1000 * ((parallel_fcc_ma
			* PARALLEL_FCC_PERCENT_REDUCTION) / 100);
	parallel_psy->set_property(parallel_psy,
			POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
	/*
	 * sleep here for 100 ms in order to make sure the charger has a chance
	 * to go back into constant current charging
	 */
	mutex_unlock(&chip->parallel.lock);
	msleep(100);

	mutex_lock(&chip->parallel.lock);
	if (chip->parallel.current_max_ma == 0) {
		pr_smb(PR_STATUS, "Not parallel charging, skipping\n");
		goto done;
	}
	smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1);
	if (reg & BAT_TAPER_MODE_BIT) {
		/* still tapering - reduce the parallel FCC again */
		mutex_unlock(&chip->parallel.lock);
		goto try_again;
	}
	taper_irq_en(chip, true);
done:
	mutex_unlock(&chip->parallel.lock);
}

/*
 * Read back the input current limit (mA) selected by the AICL hardware.
 * Returns 0 (after logging) when the status register cannot be read or
 * contains an index outside usb_current_table.
 */
static int smbchg_get_aicl_level_ma(struct smbchg_chip *chip)
{
	int rc;
	u8 reg;

	rc = smbchg_read(chip, &reg,
			chip->usb_chgpth_base + ICL_STS_1_REG, 1);
	if (rc) {
		dev_err(chip->dev, "Could not read usb icl sts 1: %d\n", rc);
		return 0;
	}
	reg &= ICL_STS_MASK;
	if (reg >= ARRAY_SIZE(usb_current_table)) {
		pr_warn("invalid AICL value: %02x\n", reg);
		return 0;
	}
	return usb_current_table[reg];
}

/*
 * Attempt to split the charge current between the primary and the
 * parallel charger.  Called with chip->parallel.lock held (see
 * smbchg_parallel_usb_en_work / smbchg_parallel_usb_check_ok).
 */
static void smbchg_parallel_usb_enable(struct smbchg_chip *chip)
{
	struct power_supply *parallel_psy = get_parallel_psy(chip);
	union power_supply_propval pval = {0, };
	int current_limit_ma, parallel_cl_ma, total_current_ma;
	int new_parallel_cl_ma, min_current_thr_ma;

	if (!parallel_psy)
		return;
	pr_smb(PR_STATUS, "Attempting to enable parallel charger\n");
	min_current_thr_ma = smbchg_get_min_parallel_current_ma(chip);
	if (min_current_thr_ma <= 0) {
		pr_smb(PR_STATUS, "parallel charger unavailable for thr: %d\n",
				min_current_thr_ma);
		goto disable_parallel;
	}
	current_limit_ma = smbchg_get_aicl_level_ma(chip);
	if (current_limit_ma <= 0)
		goto disable_parallel;
	if (chip->parallel.initial_aicl_ma == 0) {
		if (current_limit_ma < min_current_thr_ma) {
			pr_smb(PR_STATUS, "Initial AICL very low: %d < %d\n",
				current_limit_ma, min_current_thr_ma);
			goto disable_parallel;
		}
		chip->parallel.initial_aicl_ma = current_limit_ma;
	}
	/*
	 * Use the previous set current from the parallel charger.
	 * Treat 2mA as 0 because that is the suspend current setting
	 */
	parallel_cl_ma = chip->parallel.current_max_ma;
	if (parallel_cl_ma <= SUSPEND_CURRENT_MA)
		parallel_cl_ma = 0;
	/*
	 * Set the parallel charge path's input current limit (ICL)
	 * to the total current / 2
	 */
	total_current_ma = current_limit_ma + parallel_cl_ma;
	if (total_current_ma < chip->parallel.initial_aicl_ma
			- chip->parallel.allowed_lowering_ma) {
		pr_smb(PR_STATUS,
			"Too little total current : %d (%d + %d) < %d - %d\n",
			total_current_ma,
			current_limit_ma, parallel_cl_ma,
			chip->parallel.initial_aicl_ma,
			chip->parallel.allowed_lowering_ma);
		goto disable_parallel;
	}

	new_parallel_cl_ma = total_current_ma / 2;
	if (new_parallel_cl_ma == parallel_cl_ma) {
		pr_smb(PR_STATUS,
			"AICL at %d, old ICL: %d new ICL: %d, skipping\n",
			current_limit_ma, parallel_cl_ma, new_parallel_cl_ma);
		return;
	} else {
		pr_smb(PR_STATUS, "AICL at %d, old ICL: %d new ICL: %d\n",
			current_limit_ma, parallel_cl_ma, new_parallel_cl_ma);
	}

	taper_irq_en(chip, true);
	chip->parallel.current_max_ma = new_parallel_cl_ma;
	power_supply_set_present(parallel_psy, true);
	/* each charge path takes half of the target fast-charge current */
	smbchg_set_fastchg_current(chip, chip->target_fastchg_current_ma / 2);
	pval.intval = chip->target_fastchg_current_ma * 1000 / 2;
	parallel_psy->set_property(parallel_psy,
			POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
	smbchg_set_usb_current_max(chip, chip->parallel.current_max_ma);
	power_supply_set_current_limit(parallel_psy,
				chip->parallel.current_max_ma * 1000);
	return;

disable_parallel:
	if (chip->parallel.current_max_ma != 0) {
		pr_smb(PR_STATUS, "disabling parallel charger\n");
		smbchg_parallel_usb_disable(chip);
	}
}

/*
 * Deferred work: re-check conditions and enable parallel charging after
 * the settle delay; releases the PM_PARALLEL_CHECK wakeup source taken
 * in smbchg_parallel_usb_check_ok().
 */
static void smbchg_parallel_usb_en_work(struct work_struct *work)
{
	struct smbchg_chip *chip = container_of(work,
				struct smbchg_chip,
				parallel_en_work.work);

	smbchg_relax(chip, PM_PARALLEL_CHECK);
	mutex_lock(&chip->parallel.lock);
	if (smbchg_is_parallel_usb_ok(chip))
		smbchg_parallel_usb_enable(chip);
	mutex_unlock(&chip->parallel.lock);
}

#define PARALLEL_CHARGER_EN_DELAY_MS	3500
/*
 * Schedule delayed enablement of the parallel charger when conditions
 * allow it, otherwise tear the parallel path down if it was active.
 */
static void smbchg_parallel_usb_check_ok(struct smbchg_chip *chip)
{
	struct power_supply *parallel_psy = get_parallel_psy(chip);

	if (!parallel_psy)
		return;
	mutex_lock(&chip->parallel.lock);
	if (smbchg_is_parallel_usb_ok(chip)) {
		smbchg_stay_awake(chip, PM_PARALLEL_CHECK);
		schedule_delayed_work(
			&chip->parallel_en_work,
			msecs_to_jiffies(PARALLEL_CHARGER_EN_DELAY_MS));
	} else if (chip->parallel.current_max_ma != 0) {
		pr_smb(PR_STATUS, "parallel charging unavailable\n");
		smbchg_parallel_usb_disable(chip);
	}
	mutex_unlock(&chip->parallel.lock);
}

/*
 * Enable/disable the primary USB charge path, then re-evaluate parallel
 * charging when the effective enable state actually changed.
 */
static int smbchg_usb_en(struct smbchg_chip *chip, bool enable,
		enum enable_reason reason)
{
	bool changed = false;
	int rc = smbchg_primary_usb_en(chip, enable, reason, &changed);

	if (changed)
		smbchg_parallel_usb_check_ok(chip);
	return rc;
}

/*
 * set the dc charge path's maximum allowed current draw
 * that may be limited by the system's thermal level
 */
static int smbchg_set_thermal_limited_dc_current_max(struct smbchg_chip *chip,
							int current_ma)
{
	current_ma = calc_thermal_limited_current(chip, current_ma);
	return smbchg_set_dc_current_max(chip, current_ma);
}

/*
 * set the usb charge path's maximum allowed current draw
 * that may be limited by the
system's thermal level */
static int smbchg_set_thermal_limited_usb_current_max(struct smbchg_chip *chip,
							int current_ma)
{
	int rc;

	chip->usb_tl_current_ma =
		calc_thermal_limited_current(chip, current_ma);
	rc = smbchg_set_usb_current_max(chip, chip->usb_tl_current_ma);
	/* restart AICL and recheck parallel charging for the new limit */
	smbchg_rerun_aicl(chip);
	smbchg_parallel_usb_check_ok(chip);
	return rc;
}

/*
 * Apply a thermal mitigation level: the highest level suspends both the
 * DC and USB paths; any other level re-derates the input current limits.
 * Returns -EINVAL for negative levels or when mitigation is unsupported;
 * out-of-range high levels are clamped to the highest level.
 */
static int smbchg_system_temp_level_set(struct smbchg_chip *chip,
								int lvl_sel)
{
	int rc = 0;
	int prev_therm_lvl;

	if (!chip->thermal_mitigation) {
		dev_err(chip->dev, "Thermal mitigation not supported\n");
		return -EINVAL;
	}

	if (lvl_sel < 0) {
		dev_err(chip->dev, "Unsupported level selected %d\n", lvl_sel);
		return -EINVAL;
	}

	if (lvl_sel >= chip->thermal_levels) {
		dev_err(chip->dev, "Unsupported level selected %d forcing %d\n",
				lvl_sel, chip->thermal_levels - 1);
		lvl_sel = chip->thermal_levels - 1;
	}

	if (lvl_sel == chip->therm_lvl_sel)
		return 0;

	mutex_lock(&chip->current_change_lock);
	prev_therm_lvl = chip->therm_lvl_sel;
	chip->therm_lvl_sel = lvl_sel;
	if (chip->therm_lvl_sel == (chip->thermal_levels - 1)) {
		/*
		 * Disable charging if highest value selected by
		 * setting the DC and USB path in suspend
		 */
		rc = smbchg_dc_en(chip, false, REASON_THERMAL);
		if (rc < 0) {
			dev_err(chip->dev,
				"Couldn't set dc suspend rc %d\n", rc);
			goto out;
		}
		rc = smbchg_usb_en(chip, false, REASON_THERMAL);
		if (rc < 0) {
			dev_err(chip->dev,
				"Couldn't set usb suspend rc %d\n", rc);
			goto out;
		}
		goto out;
	}

	rc = smbchg_set_thermal_limited_usb_current_max(chip,
					chip->usb_target_current_ma);
	rc = smbchg_set_thermal_limited_dc_current_max(chip,
					chip->dc_target_current_ma);

	if (prev_therm_lvl == chip->thermal_levels - 1) {
		/*
		 * If previously highest value was selected charging must have
		 * been disabed. Enable charging by taking the DC and USB path
		 * out of suspend.
		 */
		rc = smbchg_dc_en(chip, true, REASON_THERMAL);
		if (rc < 0) {
			dev_err(chip->dev,
				"Couldn't set dc suspend rc %d\n", rc);
			goto out;
		}
		rc = smbchg_usb_en(chip, true, REASON_THERMAL);
		if (rc < 0) {
			dev_err(chip->dev,
				"Couldn't set usb suspend rc %d\n", rc);
			goto out;
		}
	}
out:
	mutex_unlock(&chip->current_change_lock);
	return rc;
}

#define UCONV			1000000LL
#define VIN_FLASH_UV		5500000LL
#define FLASH_V_THRESHOLD	3000000LL
#define BUCK_EFFICIENCY		800LL
/*
 * Estimate the maximum flash current (uA) the system can draw without
 * pulling the battery below FLASH_V_THRESHOLD, from the OCV, battery
 * current and total battery resistance reported by the BMS power supply.
 * Returns 0 when no BMS psy or a required property is unavailable.
 */
static int smbchg_calc_max_flash_current(struct smbchg_chip *chip)
{
	union power_supply_propval ret = {0, };
	int ocv_uv, ibat_ua, esr_uohm, rbatt_uohm, rc;
	int64_t ibat_flash_ua, total_flash_ua, total_flash_power_fw;

	if (!chip->bms_psy && chip->bms_psy_name)
		chip->bms_psy =
			power_supply_get_by_name((char *)chip->bms_psy_name);
	/* if bms psy is not found, return 0 uA (no flash available) */
	if (!chip->bms_psy) {
		pr_smb(PR_STATUS, "no bms psy found\n");
		return 0;
	}

	rc = chip->bms_psy->get_property(chip->bms_psy,
			POWER_SUPPLY_PROP_VOLTAGE_OCV, &ret);
	if (rc) {
		pr_smb(PR_STATUS, "bms psy does not support OCV\n");
		return 0;
	}
	ocv_uv = ret.intval;

	rc = chip->bms_psy->get_property(chip->bms_psy,
			POWER_SUPPLY_PROP_CURRENT_NOW, &ret);
	if (rc) {
		pr_smb(PR_STATUS, "bms psy does not support current_now\n");
		return 0;
	}
	ibat_ua = ret.intval;

	rc = chip->bms_psy->get_property(chip->bms_psy,
			POWER_SUPPLY_PROP_RESISTANCE, &ret);
	if (rc) {
		pr_smb(PR_STATUS, "bms psy does not support resistance\n");
		return 0;
	}
	esr_uohm = ret.intval;

	rbatt_uohm = esr_uohm + chip->rpara_uohm + chip->rslow_uohm;
	/* remaining battery-current headroom above the flash threshold */
	ibat_flash_ua = (div_s64((ocv_uv - FLASH_V_THRESHOLD) * UCONV,
			rbatt_uohm)) - ibat_ua;
	total_flash_power_fw = FLASH_V_THRESHOLD * ibat_flash_ua
			* BUCK_EFFICIENCY;
	total_flash_ua = div64_s64(total_flash_power_fw, VIN_FLASH_UV * 1000LL);
	pr_smb(PR_MISC,
		"ibat_flash=%lld\n, ocv=%d, ibat=%d, rbatt=%d t_flash=%lld\n",
		ibat_flash_ua, ocv_uv, ibat_ua, rbatt_uohm, total_flash_ua);
	return (int)total_flash_ua;
}

/* Battery power-supply property setter (body continues below). */
static int smbchg_battery_set_property(struct
power_supply *psy,
				       enum power_supply_property prop,
				       const union power_supply_propval *val)
{
	struct smbchg_chip *chip = container_of(psy,
				struct smbchg_chip, batt_psy);

	switch (prop) {
	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
		smbchg_usb_en(chip, val->intval, REASON_USER);
		smbchg_dc_en(chip, val->intval, REASON_USER);
		chip->chg_enabled = val->intval;
		schedule_work(&chip->usb_set_online_work);
		break;
	case POWER_SUPPLY_PROP_CAPACITY:
		/* store a user-supplied fake state of charge */
		chip->fake_battery_soc = val->intval;
		power_supply_changed(&chip->batt_psy);
		break;
	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
		smbchg_system_temp_level_set(chip, val->intval);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Report which battery properties userspace is allowed to write. */
static int smbchg_battery_is_writeable(struct power_supply *psy,
				       enum power_supply_property prop)
{
	int rc;

	switch (prop) {
	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
	case POWER_SUPPLY_PROP_CAPACITY:
	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
		rc = 1;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}

/* Battery power-supply property getter. */
static int smbchg_battery_get_property(struct power_supply *psy,
				       enum power_supply_property prop,
				       union power_supply_propval *val)
{
	struct smbchg_chip *chip = container_of(psy,
				struct smbchg_chip, batt_psy);

	switch (prop) {
	case POWER_SUPPLY_PROP_STATUS:
		val->intval = get_prop_batt_status(chip);
		break;
	case POWER_SUPPLY_PROP_PRESENT:
		val->intval = get_prop_batt_present(chip);
		break;
	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
		val->intval = chip->chg_enabled;
		break;
	case POWER_SUPPLY_PROP_CHARGE_TYPE:
		val->intval = get_prop_charge_type(chip);
		break;
	case POWER_SUPPLY_PROP_CAPACITY:
		val->intval = get_prop_batt_capacity(chip);
		break;
	case POWER_SUPPLY_PROP_CURRENT_NOW:
		val->intval = get_prop_batt_current_now(chip);
		break;
	case POWER_SUPPLY_PROP_TEMP:
		val->intval = get_prop_batt_temp(chip);
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		val->intval = get_prop_batt_voltage_now(chip);
		break;
	case POWER_SUPPLY_PROP_HEALTH:
		val->intval = get_prop_batt_health(chip);
		break;
	case POWER_SUPPLY_PROP_TECHNOLOGY:
		val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
		break;
	case POWER_SUPPLY_PROP_FLASH_CURRENT_MAX:
		val->intval = smbchg_calc_max_flash_current(chip);
		break;
	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
		val->intval = chip->fastchg_current_ma * 1000;
		break;
	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
		val->intval = chip->therm_lvl_sel;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static enum power_supply_property smbchg_dc_properties[] = {
	POWER_SUPPLY_PROP_PRESENT,
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_CHARGING_ENABLED,
};

/* DC power-supply property setter. */
static int smbchg_dc_set_property(struct power_supply *psy,
				       enum power_supply_property prop,
				       const union power_supply_propval *val)
{
	struct smbchg_chip *chip = container_of(psy,
				struct smbchg_chip, dc_psy);

	switch (prop) {
	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
		return smbchg_dc_en(chip, val->intval, REASON_POWER_SUPPLY);
		break;	/* NOTE(review): unreachable after the return above */
	default:
		return -EINVAL;
	}

	return 0;
}

/* DC power-supply property getter. */
static int smbchg_dc_get_property(struct power_supply *psy,
				       enum power_supply_property prop,
				       union power_supply_propval *val)
{
	struct smbchg_chip *chip = container_of(psy,
				struct smbchg_chip, dc_psy);

	switch (prop) {
	case POWER_SUPPLY_PROP_PRESENT:
		val->intval = is_dc_present(chip);
		break;
	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
		val->intval = chip->dc_suspended == 0;
		break;
	case POWER_SUPPLY_PROP_ONLINE:
		/* return if dc is charging the battery */
		val->intval = (smbchg_get_pwr_path(chip) == PWR_PATH_DC)
				&& (get_prop_batt_status(chip)
					== POWER_SUPPLY_STATUS_CHARGING);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Report which DC properties userspace is allowed to write. */
static int smbchg_dc_is_writeable(struct power_supply *psy,
				       enum power_supply_property prop)
{
	int rc;

	switch (prop) {
	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
		rc = 1;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}

#define USBIN_SUSPEND_SRC_BIT		BIT(6)
/*
 * Allow or disallow charging an unrecognized battery by gating whether
 * USBIN suspend is driven from the source signal.  No-op when the state
 * is unchanged or charging unknown batteries is explicitly permitted.
 */
static void smbchg_unknown_battery_en(struct smbchg_chip *chip, bool en)
{
	int rc;

	if (en == chip->battery_unknown || chip->charge_unknown_battery)
		return;

	chip->battery_unknown = en;
	rc = smbchg_sec_masked_write(chip,
		chip->usb_chgpth_base + CHGPTH_CFG,
		USBIN_SUSPEND_SRC_BIT, en ? 0 : USBIN_SUSPEND_SRC_BIT);
	if (rc < 0) {
		dev_err(chip->dev, "Couldn't set usb_chgpth cfg rc=%d\n", rc);
		return;
	}
}

#define CMD_CHG_REG	0x42
#define EN_BAT_CHG_BIT		BIT(1)
/* Enable/disable battery charging; the hardware bit is active-low. */
static int smbchg_charging_en(struct smbchg_chip *chip, bool en)
{
	return smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
			EN_BAT_CHG_BIT, en ? 0 : EN_BAT_CHG_BIT);
}

#define UNKNOWN_BATT_TYPE	"Unknown Battery"
#define LOADING_BATT_TYPE	"Loading Battery Data"
/*
 * power_supply external-change callback for the battery psy: re-reads
 * the battery type from the BMS, the USB charge-enable flag and the USB
 * current_max from the usb psy, and applies them to the charger.
 */
static void smbchg_external_power_changed(struct power_supply *psy)
{
	struct smbchg_chip *chip = container_of(psy,
				struct smbchg_chip, batt_psy);
	union power_supply_propval prop = {0,};
	int rc, current_limit = 0;
	bool en;

	if (chip->bms_psy_name)
		chip->bms_psy =
			power_supply_get_by_name((char *)chip->bms_psy_name);

	if (chip->bms_psy) {
		chip->bms_psy->get_property(chip->bms_psy,
				POWER_SUPPLY_PROP_BATTERY_TYPE, &prop);
		en = strcmp(prop.strval, UNKNOWN_BATT_TYPE) != 0;
		smbchg_unknown_battery_en(chip, en);
		/* hold charging off while battery data is still loading */
		en = strcmp(prop.strval, LOADING_BATT_TYPE) != 0;
		smbchg_charging_en(chip, en);
	}

	rc = chip->usb_psy->get_property(chip->usb_psy,
				POWER_SUPPLY_PROP_CHARGING_ENABLED, &prop);
	if (rc < 0)
		pr_smb(PR_MISC, "could not read USB charge_en, rc=%d\n",
				rc);
	else
		smbchg_usb_en(chip, prop.intval, REASON_POWER_SUPPLY);

	rc = chip->usb_psy->get_property(chip->usb_psy,
				POWER_SUPPLY_PROP_CURRENT_MAX, &prop);
	if (rc < 0)
		dev_err(chip->dev,
			"could not read USB current_max property, rc=%d\n", rc);
	else
		current_limit = prop.intval / 1000;
	pr_smb(PR_MISC, "current_limit = %d\n", current_limit);

	mutex_lock(&chip->current_change_lock);
	if (current_limit != chip->usb_target_current_ma) {
		pr_smb(PR_STATUS, "changed current_limit = %d\n",
				current_limit);
		chip->usb_target_current_ma = current_limit;
		rc = smbchg_set_thermal_limited_usb_current_max(chip,
				current_limit);
		if (rc < 0)
			dev_err(chip->dev,
				"Couldn't set usb current rc = %d\n", rc);
	}
	mutex_unlock(&chip->current_change_lock);

	power_supply_changed(&chip->batt_psy);
}

#define VFLOAT_CFG_REG			0xF4
#define MIN_FLOAT_MV			3600
#define MAX_FLOAT_MV			4500
#define VFLOAT_MASK			SMB_MASK(5, 0)

#define MID_RANGE_FLOAT_MV_MIN		3600
#define MID_RANGE_FLOAT_MIN_VAL		0x05
#define MID_RANGE_FLOAT_STEP_MV		20

#define HIGH_RANGE_FLOAT_MIN_MV		4340
#define HIGH_RANGE_FLOAT_MIN_VAL	0x2A
#define HIGH_RANGE_FLOAT_STEP_MV	10

#define VHIGH_RANGE_FLOAT_MIN_MV	4400
#define VHIGH_RANGE_FLOAT_MIN_VAL	0x2E
#define VHIGH_RANGE_FLOAT_STEP_MV	20
/*
 * Program the float voltage register.  The register encodes three
 * ranges with different step sizes; select the range and offset that
 * match the requested millivolt value.  Returns -EINVAL out of range.
 */
static int smbchg_float_voltage_set(struct smbchg_chip *chip, int vfloat_mv)
{
	u8 temp;

	if ((vfloat_mv < MIN_FLOAT_MV) || (vfloat_mv > MAX_FLOAT_MV)) {
		dev_err(chip->dev, "bad float voltage mv =%d asked to set\n",
					vfloat_mv);
		return -EINVAL;
	}

	if (vfloat_mv <= HIGH_RANGE_FLOAT_MIN_MV) {
		/* mid range */
		temp = MID_RANGE_FLOAT_MIN_VAL
			+ (vfloat_mv - MID_RANGE_FLOAT_MV_MIN)
				/ MID_RANGE_FLOAT_STEP_MV;
	} else if (vfloat_mv <= VHIGH_RANGE_FLOAT_MIN_MV) {
		/* high range */
		temp = HIGH_RANGE_FLOAT_MIN_VAL
			+ (vfloat_mv - HIGH_RANGE_FLOAT_MIN_MV)
				/ HIGH_RANGE_FLOAT_STEP_MV;
	} else {
		/* very high range */
		temp = VHIGH_RANGE_FLOAT_MIN_VAL
			+ (vfloat_mv - VHIGH_RANGE_FLOAT_MIN_MV)
				/ VHIGH_RANGE_FLOAT_STEP_MV;
	}

	return smbchg_sec_masked_write(chip, chip->chgr_base + VFLOAT_CFG_REG,
			VFLOAT_MASK, temp);
}

#define OTG_EN		BIT(0)
/* Regulator op: enable the OTG boost (also resets the OC retry count). */
static int smbchg_otg_regulator_enable(struct regulator_dev *rdev)
{
	int rc = 0;
	struct smbchg_chip *chip = rdev_get_drvdata(rdev);

	chip->otg_retries = 0;
	rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
			OTG_EN, OTG_EN);
	if (rc < 0)
		dev_err(chip->dev, "Couldn't enable OTG mode rc=%d\n", rc);
	pr_smb(PR_STATUS, "Enabling OTG Boost\n");
	return rc;
}

/* Regulator op: disable the OTG boost. */
static int smbchg_otg_regulator_disable(struct regulator_dev *rdev)
{
	int rc = 0;
	struct smbchg_chip *chip = rdev_get_drvdata(rdev);

	rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
			OTG_EN, 0);
	if (rc < 0)
		dev_err(chip->dev, "Couldn't disable OTG mode rc=%d\n", rc);
	pr_smb(PR_STATUS, "Disabling OTG Boost\n");
	return rc;
}

/* Regulator op: report whether the OTG boost bit is currently set. */
static int smbchg_otg_regulator_is_enable(struct regulator_dev *rdev)
{
	int rc = 0;
	u8 reg = 0;
	struct smbchg_chip *chip = rdev_get_drvdata(rdev);

	rc = smbchg_read(chip, &reg, chip->bat_if_base + CMD_CHG_REG, 1);
	if (rc < 0) {
		dev_err(chip->dev,
				"Couldn't read OTG enable bit rc=%d\n", rc);
		return rc;
	}

	return (reg & OTG_EN) ? 1 : 0;
}

struct regulator_ops smbchg_otg_reg_ops = {
	.enable		= smbchg_otg_regulator_enable,
	.disable	= smbchg_otg_regulator_disable,
	.is_enabled	= smbchg_otg_regulator_is_enable,
};

#define USBIN_CHGR_CFG			0xF1
#define USBIN_ADAPTER_9V		0x3
#define HVDCP_EN_BIT			BIT(3)
/*
 * External OTG boost enable: suspend the charger's USB input, save the
 * current adapter allowance, disable HVDCP and force the allowance to
 * 9 V so the reverse boost is never detected as a valid input.
 */
static int smbchg_external_otg_regulator_enable(struct regulator_dev *rdev)
{
	bool changed;
	int rc = 0;
	struct smbchg_chip *chip = rdev_get_drvdata(rdev);

	rc = smbchg_primary_usb_en(chip, false, REASON_OTG, &changed);
	if (rc < 0) {
		dev_err(chip->dev, "Couldn't suspend charger rc=%d\n", rc);
		return rc;
	}

	rc = smbchg_read(chip, &chip->original_usbin_allowance,
			chip->usb_chgpth_base + USBIN_CHGR_CFG, 1);
	if (rc < 0) {
		dev_err(chip->dev, "Couldn't read usb allowance rc=%d\n", rc);
		return rc;
	}

	/*
	 * To disallow source detect and usbin_uv interrupts, set the adapter
	 * allowance to 9V, so that the audio boost operating in reverse never
	 * gets detected as a valid input
	 */
	rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
				HVDCP_EN_BIT, 0);
	if (rc < 0) {
		dev_err(chip->dev, "Couldn't disable HVDCP rc=%d\n", rc);
		return rc;
	}

	rc = smbchg_sec_masked_write(chip,
				chip->usb_chgpth_base + USBIN_CHGR_CFG,
				0xFF, USBIN_ADAPTER_9V);
	if (rc < 0) {
		dev_err(chip->dev, "Couldn't write usb allowance rc=%d\n", rc);
		return rc;
	}

	pr_smb(PR_STATUS, "Enabling OTG Boost\n");
	return rc;
}

/*
 * External OTG boost disable: unsuspend the charger input, then restore
 * HVDCP and the previously saved adapter allowance.
 */
static int smbchg_external_otg_regulator_disable(struct regulator_dev *rdev)
{
	bool changed;
	int rc = 0;
	struct smbchg_chip *chip = rdev_get_drvdata(rdev);

	rc = smbchg_primary_usb_en(chip, true, REASON_OTG, &changed);
	if (rc < 0) {
		dev_err(chip->dev, "Couldn't unsuspend charger rc=%d\n", rc);
		return rc;
	}

	/*
	 * Reenable HVDCP and set the adapter allowance back to the original
	 * value in order to allow normal USBs to be recognized as a valid
	 * input.
	 */
	rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
				HVDCP_EN_BIT, HVDCP_EN_BIT);
	if (rc < 0) {
		dev_err(chip->dev, "Couldn't enable HVDCP rc=%d\n", rc);
		return rc;
	}

	rc = smbchg_sec_masked_write(chip,
				chip->usb_chgpth_base + USBIN_CHGR_CFG,
				0xFF, chip->original_usbin_allowance);
	if (rc < 0) {
		dev_err(chip->dev, "Couldn't write usb allowance rc=%d\n", rc);
		return rc;
	}

	pr_smb(PR_STATUS, "Disabling OTG Boost\n");
	return rc;
}

/* External OTG boost is enabled iff the USB input is suspended for OTG. */
static int smbchg_external_otg_regulator_is_enable(struct regulator_dev *rdev)
{
	struct smbchg_chip *chip = rdev_get_drvdata(rdev);

	return !smbchg_primary_usb_is_en(chip, REASON_OTG);
}

struct regulator_ops smbchg_external_otg_reg_ops = {
	.enable		= smbchg_external_otg_regulator_enable,
	.disable	= smbchg_external_otg_regulator_disable,
	.is_enabled	= smbchg_external_otg_regulator_is_enable,
};

/*
 * Register the internal OTG boost regulator and, when configured in the
 * device tree, the external OTG regulator.  -EPROBE_DEFER failures are
 * propagated quietly so probing can be retried later.
 */
static int smbchg_regulator_init(struct smbchg_chip *chip)
{
	int rc = 0;
	struct regulator_init_data *init_data;
	struct regulator_config cfg = {};
	struct device_node *regulator_node;

	regulator_node = of_get_child_by_name(chip->dev->of_node,
			"qcom,smbcharger-boost-otg");

	init_data = of_get_regulator_init_data(chip->dev, regulator_node);
	if (!init_data) {
		dev_err(chip->dev, "Unable to allocate memory\n");
		return -ENOMEM;
	}

	if (init_data->constraints.name) {
		chip->otg_vreg.rdesc.owner = THIS_MODULE;
		chip->otg_vreg.rdesc.type = REGULATOR_VOLTAGE;
		chip->otg_vreg.rdesc.ops = &smbchg_otg_reg_ops;
		chip->otg_vreg.rdesc.name = init_data->constraints.name;

		cfg.dev = chip->dev;
		cfg.init_data = init_data;
		cfg.driver_data = chip;
		cfg.of_node = regulator_node;

		init_data->constraints.valid_ops_mask
			|= REGULATOR_CHANGE_STATUS;

		chip->otg_vreg.rdev = regulator_register(
						&chip->otg_vreg.rdesc, &cfg);
		if (IS_ERR(chip->otg_vreg.rdev)) {
			rc = PTR_ERR(chip->otg_vreg.rdev);
			chip->otg_vreg.rdev = NULL;
			if (rc != -EPROBE_DEFER)
				dev_err(chip->dev,
					"OTG reg failed, rc=%d\n", rc);
		}
	}

	if (rc)
		return rc;

	regulator_node = of_get_child_by_name(chip->dev->of_node,
			"qcom,smbcharger-external-otg");
	init_data = of_get_regulator_init_data(chip->dev, regulator_node);
	if (!init_data) {
		dev_err(chip->dev, "Unable to allocate memory\n");
		return -ENOMEM;
	}

	if (init_data->constraints.name) {
		if (of_get_property(chip->dev->of_node,
					"otg-parent-supply", NULL))
			init_data->supply_regulator = "otg-parent";

		chip->ext_otg_vreg.rdesc.owner = THIS_MODULE;
		chip->ext_otg_vreg.rdesc.type = REGULATOR_VOLTAGE;
		chip->ext_otg_vreg.rdesc.ops = &smbchg_external_otg_reg_ops;
		chip->ext_otg_vreg.rdesc.name = init_data->constraints.name;

		cfg.dev = chip->dev;
		cfg.init_data = init_data;
		cfg.driver_data = chip;
		cfg.of_node = regulator_node;

		init_data->constraints.valid_ops_mask
			|= REGULATOR_CHANGE_STATUS;

		chip->ext_otg_vreg.rdev = regulator_register(
					&chip->ext_otg_vreg.rdesc, &cfg);
		if (IS_ERR(chip->ext_otg_vreg.rdev)) {
			rc = PTR_ERR(chip->ext_otg_vreg.rdev);
			chip->ext_otg_vreg.rdev = NULL;
			if (rc != -EPROBE_DEFER)
				dev_err(chip->dev,
					"external OTG reg failed, rc=%d\n",
					rc);
		}
	}

	return rc;
}

/* Unregister any OTG regulators registered by smbchg_regulator_init(). */
static void smbchg_regulator_deinit(struct smbchg_chip *chip)
{
	if (chip->otg_vreg.rdev)
		regulator_unregister(chip->otg_vreg.rdev);
	if (chip->ext_otg_vreg.rdev)
		regulator_unregister(chip->ext_otg_vreg.rdev);
}

#define REVISION1_REG		0x0
#define DIG_MINOR		0
#define DIG_MAJOR		1
#define ANA_MINOR		2
#define ANA_MAJOR		3
/*
 * Workaround for digital-major rev 1.x silicon: while the battery is
 * not charging, drop the USB input to the limited 100 mA mode (the
 * register writes are on the following lines).
 */
static int smbchg_low_icl_wa_check(struct smbchg_chip *chip)
{
	int rc = 0;
	bool enable = (get_prop_batt_status(chip)
		!= POWER_SUPPLY_STATUS_CHARGING);

	/* only execute workaround if the charger is version 1.x */
	if (chip->revision[DIG_MAJOR] > 1)
		return 0;

	mutex_lock(&chip->current_change_lock);
	pr_smb(PR_STATUS, "low icl %s -> %s\n",
			chip->low_icl_wa_on ? "on" : "off",
			enable ?
"on" : "off"); if (enable == chip->low_icl_wa_on) goto out; chip->low_icl_wa_on = enable; if (enable) { rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG, CFG_USB_2_3_SEL_BIT, CFG_USB_2); rc |= smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL, USBIN_MODE_CHG_BIT | USB51_MODE_BIT, USBIN_LIMITED_MODE | USB51_100MA); if (rc) dev_err(chip->dev, "could not set low current limit: %d\n", rc); } else { rc = smbchg_set_thermal_limited_usb_current_max(chip, chip->usb_target_current_ma); if (rc) dev_err(chip->dev, "could not set current limit: %d\n", rc); } out: mutex_unlock(&chip->current_change_lock); return rc; } #define HOT_BAT_HARD_BIT BIT(0) #define HOT_BAT_SOFT_BIT BIT(1) #define COLD_BAT_HARD_BIT BIT(2) #define COLD_BAT_SOFT_BIT BIT(3) #define BAT_OV_BIT BIT(4) #define BAT_LOW_BIT BIT(5) #define BAT_MISSING_BIT BIT(6) #define BAT_TERM_MISSING_BIT BIT(7) static irqreturn_t batt_hot_handler(int irq, void *_chip) { struct smbchg_chip *chip = _chip; u8 reg = 0; smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1); chip->batt_hot = !!(reg & HOT_BAT_HARD_BIT); pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg); smbchg_low_icl_wa_check(chip); smbchg_parallel_usb_check_ok(chip); if (chip->psy_registered) power_supply_changed(&chip->batt_psy); return IRQ_HANDLED; } static irqreturn_t batt_cold_handler(int irq, void *_chip) { struct smbchg_chip *chip = _chip; u8 reg = 0; smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1); chip->batt_cold = !!(reg & COLD_BAT_HARD_BIT); pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg); smbchg_low_icl_wa_check(chip); smbchg_parallel_usb_check_ok(chip); if (chip->psy_registered) power_supply_changed(&chip->batt_psy); return IRQ_HANDLED; } static irqreturn_t batt_warm_handler(int irq, void *_chip) { struct smbchg_chip *chip = _chip; u8 reg = 0; smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1); chip->batt_warm = !!(reg & HOT_BAT_SOFT_BIT); pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg); 
smbchg_parallel_usb_check_ok(chip); if (chip->psy_registered) power_supply_changed(&chip->batt_psy); return IRQ_HANDLED; } static irqreturn_t batt_cool_handler(int irq, void *_chip) { struct smbchg_chip *chip = _chip; u8 reg = 0; smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1); chip->batt_cool = !!(reg & COLD_BAT_SOFT_BIT); pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg); smbchg_parallel_usb_check_ok(chip); if (chip->psy_registered) power_supply_changed(&chip->batt_psy); return IRQ_HANDLED; } static irqreturn_t batt_pres_handler(int irq, void *_chip) { struct smbchg_chip *chip = _chip; u8 reg = 0; smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1); chip->batt_present = !(reg & BAT_MISSING_BIT); pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg); if (chip->psy_registered) power_supply_changed(&chip->batt_psy); return IRQ_HANDLED; } static irqreturn_t vbat_low_handler(int irq, void *_chip) { pr_warn_ratelimited("vbat low\n"); return IRQ_HANDLED; } static irqreturn_t chg_error_handler(int irq, void *_chip) { struct smbchg_chip *chip = _chip; pr_smb(PR_INTERRUPT, "chg-error triggered\n"); smbchg_low_icl_wa_check(chip); smbchg_parallel_usb_check_ok(chip); if (chip->psy_registered) power_supply_changed(&chip->batt_psy); return IRQ_HANDLED; } static irqreturn_t fastchg_handler(int irq, void *_chip) { struct smbchg_chip *chip = _chip; pr_smb(PR_INTERRUPT, "p2f triggered\n"); smbchg_low_icl_wa_check(chip); smbchg_parallel_usb_check_ok(chip); if (chip->psy_registered) power_supply_changed(&chip->batt_psy); return IRQ_HANDLED; } static irqreturn_t chg_hot_handler(int irq, void *_chip) { pr_warn_ratelimited("chg hot\n"); return IRQ_HANDLED; } static irqreturn_t chg_term_handler(int irq, void *_chip) { struct smbchg_chip *chip = _chip; u8 reg = 0; smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1); pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg); smbchg_parallel_usb_check_ok(chip); if (chip->psy_registered) power_supply_changed(&chip->batt_psy); return IRQ_HANDLED; 
} static irqreturn_t taper_handler(int irq, void *_chip) { struct smbchg_chip *chip = _chip; u8 reg = 0; taper_irq_en(chip, false); smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1); pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg); smbchg_parallel_usb_taper(chip); return IRQ_HANDLED; } static irqreturn_t recharge_handler(int irq, void *_chip) { struct smbchg_chip *chip = _chip; u8 reg = 0; smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1); pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg); smbchg_parallel_usb_check_ok(chip); if (chip->psy_registered) power_supply_changed(&chip->batt_psy); return IRQ_HANDLED; } static irqreturn_t safety_timeout_handler(int irq, void *_chip) { struct smbchg_chip *chip = _chip; u8 reg = 0; smbchg_read(chip, &reg, chip->misc_base + RT_STS, 1); pr_warn_ratelimited("safety timeout rt_stat = 0x%02x\n", reg); if (chip->psy_registered) power_supply_changed(&chip->batt_psy); return IRQ_HANDLED; } /** * power_ok_handler() - called when the switcher turns on or turns off * @chip: pointer to smbchg_chip * @rt_stat: the status bit indicating switcher turning on or off */ static irqreturn_t power_ok_handler(int irq, void *_chip) { struct smbchg_chip *chip = _chip; u8 reg = 0; smbchg_read(chip, &reg, chip->misc_base + RT_STS, 1); pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg); return IRQ_HANDLED; } /** * dcin_uv_handler() - called when the dc voltage crosses the uv threshold * @chip: pointer to smbchg_chip * @rt_stat: the status bit indicating whether dc voltage is uv */ #define DCIN_UNSUSPEND_DELAY_MS 1000 static irqreturn_t dcin_uv_handler(int irq, void *_chip) { struct smbchg_chip *chip = _chip; bool dc_present = is_dc_present(chip); pr_smb(PR_STATUS, "chip->dc_present = %d dc_present = %d\n", chip->dc_present, dc_present); if (chip->dc_present != dc_present) { /* dc changed */ chip->dc_present = dc_present; if (chip->psy_registered) power_supply_changed(&chip->dc_psy); } return IRQ_HANDLED; } static void handle_usb_removal(struct 
smbchg_chip *chip)
{
	struct power_supply *parallel_psy = get_parallel_psy(chip);

	if (chip->usb_psy) {
		pr_smb(PR_MISC, "setting usb psy type = %d\n",
				POWER_SUPPLY_TYPE_UNKNOWN);
		pr_smb(PR_MISC, "setting usb psy present = %d\n",
				chip->usb_present);
		power_supply_set_supply_type(chip->usb_psy,
				POWER_SUPPLY_TYPE_UNKNOWN);
		power_supply_set_present(chip->usb_psy, chip->usb_present);
		schedule_work(&chip->usb_set_online_work);
	}
	if (parallel_psy)
		power_supply_set_present(parallel_psy, false);
	if (chip->parallel.avail && chip->enable_aicl_wake) {
		disable_irq_wake(chip->aicl_done_irq);
		chip->enable_aicl_wake = false;
	}
}

/*
 * Handle USB charger insertion: read the detected charger type, update
 * the usb psy, attach the parallel charger and arm the AICL wake irq.
 */
static void handle_usb_insertion(struct smbchg_chip *chip)
{
	struct power_supply *parallel_psy = get_parallel_psy(chip);
	enum power_supply_type usb_supply_type;
	int rc;
	char *usb_type_name = "null";
	u8 reg = 0;

	/* usb inserted */
	rc = smbchg_read(chip, &reg, chip->misc_base + IDEV_STS, 1);
	if (rc < 0)
		dev_err(chip->dev, "Couldn't read status 5 rc = %d\n", rc);
	usb_type_name = get_usb_type_name(reg);
	usb_supply_type = get_usb_supply_type(reg);
	pr_smb(PR_STATUS, "inserted %s, usb psy type = %d stat_5 = 0x%02x\n",
			usb_type_name, usb_supply_type, reg);
	if (chip->usb_psy) {
		pr_smb(PR_MISC, "setting usb psy type = %d\n",
				usb_supply_type);
		power_supply_set_supply_type(chip->usb_psy, usb_supply_type);
		pr_smb(PR_MISC, "setting usb psy present = %d\n",
				chip->usb_present);
		power_supply_set_present(chip->usb_psy, chip->usb_present);
		schedule_work(&chip->usb_set_online_work);
	}
	if (parallel_psy)
		power_supply_set_present(parallel_psy, true);
	if (chip->parallel.avail && !chip->enable_aicl_wake) {
		enable_irq_wake(chip->aicl_done_irq);
		chip->enable_aicl_wake = true;
	}
}

/**
 * usbin_uv_handler() - this is called when USB charger is removed
 * @chip: pointer to smbchg_chip chip
 * @rt_stat: the status bit indicating chg insertion/removal
 */
static irqreturn_t usbin_uv_handler(int irq, void *_chip)
{
	struct smbchg_chip *chip = _chip;
	bool usb_present = is_usb_present(chip);

	pr_smb(PR_STATUS, "chip->usb_present = %d usb_present = %d\n",
			chip->usb_present, usb_present);
	if (chip->usb_present && !usb_present) {
		/* USB removed */
		chip->usb_present = usb_present;
		handle_usb_removal(chip);
	}
	return IRQ_HANDLED;
}

/**
 * src_detect_handler() - this is called when USB charger type is detected, use
 *			it for handling USB charger insertion
 * @chip: pointer to smbchg_chip
 * @rt_stat: the status bit indicating chg insertion/removal
 */
static irqreturn_t src_detect_handler(int irq, void *_chip)
{
	struct smbchg_chip *chip = _chip;
	bool usb_present = is_usb_present(chip);

	pr_smb(PR_STATUS, "chip->usb_present = %d usb_present = %d\n",
			chip->usb_present, usb_present);
	if (!chip->usb_present && usb_present) {
		/* USB inserted */
		chip->usb_present = usb_present;
		handle_usb_insertion(chip);
	}
	return IRQ_HANDLED;
}

/**
 * otg_oc_handler() - called when the usb otg goes over current
 */
#define NUM_OTG_RETRIES		1
static irqreturn_t otg_oc_handler(int irq, void *_chip)
{
	struct smbchg_chip *chip = _chip;

	pr_smb(PR_INTERRUPT, "triggered\n");
	/*
	 * Due to a HW bug in the PMI8994 charger, the current inrush that
	 * occurs when connecting certain OTG devices can cause the OTG
	 * overcurrent protection to trip.
	 *
	 * The work around is to try reenabling the OTG when getting an
	 * overcurrent interrupt once.
	 */
	if (chip->otg_retries < NUM_OTG_RETRIES) {
		chip->otg_retries += 1;
		pr_smb(PR_STATUS, "Retrying OTG enable. Try #%d\n",
						chip->otg_retries);
		smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
							OTG_EN, 0);
		msleep(20);
		smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
							OTG_EN, OTG_EN);
	}
	return IRQ_HANDLED;
}

/**
 * otg_fail_handler() - called when the usb otg fails
 * (when vbat < OTG UVLO threshold)
 */
static irqreturn_t otg_fail_handler(int irq, void *_chip)
{
	pr_smb(PR_INTERRUPT, "triggered\n");
	return IRQ_HANDLED;
}

/**
 * aicl_done_handler() - called when the usb AICL algorithm is finished
 *			and a current is set.
 */
static irqreturn_t aicl_done_handler(int irq, void *_chip)
{
	struct smbchg_chip *chip = _chip;
	bool usb_present = is_usb_present(chip);

	pr_smb(PR_INTERRUPT, "aicl_done triggered\n");
	if (usb_present)
		smbchg_parallel_usb_check_ok(chip);
	return IRQ_HANDLED;
}

/**
 * usbid_change_handler() - called when the usb RID changes.
 * This is used mostly for detecting OTG
 */
static irqreturn_t usbid_change_handler(int irq, void *_chip)
{
	struct smbchg_chip *chip = _chip;
	bool otg_present;

	pr_smb(PR_INTERRUPT, "triggered\n");
	/*
	 * After the falling edge of the usbid change interrupt occurs,
	 * there may still be some time before the ADC conversion for USB RID
	 * finishes in the fuel gauge. In the worst case, this could be up to
	 * 15 ms.
	 *
	 * Sleep for 20 ms (minimum msleep time) to wait for the conversion to
	 * finish and the USB RID status register to be updated before trying
	 * to detect OTG insertions.
	 */
	msleep(20);
	otg_present = is_otg_present(chip);
	if (chip->usb_psy)
		power_supply_set_usb_otg(chip->usb_psy, otg_present ? 1 : 0);
	if (otg_present)
		pr_smb(PR_STATUS, "OTG detected\n");
	return IRQ_HANDLED;
}

/*
 * Seed the driver state by invoking the handlers once before the IRQs
 * are requested, then sync the cached USB/DC presence flags.
 */
static int determine_initial_status(struct smbchg_chip *chip)
{
	/*
	 * It is okay to read the interrupt status here since
	 * interrupts aren't requested. reading interrupt status
	 * clears the interrupt so be careful to read interrupt
	 * status only in interrupt handling code
	 */
	batt_pres_handler(0, chip);
	batt_hot_handler(0, chip);
	batt_warm_handler(0, chip);
	batt_cool_handler(0, chip);
	batt_cold_handler(0, chip);
	chg_term_handler(0, chip);
	usbid_change_handler(0, chip);
	src_detect_handler(0, chip);

	chip->usb_present = is_usb_present(chip);
	chip->dc_present = is_dc_present(chip);

	if (chip->usb_present)
		handle_usb_insertion(chip);
	else
		handle_usb_removal(chip);

	return 0;
}

/* Pre-charge safety timeout choices (minutes). */
static int prechg_time[] = {
	24,
	48,
	96,
	192,
};
/* Fast-charge safety timeout choices (minutes). */
static int chg_time[] = {
	192,
	384,
	768,
	1536,
};

/* Battery-presence detection source selection. */
enum bpd_type {
	BPD_TYPE_BAT_NONE,
	BPD_TYPE_BAT_ID,
	BPD_TYPE_BAT_THM,
	BPD_TYPE_BAT_THM_BAT_ID,
	BPD_TYPE_DEFAULT,
};

static const char * const bpd_label[] = {
	[BPD_TYPE_BAT_NONE]		= "bpd_none",
	[BPD_TYPE_BAT_ID]		= "bpd_id",
	[BPD_TYPE_BAT_THM]		= "bpd_thm",
	[BPD_TYPE_BAT_THM_BAT_ID]	= "bpd_thm_id",
};

/* Map a bpd label string to its enum bpd_type, or -EINVAL. */
static inline int get_bpd(const char *name)
{
	int i = 0;

	for (i = 0; i < ARRAY_SIZE(bpd_label); i++) {
		if (strcmp(bpd_label[i], name) == 0)
			return i;
	}
	return -EINVAL;
}

#define CHGR_CFG1			0xFB
#define RECHG_THRESHOLD_SRC_BIT		BIT(1)
#define TERM_I_SRC_BIT			BIT(2)
#define CHGR_CFG2			0xFC
#define CHG_INHIB_CFG_REG		0xF7
#define CHG_INHIBIT_50MV_VAL		0x00
#define CHG_INHIBIT_100MV_VAL		0x01
#define CHG_INHIBIT_200MV_VAL		0x02
#define CHG_INHIBIT_300MV_VAL		0x03
#define CHG_INHIBIT_MASK		0x03
#define USE_REGISTER_FOR_CURRENT	BIT(2)
#define CHG_EN_SRC_BIT			BIT(7)
#define CHG_EN_COMMAND_BIT		BIT(6)
#define P2F_CHG_TRAN			BIT(5)
#define I_TERM_BIT			BIT(3)
#define AUTO_RECHG_BIT			BIT(2)
#define CHARGER_INHIBIT_BIT		BIT(0)
#define CFG_TCC_REG			0xF9
#define CHG_ITERM_50MA			0x1
#define CHG_ITERM_100MA			0x2
#define CHG_ITERM_150MA			0x3
#define CHG_ITERM_200MA			0x4
#define CHG_ITERM_250MA			0x5
#define CHG_ITERM_300MA			0x0
#define CHG_ITERM_500MA			0x6
#define CHG_ITERM_600MA			0x7
#define CHG_ITERM_MASK			SMB_MASK(2, 0)
#define USB51_COMMAND_POL		BIT(2)
#define USB51AC_CTRL			BIT(1)
#define SFT_CFG				0xFD
#define TR_8OR32B 0xFE #define BUCK_8_16_FREQ_BIT BIT(0) #define SFT_EN_MASK SMB_MASK(5, 4) #define SFT_TO_MASK SMB_MASK(3, 2) #define PRECHG_SFT_TO_MASK SMB_MASK(1, 0) #define SFT_TIMER_DISABLE_BIT BIT(5) #define PRECHG_SFT_TIMER_DISABLE_BIT BIT(4) #define SAFETY_TIME_MINUTES_SHIFT 2 #define BM_CFG 0xF3 #define BATT_MISSING_ALGO_BIT BIT(2) #define BMD_PIN_SRC_MASK SMB_MASK(1, 0) #define PIN_SRC_SHIFT 0 #define CHGR_CFG 0xFF #define RCHG_LVL_BIT BIT(0) #define CFG_AFVC 0xF5 #define VFLOAT_COMP_ENABLE_MASK SMB_MASK(2, 0) #define TR_RID_REG 0xFA #define FG_INPUT_FET_DELAY_BIT BIT(3) #define TRIM_OPTIONS_7_0 0xF6 #define INPUT_MISSING_POLLER_EN_BIT BIT(3) static int smbchg_hw_init(struct smbchg_chip *chip) { int rc, i; u8 reg, mask; rc = smbchg_read(chip, chip->revision, chip->misc_base + REVISION1_REG, 4); if (rc < 0) { dev_err(chip->dev, "Couldn't read revision rc=%d\n", rc); return rc; } pr_smb(PR_STATUS, "Charger Revision DIG: %d.%d; ANA: %d.%d\n", chip->revision[DIG_MAJOR], chip->revision[DIG_MINOR], chip->revision[ANA_MAJOR], chip->revision[ANA_MINOR]); rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + TR_RID_REG, FG_INPUT_FET_DELAY_BIT, FG_INPUT_FET_DELAY_BIT); if (rc < 0) { dev_err(chip->dev, "Couldn't disable fg input fet delay rc=%d\n", rc); return rc; } rc = smbchg_sec_masked_write(chip, chip->misc_base + TRIM_OPTIONS_7_0, INPUT_MISSING_POLLER_EN_BIT, 0); if (rc < 0) { dev_err(chip->dev, "Couldn't disable input missing poller rc=%d\n", rc); return rc; } /* * force using current from the register i.e. 
ignore auto * power source detect (APSD) mA ratings */ reg = USE_REGISTER_FOR_CURRENT; rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL, USE_REGISTER_FOR_CURRENT, USE_REGISTER_FOR_CURRENT); if (rc < 0) { dev_err(chip->dev, "Couldn't set input limit cmd rc=%d\n", rc); return rc; } /* * set chg en by cmd register, set chg en by writing bit 1, * enable auto pre to fast, enable current termination, enable * auto recharge, enable chg inhibition */ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG2, CHG_EN_SRC_BIT | CHG_EN_COMMAND_BIT | P2F_CHG_TRAN | I_TERM_BIT | AUTO_RECHG_BIT | CHARGER_INHIBIT_BIT, CHARGER_INHIBIT_BIT | CHG_EN_COMMAND_BIT | (chip->iterm_disabled ? I_TERM_BIT : 0)); if (rc < 0) { dev_err(chip->dev, "Couldn't set chgr_cfg2 rc=%d\n", rc); return rc; } /* * Based on the configuration, use the analog sensors or the fuelgauge * adc for recharge threshold source. */ if (chip->chg_inhibit_source_fg) rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG1, TERM_I_SRC_BIT | RECHG_THRESHOLD_SRC_BIT, RECHG_THRESHOLD_SRC_BIT); else rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG1, TERM_I_SRC_BIT | RECHG_THRESHOLD_SRC_BIT, 0); if (rc < 0) { dev_err(chip->dev, "Couldn't set chgr_cfg2 rc=%d\n", rc); return rc; } /* * control USB suspend via command bits and set correct 100/500mA * polarity on the usb current */ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG, USBIN_SUSPEND_SRC_BIT | USB51_COMMAND_POL | USB51AC_CTRL, (chip->charge_unknown_battery ? 
0 : USBIN_SUSPEND_SRC_BIT)); if (rc < 0) { dev_err(chip->dev, "Couldn't set usb_chgpth cfg rc=%d\n", rc); return rc; } /* set the float voltage */ if (chip->vfloat_mv != -EINVAL) { rc = smbchg_float_voltage_set(chip, chip->vfloat_mv); if (rc < 0) { dev_err(chip->dev, "Couldn't set float voltage rc = %d\n", rc); return rc; } pr_smb(PR_STATUS, "set vfloat to %d\n", chip->vfloat_mv); } /* set iterm */ if (chip->iterm_ma != -EINVAL) { if (chip->iterm_disabled) { dev_err(chip->dev, "Error: Both iterm_disabled and iterm_ma set\n"); return -EINVAL; } else { if (chip->iterm_ma <= 50) reg = CHG_ITERM_50MA; else if (chip->iterm_ma <= 100) reg = CHG_ITERM_100MA; else if (chip->iterm_ma <= 150) reg = CHG_ITERM_150MA; else if (chip->iterm_ma <= 200) reg = CHG_ITERM_200MA; else if (chip->iterm_ma <= 250) reg = CHG_ITERM_250MA; else if (chip->iterm_ma <= 300) reg = CHG_ITERM_300MA; else if (chip->iterm_ma <= 500) reg = CHG_ITERM_500MA; else reg = CHG_ITERM_600MA; rc = smbchg_sec_masked_write(chip, chip->chgr_base + CFG_TCC_REG, CHG_ITERM_MASK, reg); if (rc) { dev_err(chip->dev, "Couldn't set iterm rc = %d\n", rc); return rc; } pr_smb(PR_STATUS, "set tcc (%d) to 0x%02x\n", chip->iterm_ma, reg); } } /* set the safety time voltage */ if (chip->safety_time != -EINVAL) { reg = (chip->safety_time > 0 ? 0 : SFT_TIMER_DISABLE_BIT) | (chip->prechg_safety_time > 0 ? 0 : PRECHG_SFT_TIMER_DISABLE_BIT); for (i = 0; i < ARRAY_SIZE(chg_time); i++) { if (chip->safety_time <= chg_time[i]) { reg |= i << SAFETY_TIME_MINUTES_SHIFT; break; } } for (i = 0; i < ARRAY_SIZE(prechg_time); i++) { if (chip->prechg_safety_time <= prechg_time[i]) { reg |= i; break; } } rc = smbchg_sec_masked_write(chip, chip->chgr_base + SFT_CFG, SFT_EN_MASK | SFT_TO_MASK | (chip->prechg_safety_time > 0 ? 
PRECHG_SFT_TO_MASK : 0), reg); if (rc < 0) { dev_err(chip->dev, "Couldn't set safety timer rc = %d\n", rc); return rc; } } /* make the buck switch faster to prevent some vbus oscillation */ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + TR_8OR32B, BUCK_8_16_FREQ_BIT, 0); if (rc < 0) { dev_err(chip->dev, "Couldn't set buck frequency rc = %d\n", rc); return rc; } /* battery missing detection */ mask = BATT_MISSING_ALGO_BIT; reg = chip->bmd_algo_disabled ? BATT_MISSING_ALGO_BIT : 0; if (chip->bmd_pin_src < BPD_TYPE_DEFAULT) { mask |= BMD_PIN_SRC_MASK; reg |= chip->bmd_pin_src << PIN_SRC_SHIFT; } rc = smbchg_sec_masked_write(chip, chip->bat_if_base + BM_CFG, mask, reg); if (rc < 0) { dev_err(chip->dev, "Couldn't set batt_missing config = %d\n", rc); return rc; } smbchg_low_icl_wa_check(chip); /* * The charger needs 20 milliseconds to go into battery supplementary * mode. Sleep here until we are sure it takes into effect. */ msleep(20); smbchg_usb_en(chip, chip->chg_enabled, REASON_USER); smbchg_dc_en(chip, chip->chg_enabled, REASON_USER); /* resume threshold */ if (chip->resume_delta_mv != -EINVAL) { /* * Configure only if the recharge threshold source is not * fuel gauge ADC. */ if (!chip->chg_inhibit_source_fg) { if (chip->resume_delta_mv < 100) reg = CHG_INHIBIT_50MV_VAL; else if (chip->resume_delta_mv < 200) reg = CHG_INHIBIT_100MV_VAL; else if (chip->resume_delta_mv < 300) reg = CHG_INHIBIT_200MV_VAL; else reg = CHG_INHIBIT_300MV_VAL; rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHG_INHIB_CFG_REG, CHG_INHIBIT_MASK, reg); if (rc < 0) { dev_err(chip->dev, "Couldn't set inhibit val rc = %d\n", rc); return rc; } } rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG, RCHG_LVL_BIT, (chip->resume_delta_mv < 200) ? 
0 : RCHG_LVL_BIT); if (rc < 0) { dev_err(chip->dev, "Couldn't set recharge rc = %d\n", rc); return rc; } } /* DC path current settings */ if (chip->dc_psy_type != -EINVAL) { rc = smbchg_set_thermal_limited_dc_current_max(chip, chip->dc_target_current_ma); if (rc < 0) { dev_err(chip->dev, "can't set dc current: %d\n", rc); return rc; } } /* * on some devices the battery is powered via external sources which * could raise its voltage above the float voltage. smbchargers go * in to reverse boost in such a situation and the workaround is to * disable float voltage compensation (note that the battery will appear * hot/cold when powered via external source). */ if (chip->soft_vfloat_comp_disabled) { rc = smbchg_sec_masked_write(chip, chip->chgr_base + CFG_AFVC, VFLOAT_COMP_ENABLE_MASK, 0); if (rc < 0) { dev_err(chip->dev, "Couldn't disable soft vfloat rc = %d\n", rc); return rc; } } rc = smbchg_set_fastchg_current(chip, chip->target_fastchg_current_ma); if (rc < 0) { dev_err(chip->dev, "Couldn't set fastchg current = %d\n", rc); return rc; } rc = smbchg_read(chip, &chip->original_usbin_allowance, chip->usb_chgpth_base + USBIN_CHGR_CFG, 1); if (rc < 0) dev_err(chip->dev, "Couldn't read usb allowance rc=%d\n", rc); return rc; } static struct of_device_id smbchg_match_table[] = { { .compatible = "qcom,qpnp-smbcharger", .data = (void *)ARRAY_SIZE(usb_current_table), }, { }, }; #define DC_MA_MIN 300 #define DC_MA_MAX 2000 #define OF_PROP_READ(chip, prop, dt_property, retval, optional) \ do { \ if (retval) \ break; \ if (optional) \ prop = -EINVAL; \ \ retval = of_property_read_u32(chip->spmi->dev.of_node, \ "qcom," dt_property , \ &prop); \ \ if ((retval == -EINVAL) && optional) \ retval = 0; \ else if (retval) \ dev_err(chip->dev, "Error reading " #dt_property \ " property rc = %d\n", rc); \ } while (0) static int smb_parse_dt(struct smbchg_chip *chip) { int rc = 0; struct device_node *node = chip->dev->of_node; const char *dc_psy_type, *bpd; if (!node) { dev_err(chip->dev, 
"device tree info. missing\n"); return -EINVAL; } /* read optional u32 properties */ OF_PROP_READ(chip, chip->iterm_ma, "iterm-ma", rc, 1); OF_PROP_READ(chip, chip->target_fastchg_current_ma, "fastchg-current-ma", rc, 1); OF_PROP_READ(chip, chip->vfloat_mv, "float-voltage-mv", rc, 1); OF_PROP_READ(chip, chip->safety_time, "charging-timeout-mins", rc, 1); OF_PROP_READ(chip, chip->rpara_uohm, "rparasitic-uohm", rc, 1); OF_PROP_READ(chip, chip->prechg_safety_time, "precharging-timeout-mins", rc, 1); if (chip->safety_time != -EINVAL && (chip->safety_time > chg_time[ARRAY_SIZE(chg_time) - 1])) { dev_err(chip->dev, "Bad charging-timeout-mins %d\n", chip->safety_time); return -EINVAL; } if (chip->prechg_safety_time != -EINVAL && (chip->prechg_safety_time > prechg_time[ARRAY_SIZE(prechg_time) - 1])) { dev_err(chip->dev, "Bad precharging-timeout-mins %d\n", chip->prechg_safety_time); return -EINVAL; } OF_PROP_READ(chip, chip->resume_delta_mv, "resume-delta-mv", rc, 1); OF_PROP_READ(chip, chip->parallel.min_current_thr_ma, "parallel-usb-min-current-ma", rc, 1); OF_PROP_READ(chip, chip->parallel.min_9v_current_thr_ma, "parallel-usb-9v-min-current-ma", rc, 1); OF_PROP_READ(chip, chip->parallel.allowed_lowering_ma, "parallel-allowed-lowering-ma", rc, 1); if (chip->parallel.min_current_thr_ma != -EINVAL && chip->parallel.min_9v_current_thr_ma != -EINVAL) chip->parallel.avail = true; pr_smb(PR_STATUS, "parallel usb thr: %d, 9v thr: %d\n", chip->parallel.min_current_thr_ma, chip->parallel.min_9v_current_thr_ma); /* read boolean configuration properties */ chip->bmd_algo_disabled = of_property_read_bool(node, "qcom,bmd-algo-disabled"); chip->iterm_disabled = of_property_read_bool(node, "qcom,iterm-disabled"); chip->soft_vfloat_comp_disabled = of_property_read_bool(node, "qcom,soft-vfloat-comp-disabled"); chip->chg_enabled = !(of_property_read_bool(node, "qcom,charging-disabled")); chip->charge_unknown_battery = of_property_read_bool(node, "qcom,charge-unknown-battery"); 
chip->chg_inhibit_source_fg = of_property_read_bool(node, "qcom,chg-inhibit-fg"); /* parse the battery missing detection pin source */ rc = of_property_read_string(chip->spmi->dev.of_node, "qcom,bmd-pin-src", &bpd); if (rc) { /* Select BAT_THM as default BPD scheme */ chip->bmd_pin_src = BPD_TYPE_DEFAULT; rc = 0; } else { chip->bmd_pin_src = get_bpd(bpd); if (chip->bmd_pin_src < 0) { dev_err(chip->dev, "failed to determine bpd schema %d\n", rc); return rc; } } /* parse the dc power supply configuration */ rc = of_property_read_string(node, "qcom,dc-psy-type", &dc_psy_type); if (rc) { chip->dc_psy_type = -EINVAL; rc = 0; } else { if (strcmp(dc_psy_type, "Mains") == 0) chip->dc_psy_type = POWER_SUPPLY_TYPE_MAINS; else if (strcmp(dc_psy_type, "Wireless") == 0) chip->dc_psy_type = POWER_SUPPLY_TYPE_WIRELESS; } if (chip->dc_psy_type != -EINVAL) { OF_PROP_READ(chip, chip->dc_target_current_ma, "dc-psy-ma", rc, 0); if (rc) return rc; if (chip->dc_target_current_ma < DC_MA_MIN || chip->dc_target_current_ma > DC_MA_MAX) { dev_err(chip->dev, "Bad dc mA %d\n", chip->dc_target_current_ma); return -EINVAL; } } /* read the bms power supply name */ rc = of_property_read_string(node, "qcom,bms-psy-name", &chip->bms_psy_name); if (rc) chip->bms_psy_name = NULL; /* read the bms power supply name */ rc = of_property_read_string(node, "qcom,battery-psy-name", &chip->battery_psy_name); if (rc) chip->battery_psy_name = "battery"; if (of_find_property(node, "qcom,thermal-mitigation", &chip->thermal_levels)) { chip->thermal_mitigation = devm_kzalloc(chip->dev, chip->thermal_levels, GFP_KERNEL); if (chip->thermal_mitigation == NULL) { dev_err(chip->dev, "thermal mitigation kzalloc() failed.\n"); return -ENOMEM; } chip->thermal_levels /= sizeof(int); rc = of_property_read_u32_array(node, "qcom,thermal-mitigation", chip->thermal_mitigation, chip->thermal_levels); if (rc) { dev_err(chip->dev, "Couldn't read threm limits rc = %d\n", rc); return rc; } } return 0; } #define SUBTYPE_REG 0x5 
/* SPMI peripheral subtype codes read back from SUBTYPE_REG */
#define SMBCHG_CHGR_SUBTYPE		0x1
#define SMBCHG_OTG_SUBTYPE		0x8
#define SMBCHG_BAT_IF_SUBTYPE		0x3
#define SMBCHG_USB_CHGPTH_SUBTYPE	0x4
#define SMBCHG_DC_CHGPTH_SUBTYPE	0x5
#define SMBCHG_MISC_SUBTYPE		0x7

/*
 * Look up an IRQ by name on @resource and request it as a threaded IRQ.
 * NOTE(review): this macro contains 'return' statements, so it can only be
 * used inside a function returning int and hides early exits at call sites.
 */
#define REQUEST_IRQ(chip, resource, irq_num, irq_name, irq_handler, flags, rc)\
do {									\
	irq_num = spmi_get_irq_byname(chip->spmi,			\
					resource, irq_name);		\
	if (irq_num < 0) {						\
		dev_err(chip->dev, "Unable to get " irq_name " irq\n");	\
		return -ENXIO;						\
	}								\
	rc = devm_request_threaded_irq(chip->dev,			\
			irq_num, NULL, irq_handler, flags, irq_name,	\
			chip);						\
	if (rc < 0) {							\
		dev_err(chip->dev, "Unable to request " irq_name " irq: %d\n",\
				rc);					\
		return -ENXIO;						\
	}								\
} while (0)

/*
 * smbchg_request_irqs() - walk every SPMI sub-peripheral, identify it by
 * subtype, and request/enable-wake the IRQs belonging to that peripheral.
 * Returns 0 on success; REQUEST_IRQ() returns -ENXIO from within on failure.
 */
static int smbchg_request_irqs(struct smbchg_chip *chip)
{
	int rc = 0;
	struct resource *resource;
	struct spmi_resource *spmi_resource;
	u8 subtype;
	struct spmi_device *spmi = chip->spmi;
	unsigned long flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
							| IRQF_ONESHOT;

	spmi_for_each_container_dev(spmi_resource, chip->spmi) {
		if (!spmi_resource) {
			/*
			 * NOTE(review): rc is 0 here, so this error path
			 * returns success -- confirm whether it should
			 * return a negative errno instead.
			 */
			dev_err(chip->dev, "spmi resource absent\n");
			return rc;
		}

		resource = spmi_get_resource(spmi, spmi_resource,
						IORESOURCE_MEM, 0);
		if (!(resource && resource->start)) {
			/* NOTE(review): same rc==0 "error" return as above */
			dev_err(chip->dev, "node %s IO resource absent!\n",
				spmi->dev.of_node->full_name);
			return rc;
		}
		rc = smbchg_read(chip, &subtype,
				resource->start + SUBTYPE_REG, 1);
		if (rc) {
			dev_err(chip->dev,
				"Peripheral subtype read failed rc=%d\n",
					rc);
			return rc;
		}

		switch (subtype) {
		case SMBCHG_CHGR_SUBTYPE:
			REQUEST_IRQ(chip, spmi_resource, chip->chg_error_irq,
				"chg-error", chg_error_handler, flags, rc);
			REQUEST_IRQ(chip, spmi_resource, chip->taper_irq,
				"chg-taper-thr", taper_handler,
				(IRQF_TRIGGER_RISING | IRQF_ONESHOT), rc);
			/* taper IRQ stays off until taper charging starts */
			disable_irq_nosync(chip->taper_irq);
			REQUEST_IRQ(chip, spmi_resource, chip->chg_term_irq,
				"chg-tcc-thr", chg_term_handler, flags, rc);
			REQUEST_IRQ(chip, spmi_resource, chip->recharge_irq,
				"chg-rechg-thr", recharge_handler, flags, rc);
			REQUEST_IRQ(chip, spmi_resource, chip->fastchg_irq,
				"chg-p2f-thr", fastchg_handler, flags, rc);
			enable_irq_wake(chip->chg_term_irq);
			enable_irq_wake(chip->chg_error_irq);
			enable_irq_wake(chip->fastchg_irq);
			break;
		case SMBCHG_BAT_IF_SUBTYPE:
			REQUEST_IRQ(chip, spmi_resource, chip->batt_hot_irq,
				"batt-hot", batt_hot_handler, flags, rc);
			REQUEST_IRQ(chip, spmi_resource, chip->batt_warm_irq,
				"batt-warm", batt_warm_handler, flags, rc);
			REQUEST_IRQ(chip, spmi_resource, chip->batt_cool_irq,
				"batt-cool", batt_cool_handler, flags, rc);
			REQUEST_IRQ(chip, spmi_resource, chip->batt_cold_irq,
				"batt-cold", batt_cold_handler, flags, rc);
			REQUEST_IRQ(chip, spmi_resource, chip->batt_missing_irq,
				"batt-missing", batt_pres_handler, flags, rc);
			REQUEST_IRQ(chip, spmi_resource, chip->vbat_low_irq,
				"batt-low", vbat_low_handler, flags, rc);
			enable_irq_wake(chip->batt_hot_irq);
			enable_irq_wake(chip->batt_warm_irq);
			enable_irq_wake(chip->batt_cool_irq);
			enable_irq_wake(chip->batt_cold_irq);
			enable_irq_wake(chip->batt_missing_irq);
			enable_irq_wake(chip->vbat_low_irq);
			break;
		case SMBCHG_USB_CHGPTH_SUBTYPE:
			REQUEST_IRQ(chip, spmi_resource, chip->usbin_uv_irq,
				"usbin-uv", usbin_uv_handler, flags, rc);
			REQUEST_IRQ(chip, spmi_resource, chip->src_detect_irq,
				"usbin-src-det", src_detect_handler, flags, rc);
			REQUEST_IRQ(chip, spmi_resource, chip->otg_fail_irq,
				"otg-fail", otg_fail_handler, flags, rc);
			REQUEST_IRQ(chip, spmi_resource, chip->otg_oc_irq,
				"otg-oc", otg_oc_handler,
				(IRQF_TRIGGER_RISING | IRQF_ONESHOT), rc);
			REQUEST_IRQ(chip, spmi_resource, chip->aicl_done_irq,
				"aicl-done", aicl_done_handler, flags, rc);
			REQUEST_IRQ(chip, spmi_resource, chip->usbid_change_irq,
				"usbid-change", usbid_change_handler,
				(IRQF_TRIGGER_FALLING | IRQF_ONESHOT), rc);
			enable_irq_wake(chip->usbin_uv_irq);
			enable_irq_wake(chip->src_detect_irq);
			enable_irq_wake(chip->otg_fail_irq);
			enable_irq_wake(chip->otg_oc_irq);
			enable_irq_wake(chip->usbid_change_irq);
			break;
		case SMBCHG_DC_CHGPTH_SUBTYPE:
			REQUEST_IRQ(chip, spmi_resource, chip->dcin_uv_irq,
				"dcin-uv", dcin_uv_handler, flags, rc);
			enable_irq_wake(chip->dcin_uv_irq);
			break;
		case SMBCHG_MISC_SUBTYPE:
			REQUEST_IRQ(chip, spmi_resource, chip->power_ok_irq,
				"power-ok", power_ok_handler, flags, rc);
			REQUEST_IRQ(chip, spmi_resource, chip->chg_hot_irq,
				"temp-shutdown", chg_hot_handler, flags, rc);
			REQUEST_IRQ(chip, spmi_resource,
				chip->safety_timeout_irq,
				"safety-timeout",
				safety_timeout_handler, flags, rc);
			enable_irq_wake(chip->chg_hot_irq);
			enable_irq_wake(chip->safety_timeout_irq);
			break;
		case SMBCHG_OTG_SUBTYPE:
			/* OTG peripheral has no IRQs requested here */
			break;
		}
	}

	return rc;
}

/* Flag a peripheral base that was never found, preserving any earlier rc */
#define REQUIRE_BASE(chip, base, rc)					\
do {									\
	if (!rc && !chip->base) {					\
		dev_err(chip->dev, "Missing " #base "\n");		\
		rc = -EINVAL;						\
	}								\
} while (0)

/*
 * smbchg_parse_peripherals() - record the SPMI base address of each
 * sub-peripheral by subtype, then verify all mandatory bases were found
 * (otg_base is optional).  Returns 0 on success or a negative errno.
 */
static int smbchg_parse_peripherals(struct smbchg_chip *chip)
{
	int rc = 0;
	struct resource *resource;
	struct spmi_resource *spmi_resource;
	u8 subtype;
	struct spmi_device *spmi = chip->spmi;

	spmi_for_each_container_dev(spmi_resource, chip->spmi) {
		if (!spmi_resource) {
			/*
			 * NOTE(review): returns rc==0 on this error path;
			 * the REQUIRE_BASE checks below are the only thing
			 * that turns a missing peripheral into -EINVAL.
			 */
			dev_err(chip->dev, "spmi resource absent\n");
			return rc;
		}

		resource = spmi_get_resource(spmi, spmi_resource,
						IORESOURCE_MEM, 0);
		if (!(resource && resource->start)) {
			/* NOTE(review): same rc==0 return as above */
			dev_err(chip->dev, "node %s IO resource absent!\n",
				spmi->dev.of_node->full_name);
			return rc;
		}

		rc = smbchg_read(chip, &subtype,
				resource->start + SUBTYPE_REG, 1);
		if (rc) {
			dev_err(chip->dev,
				"Peripheral subtype read failed rc=%d\n",
					rc);
			return rc;
		}

		switch (subtype) {
		case SMBCHG_CHGR_SUBTYPE:
			chip->chgr_base = resource->start;
			break;
		case SMBCHG_BAT_IF_SUBTYPE:
			chip->bat_if_base = resource->start;
			break;
		case SMBCHG_USB_CHGPTH_SUBTYPE:
			chip->usb_chgpth_base = resource->start;
			break;
		case SMBCHG_DC_CHGPTH_SUBTYPE:
			chip->dc_chgpth_base = resource->start;
			break;
		case SMBCHG_MISC_SUBTYPE:
			chip->misc_base = resource->start;
			break;
		case SMBCHG_OTG_SUBTYPE:
			chip->otg_base = resource->start;
			break;
		}
	}

	REQUIRE_BASE(chip, chgr_base, rc);
	REQUIRE_BASE(chip, bat_if_base, rc);
	REQUIRE_BASE(chip, usb_chgpth_base, rc);
	REQUIRE_BASE(chip, dc_chgpth_base, rc);
	REQUIRE_BASE(chip, misc_base, rc);

	return rc;
}

/* Read one register and print it; read errors are ignored (debug-only). */
static inline void dump_reg(struct smbchg_chip *chip, u16 addr,
		const char *name)
{
	u8 reg;

	smbchg_read(chip, &reg, addr, 1);
	pr_smb(PR_DUMP, "%s - %04X = %02X\n", name, addr, reg);
}

/* dumps useful registers for debug */
static void dump_regs(struct smbchg_chip *chip)
{
	u16 addr;

	/* charger peripheral */
	for (addr = 0xB; addr <= 0x10; addr++)
		dump_reg(chip, chip->chgr_base + addr, "CHGR Status");
	for (addr = 0xF0; addr <= 0xFF; addr++)
		dump_reg(chip, chip->chgr_base + addr, "CHGR Config");
	/* battery interface peripheral */
	dump_reg(chip, chip->bat_if_base + RT_STS, "BAT_IF Status");
	dump_reg(chip, chip->bat_if_base + CMD_CHG_REG, "BAT_IF Command");
	for (addr = 0xF0; addr <= 0xFB; addr++)
		dump_reg(chip, chip->bat_if_base + addr, "BAT_IF Config");
	/* usb charge path peripheral */
	for (addr = 0x7; addr <= 0x10; addr++)
		dump_reg(chip, chip->usb_chgpth_base + addr, "USB Status");
	dump_reg(chip, chip->usb_chgpth_base + CMD_IL, "USB Command");
	for (addr = 0xF0; addr <= 0xF5; addr++)
		dump_reg(chip, chip->usb_chgpth_base + addr, "USB Config");
	/* dc charge path peripheral */
	dump_reg(chip, chip->dc_chgpth_base + RT_STS, "DC Status");
	for (addr = 0xF0; addr <= 0xF6; addr++)
		dump_reg(chip, chip->dc_chgpth_base + addr, "DC Config");
	/* misc peripheral */
	dump_reg(chip, chip->misc_base + IDEV_STS, "MISC Status");
	dump_reg(chip, chip->misc_base + RT_STS, "MISC Status");
	for (addr = 0xF0; addr <= 0xF3; addr++)
		dump_reg(chip, chip->misc_base + addr, "MISC CFG");
}

/*
 * smbchg_probe() - driver probe: allocate state, parse DT, init hardware,
 * register the battery (and optional DC) power supplies, then request IRQs.
 * Defers until the "usb" power supply is available.
 */
static int smbchg_probe(struct spmi_device *spmi)
{
	int rc;
	struct smbchg_chip *chip;
	struct power_supply *usb_psy;

	usb_psy = power_supply_get_by_name("usb");
	if (!usb_psy) {
		pr_smb(PR_STATUS, "USB supply not found, deferring probe\n");
		return -EPROBE_DEFER;
	}

	chip = devm_kzalloc(&spmi->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip) {
		dev_err(&spmi->dev, "Unable to allocate memory\n");
		return -ENOMEM;
	}

	INIT_WORK(&chip->usb_set_online_work, smbchg_usb_update_online_work);
	INIT_DELAYED_WORK(&chip->parallel_en_work,
			smbchg_parallel_usb_en_work);
	chip->spmi = spmi;
	chip->dev = &spmi->dev;
	chip->usb_psy = usb_psy;
	/* -EINVAL marks "not set"; real values arrive via sysfs/psy later */
	chip->fake_battery_soc = -EINVAL;
	chip->usb_online = -EINVAL;
	dev_set_drvdata(&spmi->dev, chip);

	spin_lock_init(&chip->sec_access_lock);
	mutex_init(&chip->current_change_lock);
	mutex_init(&chip->usb_set_online_lock);
	mutex_init(&chip->usb_en_lock);
	mutex_init(&chip->dc_en_lock);
	mutex_init(&chip->parallel.lock);
	mutex_init(&chip->taper_irq_lock);
	mutex_init(&chip->pm_lock);

	rc = smbchg_parse_peripherals(chip);
	if (rc) {
		dev_err(chip->dev, "Error parsing DT peripherals: %d\n", rc);
		return rc;
	}
	rc = smb_parse_dt(chip);
	if (rc < 0) {
		dev_err(&spmi->dev, "Unable to parse DT nodes: %d\n", rc);
		return rc;
	}

	rc = smbchg_regulator_init(chip);
	if (rc) {
		dev_err(&spmi->dev,
			"Couldn't initialize regulator rc=%d\n", rc);
		return rc;
	}

	rc = smbchg_hw_init(chip);
	if (rc < 0) {
		dev_err(&spmi->dev,
			"Unable to intialize hardware rc = %d\n", rc);
		goto free_regulator;
	}

	rc = determine_initial_status(chip);
	if (rc < 0) {
		dev_err(&spmi->dev,
			"Unable to determine init status rc = %d\n", rc);
		goto free_regulator;
	}

	chip->batt_psy.name		= chip->battery_psy_name;
	chip->batt_psy.type		= POWER_SUPPLY_TYPE_BATTERY;
	chip->batt_psy.get_property	= smbchg_battery_get_property;
	chip->batt_psy.set_property	= smbchg_battery_set_property;
	chip->batt_psy.properties	= smbchg_battery_properties;
	chip->batt_psy.num_properties	= ARRAY_SIZE(smbchg_battery_properties);
	chip->batt_psy.external_power_changed = smbchg_external_power_changed;
	chip->batt_psy.property_is_writeable = smbchg_battery_is_writeable;

	rc = power_supply_register(chip->dev, &chip->batt_psy);
	if (rc < 0) {
		dev_err(&spmi->dev,
			"Unable to register batt_psy rc = %d\n", rc);
		goto free_regulator;
	}

	/* register the DC supply only when DT configured a dc-psy-type */
	if (chip->dc_psy_type != -EINVAL) {
		chip->dc_psy.name	= "dc";
		chip->dc_psy.type	= chip->dc_psy_type;
		chip->dc_psy.get_property = smbchg_dc_get_property;
		chip->dc_psy.set_property = smbchg_dc_set_property;
		chip->dc_psy.property_is_writeable = smbchg_dc_is_writeable;
		chip->dc_psy.properties	= smbchg_dc_properties;
		chip->dc_psy.num_properties = ARRAY_SIZE(smbchg_dc_properties);
		rc = power_supply_register(chip->dev, &chip->dc_psy);
		if (rc < 0) {
			dev_err(&spmi->dev,
				"Unable to register dc_psy rc = %d\n", rc);
			goto unregister_batt_psy;
		}
	}
	chip->psy_registered = true;

	rc = smbchg_request_irqs(chip);
	if (rc < 0) {
		dev_err(&spmi->dev, "Unable to request irqs rc = %d\n", rc);
		/*
		 * NOTE(review): this path unregisters dc_psy even when
		 * dc_psy_type == -EINVAL and dc_psy was never registered --
		 * confirm whether power_supply_unregister tolerates that.
		 */
		goto unregister_dc_psy;
	}

	power_supply_set_present(chip->usb_psy, chip->usb_present);

	dump_regs(chip);
	dev_info(chip->dev,
		"SMBCHG successfully probed batt=%d dc = %d usb = %d\n",
			get_prop_batt_present(chip),
			chip->dc_present, chip->usb_present);
	return 0;

unregister_dc_psy:
	power_supply_unregister(&chip->dc_psy);
unregister_batt_psy:
	power_supply_unregister(&chip->batt_psy);
free_regulator:
	smbchg_regulator_deinit(chip);
	handle_usb_removal(chip);
	return rc;
}

/* Undo probe: unregister supplies (DC only if it was created) and the
 * regulator; devm handles memory and IRQs. */
static int smbchg_remove(struct spmi_device *spmi)
{
	struct smbchg_chip *chip = dev_get_drvdata(&spmi->dev);

	if (chip->dc_psy_type != -EINVAL)
		power_supply_unregister(&chip->dc_psy);

	power_supply_unregister(&chip->batt_psy);
	smbchg_regulator_deinit(chip);

	return 0;
}

/* No suspend/resume callbacks are implemented */
static const struct dev_pm_ops smbchg_pm_ops = {
};

/*
 * NOTE(review): 'smbchg_id' is not defined anywhere in this chunk; the match
 * table above is named smbchg_match_table -- confirm smbchg_id exists
 * elsewhere or whether this should reference smbchg_match_table.
 */
MODULE_DEVICE_TABLE(spmi, smbchg_id);

static struct spmi_driver smbchg_driver = {
	.driver		= {
		.name		= "qpnp-smbcharger",
		.owner		= THIS_MODULE,
		.of_match_table	= smbchg_match_table,
		.pm		= &smbchg_pm_ops,
	},
	.probe		= smbchg_probe,
	.remove		= smbchg_remove,
};

static int __init smbchg_init(void)
{
	return spmi_driver_register(&smbchg_driver);
}

static void __exit smbchg_exit(void)
{
	return spmi_driver_unregister(&smbchg_driver);
}

module_init(smbchg_init);
module_exit(smbchg_exit);

MODULE_DESCRIPTION("QPNP SMB Charger");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qpnp-smbcharger");
gpl-2.0
mlachwani/Android_4.4.2_MotoG_Kernel
sound/pci/hda/patch_sigmatel.c
2719
186831
/* * Universal Interface for Intel High Definition Audio Codec * * HD audio interface patch for SigmaTel STAC92xx * * Copyright (c) 2005 Embedded Alley Solutions, Inc. * Matt Porter <mporter@embeddedalley.com> * * Based on patch_cmedia.c and patch_realtek.c * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de> * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/dmi.h> #include <linux/module.h> #include <sound/core.h> #include <sound/asoundef.h> #include <sound/jack.h> #include <sound/tlv.h> #include "hda_codec.h" #include "hda_local.h" #include "hda_beep.h" #include "hda_jack.h" enum { STAC_VREF_EVENT = 1, STAC_INSERT_EVENT, STAC_PWR_EVENT, STAC_HP_EVENT, STAC_LO_EVENT, STAC_MIC_EVENT, }; enum { STAC_AUTO, STAC_REF, STAC_9200_OQO, STAC_9200_DELL_D21, STAC_9200_DELL_D22, STAC_9200_DELL_D23, STAC_9200_DELL_M21, STAC_9200_DELL_M22, STAC_9200_DELL_M23, STAC_9200_DELL_M24, STAC_9200_DELL_M25, STAC_9200_DELL_M26, STAC_9200_DELL_M27, STAC_9200_M4, STAC_9200_M4_2, STAC_9200_PANASONIC, STAC_9200_MODELS }; enum { STAC_9205_AUTO, STAC_9205_REF, STAC_9205_DELL_M42, STAC_9205_DELL_M43, STAC_9205_DELL_M44, STAC_9205_EAPD, STAC_9205_MODELS }; enum { STAC_92HD73XX_AUTO, STAC_92HD73XX_NO_JD, /* no jack-detection */ STAC_92HD73XX_REF, 
STAC_92HD73XX_INTEL, STAC_DELL_M6_AMIC, STAC_DELL_M6_DMIC, STAC_DELL_M6_BOTH, STAC_DELL_EQ, STAC_ALIENWARE_M17X, STAC_92HD73XX_MODELS }; enum { STAC_92HD83XXX_AUTO, STAC_92HD83XXX_REF, STAC_92HD83XXX_PWR_REF, STAC_DELL_S14, STAC_DELL_VOSTRO_3500, STAC_92HD83XXX_HP_cNB11_INTQUAD, STAC_HP_DV7_4000, STAC_HP_ZEPHYR, STAC_92HD83XXX_MODELS }; enum { STAC_92HD71BXX_AUTO, STAC_92HD71BXX_REF, STAC_DELL_M4_1, STAC_DELL_M4_2, STAC_DELL_M4_3, STAC_HP_M4, STAC_HP_DV4, STAC_HP_DV5, STAC_HP_HDX, STAC_HP_DV4_1222NR, STAC_92HD71BXX_MODELS }; enum { STAC_925x_AUTO, STAC_925x_REF, STAC_M1, STAC_M1_2, STAC_M2, STAC_M2_2, STAC_M3, STAC_M5, STAC_M6, STAC_925x_MODELS }; enum { STAC_922X_AUTO, STAC_D945_REF, STAC_D945GTP3, STAC_D945GTP5, STAC_INTEL_MAC_V1, STAC_INTEL_MAC_V2, STAC_INTEL_MAC_V3, STAC_INTEL_MAC_V4, STAC_INTEL_MAC_V5, STAC_INTEL_MAC_AUTO, /* This model is selected if no module parameter * is given, one of the above models will be * chosen according to the subsystem id. */ /* for backward compatibility */ STAC_MACMINI, STAC_MACBOOK, STAC_MACBOOK_PRO_V1, STAC_MACBOOK_PRO_V2, STAC_IMAC_INTEL, STAC_IMAC_INTEL_20, STAC_ECS_202, STAC_922X_DELL_D81, STAC_922X_DELL_D82, STAC_922X_DELL_M81, STAC_922X_DELL_M82, STAC_922X_MODELS }; enum { STAC_927X_AUTO, STAC_D965_REF_NO_JD, /* no jack-detection */ STAC_D965_REF, STAC_D965_3ST, STAC_D965_5ST, STAC_D965_5ST_NO_FP, STAC_DELL_3ST, STAC_DELL_BIOS, STAC_927X_VOLKNOB, STAC_927X_MODELS }; enum { STAC_9872_AUTO, STAC_9872_VAIO, STAC_9872_MODELS }; struct sigmatel_mic_route { hda_nid_t pin; signed char mux_idx; signed char dmux_idx; }; #define MAX_PINS_NUM 16 #define MAX_ADCS_NUM 4 #define MAX_DMICS_NUM 4 struct sigmatel_spec { struct snd_kcontrol_new *mixers[4]; unsigned int num_mixers; int board_config; unsigned int eapd_switch: 1; unsigned int surr_switch: 1; unsigned int alt_switch: 1; unsigned int hp_detect: 1; unsigned int spdif_mute: 1; unsigned int check_volume_offset:1; unsigned int auto_mic:1; unsigned int linear_tone_beep:1; /* gpio 
lines */ unsigned int eapd_mask; unsigned int gpio_mask; unsigned int gpio_dir; unsigned int gpio_data; unsigned int gpio_mute; unsigned int gpio_led; unsigned int gpio_led_polarity; unsigned int vref_mute_led_nid; /* pin NID for mute-LED vref control */ unsigned int vref_led; /* stream */ unsigned int stream_delay; /* analog loopback */ const struct snd_kcontrol_new *aloopback_ctl; unsigned char aloopback_mask; unsigned char aloopback_shift; /* power management */ unsigned int num_pwrs; const hda_nid_t *pwr_nids; const hda_nid_t *dac_list; /* playback */ struct hda_input_mux *mono_mux; unsigned int cur_mmux; struct hda_multi_out multiout; hda_nid_t dac_nids[5]; hda_nid_t hp_dacs[5]; hda_nid_t speaker_dacs[5]; int volume_offset; /* capture */ const hda_nid_t *adc_nids; unsigned int num_adcs; const hda_nid_t *mux_nids; unsigned int num_muxes; const hda_nid_t *dmic_nids; unsigned int num_dmics; const hda_nid_t *dmux_nids; unsigned int num_dmuxes; const hda_nid_t *smux_nids; unsigned int num_smuxes; unsigned int num_analog_muxes; const unsigned long *capvols; /* amp-volume attr: HDA_COMPOSE_AMP_VAL() */ const unsigned long *capsws; /* amp-mute attr: HDA_COMPOSE_AMP_VAL() */ unsigned int num_caps; /* number of capture volume/switch elements */ struct sigmatel_mic_route ext_mic; struct sigmatel_mic_route int_mic; struct sigmatel_mic_route dock_mic; const char * const *spdif_labels; hda_nid_t dig_in_nid; hda_nid_t mono_nid; hda_nid_t anabeep_nid; hda_nid_t digbeep_nid; /* pin widgets */ const hda_nid_t *pin_nids; unsigned int num_pins; /* codec specific stuff */ const struct hda_verb *init; const struct snd_kcontrol_new *mixer; /* capture source */ struct hda_input_mux *dinput_mux; unsigned int cur_dmux[2]; struct hda_input_mux *input_mux; unsigned int cur_mux[3]; struct hda_input_mux *sinput_mux; unsigned int cur_smux[2]; unsigned int cur_amux; hda_nid_t *amp_nids; unsigned int powerdown_adcs; /* i/o switches */ unsigned int io_switch[2]; unsigned int clfe_swap; 
hda_nid_t line_switch; /* shared line-in for input and output */ hda_nid_t mic_switch; /* shared mic-in for input and output */ hda_nid_t hp_switch; /* NID of HP as line-out */ unsigned int aloopback; struct hda_pcm pcm_rec[2]; /* PCM information */ /* dynamic controls and input_mux */ struct auto_pin_cfg autocfg; struct snd_array kctls; struct hda_input_mux private_dimux; struct hda_input_mux private_imux; struct hda_input_mux private_smux; struct hda_input_mux private_mono_mux; /* auto spec */ unsigned auto_pin_cnt; hda_nid_t auto_pin_nids[MAX_PINS_NUM]; unsigned auto_adc_cnt; hda_nid_t auto_adc_nids[MAX_ADCS_NUM]; hda_nid_t auto_mux_nids[MAX_ADCS_NUM]; hda_nid_t auto_dmux_nids[MAX_ADCS_NUM]; unsigned long auto_capvols[MAX_ADCS_NUM]; unsigned auto_dmic_cnt; hda_nid_t auto_dmic_nids[MAX_DMICS_NUM]; struct hda_vmaster_mute_hook vmaster_mute; }; static const hda_nid_t stac9200_adc_nids[1] = { 0x03, }; static const hda_nid_t stac9200_mux_nids[1] = { 0x0c, }; static const hda_nid_t stac9200_dac_nids[1] = { 0x02, }; static const hda_nid_t stac92hd73xx_pwr_nids[8] = { 0x0a, 0x0b, 0x0c, 0xd, 0x0e, 0x0f, 0x10, 0x11 }; static const hda_nid_t stac92hd73xx_slave_dig_outs[2] = { 0x26, 0, }; static const hda_nid_t stac92hd73xx_adc_nids[2] = { 0x1a, 0x1b }; #define STAC92HD73XX_NUM_DMICS 2 static const hda_nid_t stac92hd73xx_dmic_nids[STAC92HD73XX_NUM_DMICS + 1] = { 0x13, 0x14, 0 }; #define STAC92HD73_DAC_COUNT 5 static const hda_nid_t stac92hd73xx_mux_nids[2] = { 0x20, 0x21, }; static const hda_nid_t stac92hd73xx_dmux_nids[2] = { 0x20, 0x21, }; static const hda_nid_t stac92hd73xx_smux_nids[2] = { 0x22, 0x23, }; #define STAC92HD73XX_NUM_CAPS 2 static const unsigned long stac92hd73xx_capvols[] = { HDA_COMPOSE_AMP_VAL(0x20, 3, 0, HDA_OUTPUT), HDA_COMPOSE_AMP_VAL(0x21, 3, 0, HDA_OUTPUT), }; #define stac92hd73xx_capsws stac92hd73xx_capvols #define STAC92HD83_DAC_COUNT 3 static const hda_nid_t stac92hd83xxx_pwr_nids[7] = { 0x0a, 0x0b, 0x0c, 0xd, 0x0e, 0x0f, 0x10 }; static const 
hda_nid_t stac92hd83xxx_slave_dig_outs[2] = { 0x1e, 0, }; static const hda_nid_t stac92hd83xxx_dmic_nids[] = { 0x11, 0x20, }; static const hda_nid_t stac92hd71bxx_pwr_nids[3] = { 0x0a, 0x0d, 0x0f }; static const hda_nid_t stac92hd71bxx_adc_nids[2] = { 0x12, 0x13, }; static const hda_nid_t stac92hd71bxx_mux_nids[2] = { 0x1a, 0x1b }; static const hda_nid_t stac92hd71bxx_dmux_nids[2] = { 0x1c, 0x1d, }; static const hda_nid_t stac92hd71bxx_smux_nids[2] = { 0x24, 0x25, }; #define STAC92HD71BXX_NUM_DMICS 2 static const hda_nid_t stac92hd71bxx_dmic_nids[STAC92HD71BXX_NUM_DMICS + 1] = { 0x18, 0x19, 0 }; static const hda_nid_t stac92hd71bxx_dmic_5port_nids[STAC92HD71BXX_NUM_DMICS] = { 0x18, 0 }; static const hda_nid_t stac92hd71bxx_slave_dig_outs[2] = { 0x22, 0 }; #define STAC92HD71BXX_NUM_CAPS 2 static const unsigned long stac92hd71bxx_capvols[] = { HDA_COMPOSE_AMP_VAL(0x1c, 3, 0, HDA_OUTPUT), HDA_COMPOSE_AMP_VAL(0x1d, 3, 0, HDA_OUTPUT), }; #define stac92hd71bxx_capsws stac92hd71bxx_capvols static const hda_nid_t stac925x_adc_nids[1] = { 0x03, }; static const hda_nid_t stac925x_mux_nids[1] = { 0x0f, }; static const hda_nid_t stac925x_dac_nids[1] = { 0x02, }; #define STAC925X_NUM_DMICS 1 static const hda_nid_t stac925x_dmic_nids[STAC925X_NUM_DMICS + 1] = { 0x15, 0 }; static const hda_nid_t stac925x_dmux_nids[1] = { 0x14, }; static const unsigned long stac925x_capvols[] = { HDA_COMPOSE_AMP_VAL(0x09, 3, 0, HDA_OUTPUT), }; static const unsigned long stac925x_capsws[] = { HDA_COMPOSE_AMP_VAL(0x14, 3, 0, HDA_OUTPUT), }; static const hda_nid_t stac922x_adc_nids[2] = { 0x06, 0x07, }; static const hda_nid_t stac922x_mux_nids[2] = { 0x12, 0x13, }; #define STAC922X_NUM_CAPS 2 static const unsigned long stac922x_capvols[] = { HDA_COMPOSE_AMP_VAL(0x17, 3, 0, HDA_INPUT), HDA_COMPOSE_AMP_VAL(0x18, 3, 0, HDA_INPUT), }; #define stac922x_capsws stac922x_capvols static const hda_nid_t stac927x_slave_dig_outs[2] = { 0x1f, 0, }; static const hda_nid_t stac927x_adc_nids[3] = { 0x07, 0x08, 0x09 
}; static const hda_nid_t stac927x_mux_nids[3] = { 0x15, 0x16, 0x17 }; static const hda_nid_t stac927x_smux_nids[1] = { 0x21, }; static const hda_nid_t stac927x_dac_nids[6] = { 0x02, 0x03, 0x04, 0x05, 0x06, 0 }; static const hda_nid_t stac927x_dmux_nids[1] = { 0x1b, }; #define STAC927X_NUM_DMICS 2 static const hda_nid_t stac927x_dmic_nids[STAC927X_NUM_DMICS + 1] = { 0x13, 0x14, 0 }; #define STAC927X_NUM_CAPS 3 static const unsigned long stac927x_capvols[] = { HDA_COMPOSE_AMP_VAL(0x18, 3, 0, HDA_INPUT), HDA_COMPOSE_AMP_VAL(0x19, 3, 0, HDA_INPUT), HDA_COMPOSE_AMP_VAL(0x1a, 3, 0, HDA_INPUT), }; static const unsigned long stac927x_capsws[] = { HDA_COMPOSE_AMP_VAL(0x1b, 3, 0, HDA_OUTPUT), HDA_COMPOSE_AMP_VAL(0x1c, 3, 0, HDA_OUTPUT), HDA_COMPOSE_AMP_VAL(0x1d, 3, 0, HDA_OUTPUT), }; static const char * const stac927x_spdif_labels[5] = { "Digital Playback", "ADAT", "Analog Mux 1", "Analog Mux 2", "Analog Mux 3" }; static const hda_nid_t stac9205_adc_nids[2] = { 0x12, 0x13 }; static const hda_nid_t stac9205_mux_nids[2] = { 0x19, 0x1a }; static const hda_nid_t stac9205_dmux_nids[1] = { 0x1d, }; static const hda_nid_t stac9205_smux_nids[1] = { 0x21, }; #define STAC9205_NUM_DMICS 2 static const hda_nid_t stac9205_dmic_nids[STAC9205_NUM_DMICS + 1] = { 0x17, 0x18, 0 }; #define STAC9205_NUM_CAPS 2 static const unsigned long stac9205_capvols[] = { HDA_COMPOSE_AMP_VAL(0x1b, 3, 0, HDA_INPUT), HDA_COMPOSE_AMP_VAL(0x1c, 3, 0, HDA_INPUT), }; static const unsigned long stac9205_capsws[] = { HDA_COMPOSE_AMP_VAL(0x1d, 3, 0, HDA_OUTPUT), HDA_COMPOSE_AMP_VAL(0x1e, 3, 0, HDA_OUTPUT), }; static const hda_nid_t stac9200_pin_nids[8] = { 0x08, 0x09, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, }; static const hda_nid_t stac925x_pin_nids[8] = { 0x07, 0x08, 0x0a, 0x0b, 0x0c, 0x0d, 0x10, 0x11, }; static const hda_nid_t stac922x_pin_nids[10] = { 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x15, 0x1b, }; static const hda_nid_t stac92hd73xx_pin_nids[13] = { 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 
0x12, 0x13, 0x14, 0x22, 0x23 }; #define STAC92HD71BXX_NUM_PINS 13 static const hda_nid_t stac92hd71bxx_pin_nids_4port[STAC92HD71BXX_NUM_PINS] = { 0x0a, 0x0b, 0x0c, 0x0d, 0x00, 0x00, 0x14, 0x18, 0x19, 0x1e, 0x1f, 0x20, 0x27 }; static const hda_nid_t stac92hd71bxx_pin_nids_6port[STAC92HD71BXX_NUM_PINS] = { 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x14, 0x18, 0x19, 0x1e, 0x1f, 0x20, 0x27 }; static const hda_nid_t stac927x_pin_nids[14] = { 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x21, 0x22, 0x23, }; static const hda_nid_t stac9205_pin_nids[12] = { 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x14, 0x16, 0x17, 0x18, 0x21, 0x22, }; static int stac92xx_dmux_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; return snd_hda_input_mux_info(spec->dinput_mux, uinfo); } static int stac92xx_dmux_enum_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; unsigned int dmux_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); ucontrol->value.enumerated.item[0] = spec->cur_dmux[dmux_idx]; return 0; } static int stac92xx_dmux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; unsigned int dmux_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); return snd_hda_input_mux_put(codec, spec->dinput_mux, ucontrol, spec->dmux_nids[dmux_idx], &spec->cur_dmux[dmux_idx]); } static int stac92xx_smux_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; return snd_hda_input_mux_info(spec->sinput_mux, uinfo); } static int stac92xx_smux_enum_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { 
struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; unsigned int smux_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); ucontrol->value.enumerated.item[0] = spec->cur_smux[smux_idx]; return 0; } static int stac92xx_smux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; struct hda_input_mux *smux = &spec->private_smux; unsigned int smux_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); int err, val; hda_nid_t nid; err = snd_hda_input_mux_put(codec, spec->sinput_mux, ucontrol, spec->smux_nids[smux_idx], &spec->cur_smux[smux_idx]); if (err < 0) return err; if (spec->spdif_mute) { if (smux_idx == 0) nid = spec->multiout.dig_out_nid; else nid = codec->slave_dig_outs[smux_idx - 1]; if (spec->cur_smux[smux_idx] == smux->num_items - 1) val = HDA_AMP_MUTE; else val = 0; /* un/mute SPDIF out */ snd_hda_codec_amp_stereo(codec, nid, HDA_OUTPUT, 0, HDA_AMP_MUTE, val); } return 0; } static int stac_vrefout_set(struct hda_codec *codec, hda_nid_t nid, unsigned int new_vref) { int error, pinctl; snd_printdd("%s, nid %x ctl %x\n", __func__, nid, new_vref); pinctl = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_PIN_WIDGET_CONTROL, 0); if (pinctl < 0) return pinctl; pinctl &= 0xff; pinctl &= ~AC_PINCTL_VREFEN; pinctl |= (new_vref & AC_PINCTL_VREFEN); error = snd_hda_codec_write_cache(codec, nid, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, pinctl); if (error < 0) return error; return 1; } static unsigned int stac92xx_vref_set(struct hda_codec *codec, hda_nid_t nid, unsigned int new_vref) { int error; unsigned int pincfg; pincfg = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_PIN_WIDGET_CONTROL, 0); pincfg &= 0xff; pincfg &= ~(AC_PINCTL_VREFEN | AC_PINCTL_IN_EN | AC_PINCTL_OUT_EN); pincfg |= new_vref; if (new_vref == AC_PINCTL_VREF_HIZ) pincfg |= AC_PINCTL_OUT_EN; else pincfg |= AC_PINCTL_IN_EN; error = 
snd_hda_codec_write_cache(codec, nid, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, pincfg); if (error < 0) return error; else return 1; } static unsigned int stac92xx_vref_get(struct hda_codec *codec, hda_nid_t nid) { unsigned int vref; vref = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_PIN_WIDGET_CONTROL, 0); vref &= AC_PINCTL_VREFEN; return vref; } static int stac92xx_mux_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; return snd_hda_input_mux_info(spec->input_mux, uinfo); } static int stac92xx_mux_enum_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); ucontrol->value.enumerated.item[0] = spec->cur_mux[adc_idx]; return 0; } static int stac92xx_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); const struct hda_input_mux *imux = spec->input_mux; unsigned int idx, prev_idx, didx; idx = ucontrol->value.enumerated.item[0]; if (idx >= imux->num_items) idx = imux->num_items - 1; prev_idx = spec->cur_mux[adc_idx]; if (prev_idx == idx) return 0; if (idx < spec->num_analog_muxes) { snd_hda_codec_write_cache(codec, spec->mux_nids[adc_idx], 0, AC_VERB_SET_CONNECT_SEL, imux->items[idx].index); if (prev_idx >= spec->num_analog_muxes && spec->mux_nids[adc_idx] != spec->dmux_nids[adc_idx]) { imux = spec->dinput_mux; /* 0 = analog */ snd_hda_codec_write_cache(codec, spec->dmux_nids[adc_idx], 0, AC_VERB_SET_CONNECT_SEL, imux->items[0].index); } } else { imux = spec->dinput_mux; /* first dimux item is hardcoded to select analog imux, * so lets skip it */ didx = idx - 
spec->num_analog_muxes + 1; snd_hda_codec_write_cache(codec, spec->dmux_nids[adc_idx], 0, AC_VERB_SET_CONNECT_SEL, imux->items[didx].index); } spec->cur_mux[adc_idx] = idx; return 1; } static int stac92xx_mono_mux_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; return snd_hda_input_mux_info(spec->mono_mux, uinfo); } static int stac92xx_mono_mux_enum_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; ucontrol->value.enumerated.item[0] = spec->cur_mmux; return 0; } static int stac92xx_mono_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; return snd_hda_input_mux_put(codec, spec->mono_mux, ucontrol, spec->mono_nid, &spec->cur_mmux); } #define stac92xx_aloopback_info snd_ctl_boolean_mono_info static int stac92xx_aloopback_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); struct sigmatel_spec *spec = codec->spec; ucontrol->value.integer.value[0] = !!(spec->aloopback & (spec->aloopback_mask << idx)); return 0; } static int stac92xx_aloopback_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); unsigned int dac_mode; unsigned int val, idx_val; idx_val = spec->aloopback_mask << idx; if (ucontrol->value.integer.value[0]) val = spec->aloopback | idx_val; else val = spec->aloopback & ~idx_val; if (spec->aloopback == val) return 0; spec->aloopback = val; /* Only return the bits defined 
by the shift value of the * first two bytes of the mask */ dac_mode = snd_hda_codec_read(codec, codec->afg, 0, kcontrol->private_value & 0xFFFF, 0x0); dac_mode >>= spec->aloopback_shift; if (spec->aloopback & idx_val) { snd_hda_power_up(codec); dac_mode |= idx_val; } else { snd_hda_power_down(codec); dac_mode &= ~idx_val; } snd_hda_codec_write_cache(codec, codec->afg, 0, kcontrol->private_value >> 16, dac_mode); return 1; } static const struct hda_verb stac9200_core_init[] = { /* set dac0mux for dac converter */ { 0x07, AC_VERB_SET_CONNECT_SEL, 0x00}, {} }; static const struct hda_verb stac9200_eapd_init[] = { /* set dac0mux for dac converter */ {0x07, AC_VERB_SET_CONNECT_SEL, 0x00}, {0x08, AC_VERB_SET_EAPD_BTLENABLE, 0x02}, {} }; static const struct hda_verb dell_eq_core_init[] = { /* set master volume to max value without distortion * and direct control */ { 0x1f, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xec}, {} }; static const struct hda_verb stac92hd73xx_core_init[] = { /* set master volume and direct control */ { 0x1f, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff}, {} }; static const struct hda_verb stac92hd83xxx_core_init[] = { /* power state controls amps */ { 0x01, AC_VERB_SET_EAPD, 1 << 2}, {} }; static const struct hda_verb stac92hd83xxx_hp_zephyr_init[] = { { 0x22, 0x785, 0x43 }, { 0x22, 0x782, 0xe0 }, { 0x22, 0x795, 0x00 }, {} }; static const struct hda_verb stac92hd71bxx_core_init[] = { /* set master volume and direct control */ { 0x28, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff}, {} }; static const struct hda_verb stac92hd71bxx_unmute_core_init[] = { /* unmute right and left channels for nodes 0x0f, 0xa, 0x0d */ { 0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, { 0x0a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, { 0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, {} }; static const struct hda_verb stac925x_core_init[] = { /* set dac0mux for dac converter */ { 0x06, AC_VERB_SET_CONNECT_SEL, 0x00}, /* mute the master volume */ { 0x0e, AC_VERB_SET_AMP_GAIN_MUTE, 
AMP_OUT_MUTE }, {} }; static const struct hda_verb stac922x_core_init[] = { /* set master volume and direct control */ { 0x16, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff}, {} }; static const struct hda_verb d965_core_init[] = { /* set master volume and direct control */ { 0x24, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff}, /* unmute node 0x1b */ { 0x1b, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000}, /* select node 0x03 as DAC */ { 0x0b, AC_VERB_SET_CONNECT_SEL, 0x01}, {} }; static const struct hda_verb dell_3st_core_init[] = { /* don't set delta bit */ {0x24, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0x7f}, /* unmute node 0x1b */ {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000}, /* select node 0x03 as DAC */ {0x0b, AC_VERB_SET_CONNECT_SEL, 0x01}, {} }; static const struct hda_verb stac927x_core_init[] = { /* set master volume and direct control */ { 0x24, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff}, /* enable analog pc beep path */ { 0x01, AC_VERB_SET_DIGI_CONVERT_2, 1 << 5}, {} }; static const struct hda_verb stac927x_volknob_core_init[] = { /* don't set delta bit */ {0x24, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0x7f}, /* enable analog pc beep path */ {0x01, AC_VERB_SET_DIGI_CONVERT_2, 1 << 5}, {} }; static const struct hda_verb stac9205_core_init[] = { /* set master volume and direct control */ { 0x24, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff}, /* enable analog pc beep path */ { 0x01, AC_VERB_SET_DIGI_CONVERT_2, 1 << 5}, {} }; #define STAC_MONO_MUX \ { \ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ .name = "Mono Mux", \ .count = 1, \ .info = stac92xx_mono_mux_enum_info, \ .get = stac92xx_mono_mux_enum_get, \ .put = stac92xx_mono_mux_enum_put, \ } #define STAC_ANALOG_LOOPBACK(verb_read, verb_write, cnt) \ { \ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ .name = "Analog Loopback", \ .count = cnt, \ .info = stac92xx_aloopback_info, \ .get = stac92xx_aloopback_get, \ .put = stac92xx_aloopback_put, \ .private_value = verb_read | (verb_write << 16), \ } #define DC_BIAS(xname, idx, nid) \ { \ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ 
.name = xname, \ .index = idx, \ .info = stac92xx_dc_bias_info, \ .get = stac92xx_dc_bias_get, \ .put = stac92xx_dc_bias_put, \ .private_value = nid, \ } static const struct snd_kcontrol_new stac9200_mixer[] = { HDA_CODEC_VOLUME_MIN_MUTE("PCM Playback Volume", 0xb, 0, HDA_OUTPUT), HDA_CODEC_MUTE("PCM Playback Switch", 0xb, 0, HDA_OUTPUT), HDA_CODEC_VOLUME("Capture Volume", 0x0a, 0, HDA_OUTPUT), HDA_CODEC_MUTE("Capture Switch", 0x0a, 0, HDA_OUTPUT), { } /* end */ }; static const struct snd_kcontrol_new stac92hd73xx_6ch_loopback[] = { STAC_ANALOG_LOOPBACK(0xFA0, 0x7A1, 3), {} }; static const struct snd_kcontrol_new stac92hd73xx_8ch_loopback[] = { STAC_ANALOG_LOOPBACK(0xFA0, 0x7A1, 4), {} }; static const struct snd_kcontrol_new stac92hd73xx_10ch_loopback[] = { STAC_ANALOG_LOOPBACK(0xFA0, 0x7A1, 5), {} }; static const struct snd_kcontrol_new stac92hd71bxx_loopback[] = { STAC_ANALOG_LOOPBACK(0xFA0, 0x7A0, 2) }; static const struct snd_kcontrol_new stac925x_mixer[] = { HDA_CODEC_VOLUME_MIN_MUTE("PCM Playback Volume", 0xe, 0, HDA_OUTPUT), HDA_CODEC_MUTE("PCM Playback Switch", 0x0e, 0, HDA_OUTPUT), { } /* end */ }; static const struct snd_kcontrol_new stac9205_loopback[] = { STAC_ANALOG_LOOPBACK(0xFE0, 0x7E0, 1), {} }; static const struct snd_kcontrol_new stac927x_loopback[] = { STAC_ANALOG_LOOPBACK(0xFEB, 0x7EB, 1), {} }; static struct snd_kcontrol_new stac_dmux_mixer = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Digital Input Source", /* count set later */ .info = stac92xx_dmux_enum_info, .get = stac92xx_dmux_enum_get, .put = stac92xx_dmux_enum_put, }; static struct snd_kcontrol_new stac_smux_mixer = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "IEC958 Playback Source", /* count set later */ .info = stac92xx_smux_enum_info, .get = stac92xx_smux_enum_get, .put = stac92xx_smux_enum_put, }; static const char * const slave_pfxs[] = { "Front", "Surround", "Center", "LFE", "Side", "Headphone", "Speaker", "IEC958", NULL }; static void stac92xx_update_led_status(struct 
hda_codec *codec, int enabled); static void stac92xx_vmaster_hook(void *private_data, int val) { stac92xx_update_led_status(private_data, val); } static void stac92xx_free_kctls(struct hda_codec *codec); static int stac92xx_build_controls(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; unsigned int vmaster_tlv[4]; int err; int i; if (spec->mixer) { err = snd_hda_add_new_ctls(codec, spec->mixer); if (err < 0) return err; } for (i = 0; i < spec->num_mixers; i++) { err = snd_hda_add_new_ctls(codec, spec->mixers[i]); if (err < 0) return err; } if (!spec->auto_mic && spec->num_dmuxes > 0 && snd_hda_get_bool_hint(codec, "separate_dmux") == 1) { stac_dmux_mixer.count = spec->num_dmuxes; err = snd_hda_ctl_add(codec, 0, snd_ctl_new1(&stac_dmux_mixer, codec)); if (err < 0) return err; } if (spec->num_smuxes > 0) { int wcaps = get_wcaps(codec, spec->multiout.dig_out_nid); struct hda_input_mux *smux = &spec->private_smux; /* check for mute support on SPDIF out */ if (wcaps & AC_WCAP_OUT_AMP) { snd_hda_add_imux_item(smux, "Off", 0, NULL); spec->spdif_mute = 1; } stac_smux_mixer.count = spec->num_smuxes; err = snd_hda_ctl_add(codec, 0, snd_ctl_new1(&stac_smux_mixer, codec)); if (err < 0) return err; } if (spec->multiout.dig_out_nid) { err = snd_hda_create_spdif_out_ctls(codec, spec->multiout.dig_out_nid, spec->multiout.dig_out_nid); if (err < 0) return err; err = snd_hda_create_spdif_share_sw(codec, &spec->multiout); if (err < 0) return err; spec->multiout.share_spdif = 1; } if (spec->dig_in_nid && !(spec->gpio_dir & 0x01)) { err = snd_hda_create_spdif_in_ctls(codec, spec->dig_in_nid); if (err < 0) return err; } /* if we have no master control, let's create it */ snd_hda_set_vmaster_tlv(codec, spec->multiout.dac_nids[0], HDA_OUTPUT, vmaster_tlv); /* correct volume offset */ vmaster_tlv[2] += vmaster_tlv[3] * spec->volume_offset; /* minimum value is actually mute */ vmaster_tlv[3] |= TLV_DB_SCALE_MUTE; err = snd_hda_add_vmaster(codec, "Master Playback 
Volume", vmaster_tlv, slave_pfxs, "Playback Volume"); if (err < 0) return err; err = __snd_hda_add_vmaster(codec, "Master Playback Switch", NULL, slave_pfxs, "Playback Switch", true, &spec->vmaster_mute.sw_kctl); if (err < 0) return err; if (spec->gpio_led) { spec->vmaster_mute.hook = stac92xx_vmaster_hook; err = snd_hda_add_vmaster_hook(codec, &spec->vmaster_mute, true); if (err < 0) return err; } if (spec->aloopback_ctl && snd_hda_get_bool_hint(codec, "loopback") == 1) { err = snd_hda_add_new_ctls(codec, spec->aloopback_ctl); if (err < 0) return err; } stac92xx_free_kctls(codec); /* no longer needed */ err = snd_hda_jack_add_kctls(codec, &spec->autocfg); if (err < 0) return err; return 0; } static const unsigned int ref9200_pin_configs[8] = { 0x01c47010, 0x01447010, 0x0221401f, 0x01114010, 0x02a19020, 0x01a19021, 0x90100140, 0x01813122, }; static const unsigned int gateway9200_m4_pin_configs[8] = { 0x400000fe, 0x404500f4, 0x400100f0, 0x90110010, 0x400100f1, 0x02a1902e, 0x500000f2, 0x500000f3, }; static const unsigned int gateway9200_m4_2_pin_configs[8] = { 0x400000fe, 0x404500f4, 0x400100f0, 0x90110010, 0x400100f1, 0x02a1902e, 0x500000f2, 0x500000f3, }; /* STAC 9200 pin configs for 102801A8 102801DE 102801E8 */ static const unsigned int dell9200_d21_pin_configs[8] = { 0x400001f0, 0x400001f1, 0x02214030, 0x01014010, 0x02a19020, 0x01a19021, 0x90100140, 0x01813122, }; /* STAC 9200 pin configs for 102801C0 102801C1 */ static const unsigned int dell9200_d22_pin_configs[8] = { 0x400001f0, 0x400001f1, 0x0221401f, 0x01014010, 0x01813020, 0x02a19021, 0x90100140, 0x400001f2, }; /* STAC 9200 pin configs for 102801C4 (Dell Dimension E310) 102801C5 102801C7 102801D9 102801DA 102801E3 */ static const unsigned int dell9200_d23_pin_configs[8] = { 0x400001f0, 0x400001f1, 0x0221401f, 0x01014010, 0x01813020, 0x01a19021, 0x90100140, 0x400001f2, }; /* STAC 9200-32 pin configs for 102801B5 (Dell Inspiron 630m) 102801D8 (Dell Inspiron 640m) */ static const unsigned int 
dell9200_m21_pin_configs[8] = { 0x40c003fa, 0x03441340, 0x0321121f, 0x90170310, 0x408003fb, 0x03a11020, 0x401003fc, 0x403003fd, }; /* STAC 9200-32 pin configs for 102801C2 (Dell Latitude D620) 102801C8 102801CC (Dell Latitude D820) 102801D4 102801D6 */ static const unsigned int dell9200_m22_pin_configs[8] = { 0x40c003fa, 0x0144131f, 0x0321121f, 0x90170310, 0x90a70321, 0x03a11020, 0x401003fb, 0x40f000fc, }; /* STAC 9200-32 pin configs for 102801CE (Dell XPS M1710) 102801CF (Dell Precision M90) */ static const unsigned int dell9200_m23_pin_configs[8] = { 0x40c003fa, 0x01441340, 0x0421421f, 0x90170310, 0x408003fb, 0x04a1102e, 0x90170311, 0x403003fc, }; /* STAC 9200-32 pin configs for 102801C9 102801CA 102801CB (Dell Latitude 120L) 102801D3 */ static const unsigned int dell9200_m24_pin_configs[8] = { 0x40c003fa, 0x404003fb, 0x0321121f, 0x90170310, 0x408003fc, 0x03a11020, 0x401003fd, 0x403003fe, }; /* STAC 9200-32 pin configs for 102801BD (Dell Inspiron E1505n) 102801EE 102801EF */ static const unsigned int dell9200_m25_pin_configs[8] = { 0x40c003fa, 0x01441340, 0x0421121f, 0x90170310, 0x408003fb, 0x04a11020, 0x401003fc, 0x403003fd, }; /* STAC 9200-32 pin configs for 102801F5 (Dell Inspiron 1501) 102801F6 */ static const unsigned int dell9200_m26_pin_configs[8] = { 0x40c003fa, 0x404003fb, 0x0421121f, 0x90170310, 0x408003fc, 0x04a11020, 0x401003fd, 0x403003fe, }; /* STAC 9200-32 102801CD (Dell Inspiron E1705/9400) */ static const unsigned int dell9200_m27_pin_configs[8] = { 0x40c003fa, 0x01441340, 0x0421121f, 0x90170310, 0x90170310, 0x04a11020, 0x90170310, 0x40f003fc, }; static const unsigned int oqo9200_pin_configs[8] = { 0x40c000f0, 0x404000f1, 0x0221121f, 0x02211210, 0x90170111, 0x90a70120, 0x400000f2, 0x400000f3, }; static const unsigned int *stac9200_brd_tbl[STAC_9200_MODELS] = { [STAC_REF] = ref9200_pin_configs, [STAC_9200_OQO] = oqo9200_pin_configs, [STAC_9200_DELL_D21] = dell9200_d21_pin_configs, [STAC_9200_DELL_D22] = dell9200_d22_pin_configs, 
[STAC_9200_DELL_D23] = dell9200_d23_pin_configs, [STAC_9200_DELL_M21] = dell9200_m21_pin_configs, [STAC_9200_DELL_M22] = dell9200_m22_pin_configs, [STAC_9200_DELL_M23] = dell9200_m23_pin_configs, [STAC_9200_DELL_M24] = dell9200_m24_pin_configs, [STAC_9200_DELL_M25] = dell9200_m25_pin_configs, [STAC_9200_DELL_M26] = dell9200_m26_pin_configs, [STAC_9200_DELL_M27] = dell9200_m27_pin_configs, [STAC_9200_M4] = gateway9200_m4_pin_configs, [STAC_9200_M4_2] = gateway9200_m4_2_pin_configs, [STAC_9200_PANASONIC] = ref9200_pin_configs, }; static const char * const stac9200_models[STAC_9200_MODELS] = { [STAC_AUTO] = "auto", [STAC_REF] = "ref", [STAC_9200_OQO] = "oqo", [STAC_9200_DELL_D21] = "dell-d21", [STAC_9200_DELL_D22] = "dell-d22", [STAC_9200_DELL_D23] = "dell-d23", [STAC_9200_DELL_M21] = "dell-m21", [STAC_9200_DELL_M22] = "dell-m22", [STAC_9200_DELL_M23] = "dell-m23", [STAC_9200_DELL_M24] = "dell-m24", [STAC_9200_DELL_M25] = "dell-m25", [STAC_9200_DELL_M26] = "dell-m26", [STAC_9200_DELL_M27] = "dell-m27", [STAC_9200_M4] = "gateway-m4", [STAC_9200_M4_2] = "gateway-m4-2", [STAC_9200_PANASONIC] = "panasonic", }; static const struct snd_pci_quirk stac9200_cfg_tbl[] = { /* SigmaTel reference board */ SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668, "DFI LanParty", STAC_REF), SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101, "DFI LanParty", STAC_REF), /* Dell laptops have BIOS problem */ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01a8, "unknown Dell", STAC_9200_DELL_D21), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01b5, "Dell Inspiron 630m", STAC_9200_DELL_M21), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01bd, "Dell Inspiron E1505n", STAC_9200_DELL_M25), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01c0, "unknown Dell", STAC_9200_DELL_D22), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01c1, "unknown Dell", STAC_9200_DELL_D22), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01c2, "Dell Latitude D620", STAC_9200_DELL_M22), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01c5, "unknown Dell", STAC_9200_DELL_D23), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01c7, 
"unknown Dell", STAC_9200_DELL_D23), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01c8, "unknown Dell", STAC_9200_DELL_M22), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01c9, "unknown Dell", STAC_9200_DELL_M24), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ca, "unknown Dell", STAC_9200_DELL_M24), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01cb, "Dell Latitude 120L", STAC_9200_DELL_M24), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01cc, "Dell Latitude D820", STAC_9200_DELL_M22), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01cd, "Dell Inspiron E1705/9400", STAC_9200_DELL_M27), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ce, "Dell XPS M1710", STAC_9200_DELL_M23), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01cf, "Dell Precision M90", STAC_9200_DELL_M23), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01d3, "unknown Dell", STAC_9200_DELL_M22), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01d4, "unknown Dell", STAC_9200_DELL_M22), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01d6, "unknown Dell", STAC_9200_DELL_M22), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01d8, "Dell Inspiron 640m", STAC_9200_DELL_M21), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01d9, "unknown Dell", STAC_9200_DELL_D23), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01da, "unknown Dell", STAC_9200_DELL_D23), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01de, "unknown Dell", STAC_9200_DELL_D21), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01e3, "unknown Dell", STAC_9200_DELL_D23), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01e8, "unknown Dell", STAC_9200_DELL_D21), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ee, "unknown Dell", STAC_9200_DELL_M25), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ef, "unknown Dell", STAC_9200_DELL_M25), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f5, "Dell Inspiron 1501", STAC_9200_DELL_M26), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f6, "unknown Dell", STAC_9200_DELL_M26), /* Panasonic */ SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-74", STAC_9200_PANASONIC), /* Gateway machines needs EAPD to be set on resume */ SND_PCI_QUIRK(0x107b, 0x0205, "Gateway S-7110M", STAC_9200_M4), SND_PCI_QUIRK(0x107b, 0x0317, "Gateway MT3423, 
MX341*", STAC_9200_M4_2), SND_PCI_QUIRK(0x107b, 0x0318, "Gateway ML3019, MT3707", STAC_9200_M4_2), /* OQO Mobile */ SND_PCI_QUIRK(0x1106, 0x3288, "OQO Model 2", STAC_9200_OQO), {} /* terminator */ }; static const unsigned int ref925x_pin_configs[8] = { 0x40c003f0, 0x424503f2, 0x01813022, 0x02a19021, 0x90a70320, 0x02214210, 0x01019020, 0x9033032e, }; static const unsigned int stac925xM1_pin_configs[8] = { 0x40c003f4, 0x424503f2, 0x400000f3, 0x02a19020, 0x40a000f0, 0x90100210, 0x400003f1, 0x9033032e, }; static const unsigned int stac925xM1_2_pin_configs[8] = { 0x40c003f4, 0x424503f2, 0x400000f3, 0x02a19020, 0x40a000f0, 0x90100210, 0x400003f1, 0x9033032e, }; static const unsigned int stac925xM2_pin_configs[8] = { 0x40c003f4, 0x424503f2, 0x400000f3, 0x02a19020, 0x40a000f0, 0x90100210, 0x400003f1, 0x9033032e, }; static const unsigned int stac925xM2_2_pin_configs[8] = { 0x40c003f4, 0x424503f2, 0x400000f3, 0x02a19020, 0x40a000f0, 0x90100210, 0x400003f1, 0x9033032e, }; static const unsigned int stac925xM3_pin_configs[8] = { 0x40c003f4, 0x424503f2, 0x400000f3, 0x02a19020, 0x40a000f0, 0x90100210, 0x400003f1, 0x503303f3, }; static const unsigned int stac925xM5_pin_configs[8] = { 0x40c003f4, 0x424503f2, 0x400000f3, 0x02a19020, 0x40a000f0, 0x90100210, 0x400003f1, 0x9033032e, }; static const unsigned int stac925xM6_pin_configs[8] = { 0x40c003f4, 0x424503f2, 0x400000f3, 0x02a19020, 0x40a000f0, 0x90100210, 0x400003f1, 0x90330320, }; static const unsigned int *stac925x_brd_tbl[STAC_925x_MODELS] = { [STAC_REF] = ref925x_pin_configs, [STAC_M1] = stac925xM1_pin_configs, [STAC_M1_2] = stac925xM1_2_pin_configs, [STAC_M2] = stac925xM2_pin_configs, [STAC_M2_2] = stac925xM2_2_pin_configs, [STAC_M3] = stac925xM3_pin_configs, [STAC_M5] = stac925xM5_pin_configs, [STAC_M6] = stac925xM6_pin_configs, }; static const char * const stac925x_models[STAC_925x_MODELS] = { [STAC_925x_AUTO] = "auto", [STAC_REF] = "ref", [STAC_M1] = "m1", [STAC_M1_2] = "m1-2", [STAC_M2] = "m2", [STAC_M2_2] = "m2-2", 
	[STAC_M3] = "m3",
	[STAC_M5] = "m5",
	[STAC_M6] = "m6",
};

/* STAC925x: board model looked up by the codec's subsystem ID (Gateway) */
static const struct snd_pci_quirk stac925x_codec_id_cfg_tbl[] = {
	SND_PCI_QUIRK(0x107b, 0x0316, "Gateway M255", STAC_M2),
	SND_PCI_QUIRK(0x107b, 0x0366, "Gateway MP6954", STAC_M5),
	SND_PCI_QUIRK(0x107b, 0x0461, "Gateway NX560XL", STAC_M1),
	SND_PCI_QUIRK(0x107b, 0x0681, "Gateway NX860", STAC_M2),
	SND_PCI_QUIRK(0x107b, 0x0367, "Gateway MX6453", STAC_M1_2),
	/* Not sure about the brand name for those */
	SND_PCI_QUIRK(0x107b, 0x0281, "Gateway mobile", STAC_M1),
	SND_PCI_QUIRK(0x107b, 0x0507, "Gateway mobile", STAC_M3),
	/*
	 * NOTE(review): 0x107b/0x0281 is already listed above mapping to
	 * STAC_M1; if quirk lookup returns the first match, this STAC_M6
	 * entry is unreachable — confirm which ID was really intended.
	 */
	SND_PCI_QUIRK(0x107b, 0x0281, "Gateway mobile", STAC_M6),
	SND_PCI_QUIRK(0x107b, 0x0685, "Gateway mobile", STAC_M2_2),
	{} /* terminator */
};

/* STAC925x: board model looked up by PCI subsystem vendor/device ID */
static const struct snd_pci_quirk stac925x_cfg_tbl[] = {
	/* SigmaTel reference board */
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668, "DFI LanParty", STAC_REF),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101, "DFI LanParty", STAC_REF),
	SND_PCI_QUIRK(0x8384, 0x7632, "Stac9202 Reference Board", STAC_REF),

	/* Default table for unknown ID */
	SND_PCI_QUIRK(0x1002, 0x437b, "Gateway mobile", STAC_M2_2),
	{} /* terminator */
};

/*
 * 92HD73xx family: pin default configurations, one 32-bit pin-config
 * value per pin NID (applied via stac92xx_set_config_regs(), which
 * skips zero entries).
 */
static const unsigned int ref92hd73xx_pin_configs[13] = {
	0x02214030, 0x02a19040, 0x01a19020, 0x02214030,
	0x0181302e, 0x01014010, 0x01014020, 0x01014030,
	0x02319040, 0x90a000f0, 0x90a000f0, 0x01452050,
	0x01452050,
};

static const unsigned int dell_m6_pin_configs[13] = {
	0x0321101f, 0x4f00000f, 0x4f0000f0, 0x90170110,
	0x03a11020, 0x0321101f, 0x4f0000f0, 0x4f0000f0,
	0x4f0000f0, 0x90a60160, 0x4f0000f0, 0x4f0000f0,
	0x4f0000f0,
};

static const unsigned int alienware_m17x_pin_configs[13] = {
	0x0321101f, 0x0321101f, 0x03a11020, 0x03014020,
	0x90170110, 0x4f0000f0, 0x4f0000f0, 0x4f0000f0,
	0x4f0000f0, 0x90a60160, 0x4f0000f0, 0x4f0000f0,
	0x904601b0,
};

/* only 7 of 13 entries given; the rest are implicitly zero and thus
 * left untouched by stac92xx_set_config_regs() */
static const unsigned int intel_dg45id_pin_configs[13] = {
	0x02214230, 0x02A19240, 0x01013214, 0x01014210,
	0x01A19250, 0x01011212, 0x01016211
};

/* per-model pin-config table; unlisted model indices are implicitly NULL */
static const unsigned int *stac92hd73xx_brd_tbl[STAC_92HD73XX_MODELS] = {
	[STAC_92HD73XX_REF] = ref92hd73xx_pin_configs,
	[STAC_DELL_M6_AMIC] = dell_m6_pin_configs,
	[STAC_DELL_M6_DMIC] = dell_m6_pin_configs,
	[STAC_DELL_M6_BOTH] = dell_m6_pin_configs,
	[STAC_DELL_EQ]	= dell_m6_pin_configs,
	[STAC_ALIENWARE_M17X] = alienware_m17x_pin_configs,
	[STAC_92HD73XX_INTEL] = intel_dg45id_pin_configs,
};

/* model name strings, indexed by model ID */
static const char * const stac92hd73xx_models[STAC_92HD73XX_MODELS] = {
	[STAC_92HD73XX_AUTO] = "auto",
	[STAC_92HD73XX_NO_JD] = "no-jd",
	[STAC_92HD73XX_REF] = "ref",
	[STAC_92HD73XX_INTEL] = "intel",
	[STAC_DELL_M6_AMIC] = "dell-m6-amic",
	[STAC_DELL_M6_DMIC] = "dell-m6-dmic",
	[STAC_DELL_M6_BOTH] = "dell-m6",
	[STAC_DELL_EQ] = "dell-eq",
	[STAC_ALIENWARE_M17X] = "alienware",
};

/* 92HD73xx: board model looked up by PCI subsystem vendor/device ID */
static const struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
	/* SigmaTel reference board */
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668,
				"DFI LanParty", STAC_92HD73XX_REF),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101,
				"DFI LanParty", STAC_92HD73XX_REF),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5002,
				"Intel DG45ID", STAC_92HD73XX_INTEL),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5003,
				"Intel DG45FC", STAC_92HD73XX_INTEL),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0254,
				"Dell Studio 1535", STAC_DELL_M6_DMIC),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0255,
				"unknown Dell", STAC_DELL_M6_DMIC),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0256,
				"unknown Dell", STAC_DELL_M6_BOTH),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0257,
				"unknown Dell", STAC_DELL_M6_BOTH),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x025e,
				"unknown Dell", STAC_DELL_M6_AMIC),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x025f,
				"unknown Dell", STAC_DELL_M6_AMIC),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0271,
				"unknown Dell", STAC_DELL_M6_DMIC),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0272,
				"unknown Dell", STAC_DELL_M6_DMIC),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x029f,
				"Dell Studio 1537", STAC_DELL_M6_DMIC),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02a0,
				"Dell Studio 17", STAC_DELL_M6_DMIC),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02be,
				"Dell Studio 1555", STAC_DELL_M6_DMIC),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd,
				"Dell Studio 1557", STAC_DELL_M6_DMIC),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe,
				"Dell Studio XPS 1645", STAC_DELL_M6_DMIC),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413,
				"Dell Studio 1558", STAC_DELL_M6_DMIC),
	{} /* terminator */
};

/* 92HD73xx: board model looked up by the codec's subsystem ID */
static const struct snd_pci_quirk stac92hd73xx_codec_id_cfg_tbl[] = {
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02a1,
		      "Alienware M17x", STAC_ALIENWARE_M17X),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x043a,
		      "Alienware M17x", STAC_ALIENWARE_M17X),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
		      "Alienware M17x R3", STAC_DELL_EQ),
	{} /* terminator */
};

/* 92HD83xxx family: pin default configurations (10 pins) */
static const unsigned int ref92hd83xxx_pin_configs[10] = {
	0x02214030, 0x02211010, 0x02a19020, 0x02170130,
	0x01014050, 0x01819040, 0x01014020, 0x90a3014e,
	0x01451160, 0x98560170,
};

static const unsigned int dell_s14_pin_configs[10] = {
	0x0221403f, 0x0221101f, 0x02a19020, 0x90170110,
	0x40f000f0, 0x40f000f0, 0x40f000f0, 0x90a60160,
	0x40f000f0, 0x40f000f0,
};

static const unsigned int dell_vostro_3500_pin_configs[10] = {
	0x02a11020, 0x0221101f, 0x400000f0, 0x90170110,
	0x400000f1, 0x400000f2, 0x400000f3, 0x90a60160,
	0x400000f4, 0x400000f5,
};

static const unsigned int hp_dv7_4000_pin_configs[10] = {
	0x03a12050, 0x0321201f, 0x40f000f0, 0x90170110,
	0x40f000f0, 0x40f000f0, 0x90170110, 0xd5a30140,
	0x40f000f0, 0x40f000f0,
};

/* trailing zero entries are skipped by stac92xx_set_config_regs() */
static const unsigned int hp_zephyr_pin_configs[10] = {
	0x01813050, 0x0421201f, 0x04a1205e, 0x96130310,
	0x96130310, 0x0101401f, 0x1111611f, 0xd5a30130,
	0, 0,
};

static const unsigned int hp_cNB11_intquad_pin_configs[10] = {
	0x40f000f0, 0x0221101f, 0x02a11020, 0x92170110,
	0x40f000f0, 0x92170110, 0x40f000f0, 0xd5a30130,
	0x40f000f0, 0x40f000f0,
};

/* per-model pin-config table; unlisted model indices are implicitly NULL */
static const unsigned int *stac92hd83xxx_brd_tbl[STAC_92HD83XXX_MODELS] = {
	[STAC_92HD83XXX_REF] = ref92hd83xxx_pin_configs,
	[STAC_92HD83XXX_PWR_REF] = ref92hd83xxx_pin_configs,
	[STAC_DELL_S14] = dell_s14_pin_configs,
	[STAC_DELL_VOSTRO_3500] = dell_vostro_3500_pin_configs,
	[STAC_92HD83XXX_HP_cNB11_INTQUAD] =
hp_cNB11_intquad_pin_configs,
	[STAC_HP_DV7_4000] = hp_dv7_4000_pin_configs,
	[STAC_HP_ZEPHYR] = hp_zephyr_pin_configs,
};

/* model name strings, indexed by model ID */
static const char * const stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = {
	[STAC_92HD83XXX_AUTO] = "auto",
	[STAC_92HD83XXX_REF] = "ref",
	[STAC_92HD83XXX_PWR_REF] = "mic-ref",
	[STAC_DELL_S14] = "dell-s14",
	[STAC_DELL_VOSTRO_3500] = "dell-vostro-3500",
	[STAC_92HD83XXX_HP_cNB11_INTQUAD] = "hp_cNB11_intquad",
	[STAC_HP_DV7_4000] = "hp-dv7-4000",
	[STAC_HP_ZEPHYR] = "hp-zephyr",
};

/* 92HD83xxx: board model looked up by PCI subsystem vendor/device ID */
static const struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = {
	/* SigmaTel reference board */
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668,
		      "DFI LanParty", STAC_92HD83XXX_REF),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101,
		      "DFI LanParty", STAC_92HD83XXX_REF),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ba,
		      "unknown Dell", STAC_DELL_S14),
	/*
	 * NOTE(review): 0x1028 as a *subsystem device* ID is unusual --
	 * it is also Dell's vendor ID; verify this entry against real
	 * Vostro 3500 hardware.
	 */
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x1028,
		      "Dell Vostro 3500", STAC_DELL_VOSTRO_3500),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1656,
			  "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1657,
			  "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1658,
			  "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1659,
			  "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x165A,
			  "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x165B,
			  "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3388,
			  "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3389,
			  "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x355B,
			  "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x355C,
			  "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x355D,
			  "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x355E,
			  "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x355F,
			  "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3560,
			  "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x358B,
			  "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x358C,
			  "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x358D,
			  "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3591,
			  "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3592,
			  "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3593,
			  "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3561,
			  "HP", STAC_HP_ZEPHYR),
	{} /* terminator */
};

/* 92HD83xxx: board model looked up by the codec's subsystem ID */
static const struct snd_pci_quirk stac92hd83xxx_codec_id_cfg_tbl[] = {
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3561,
		      "HP", STAC_HP_ZEPHYR),
	{} /* terminator */
};

/* 92HD71Bxx family: pin default configurations */
static const unsigned int ref92hd71bxx_pin_configs[STAC92HD71BXX_NUM_PINS] = {
	0x02214030, 0x02a19040, 0x01a19020, 0x01014010,
	0x0181302e, 0x01014010, 0x01019020, 0x90a000f0,
	0x90a000f0, 0x01452050, 0x01452050, 0x00000000,
	0x00000000
};

static const unsigned int dell_m4_1_pin_configs[STAC92HD71BXX_NUM_PINS] = {
	0x0421101f, 0x04a11221, 0x40f000f0, 0x90170110,
	0x23a1902e, 0x23014250, 0x40f000f0, 0x90a000f0,
	0x40f000f0, 0x4f0000f0, 0x4f0000f0, 0x00000000,
	0x00000000
};

static const unsigned int dell_m4_2_pin_configs[STAC92HD71BXX_NUM_PINS] = {
	0x0421101f, 0x04a11221, 0x90a70330, 0x90170110,
	0x23a1902e, 0x23014250, 0x40f000f0, 0x40f000f0,
	0x40f000f0, 0x044413b0, 0x044413b0, 0x00000000,
	0x00000000
};

static const unsigned int dell_m4_3_pin_configs[STAC92HD71BXX_NUM_PINS] = {
	0x0421101f, 0x04a11221, 0x90a70330, 0x90170110,
	0x40f000f0, 0x40f000f0, 0x40f000f0, 0x90a000f0,
	0x40f000f0, 0x044413b0, 0x044413b0, 0x00000000,
	0x00000000
};

/* per-model pin-config table; NULL means no fixed pin overrides */
static const unsigned int *stac92hd71bxx_brd_tbl[STAC_92HD71BXX_MODELS] = {
	[STAC_92HD71BXX_REF] = ref92hd71bxx_pin_configs,
	[STAC_DELL_M4_1]	= dell_m4_1_pin_configs,
	[STAC_DELL_M4_2]	= dell_m4_2_pin_configs,
	[STAC_DELL_M4_3]	= dell_m4_3_pin_configs,
	[STAC_HP_M4]		= NULL,
	[STAC_HP_DV4]		= NULL,
	[STAC_HP_DV5]		= NULL,
	[STAC_HP_HDX]           = NULL,
	[STAC_HP_DV4_1222NR]	= NULL,
};

/* model name strings, indexed by model ID */
static const char * const stac92hd71bxx_models[STAC_92HD71BXX_MODELS] = {
	[STAC_92HD71BXX_AUTO] = "auto",
	[STAC_92HD71BXX_REF] = "ref",
	[STAC_DELL_M4_1] = "dell-m4-1",
	[STAC_DELL_M4_2] = "dell-m4-2",
	[STAC_DELL_M4_3] = "dell-m4-3",
	[STAC_HP_M4] = "hp-m4",
	[STAC_HP_DV4] = "hp-dv4",
	[STAC_HP_DV5] = "hp-dv5",
	[STAC_HP_HDX] = "hp-hdx",
	[STAC_HP_DV4_1222NR] = "hp-dv4-1222nr",
};

/* 92HD71Bxx: board model looked up by PCI subsystem vendor/device ID;
 * SND_PCI_QUIRK_MASK entries match a whole device-ID range */
static const struct snd_pci_quirk stac92hd71bxx_cfg_tbl[] = {
	/* SigmaTel reference board */
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668,
		      "DFI LanParty", STAC_92HD71BXX_REF),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101,
		      "DFI LanParty", STAC_92HD71BXX_REF),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x30fb,
		      "HP dv4-1222nr", STAC_HP_DV4_1222NR),
	SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x1720,
			  "HP", STAC_HP_DV5),
	SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x3080,
		      "HP", STAC_HP_DV5),
	SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x30f0,
		      "HP dv4-7", STAC_HP_DV4),
	SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x3600,
		      "HP dv4-7", STAC_HP_DV5),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3610,
		      "HP HDX", STAC_HP_HDX),  /* HDX18 */
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x361a,
		      "HP mini 1000", STAC_HP_M4),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x361b,
		      "HP HDX", STAC_HP_HDX),  /* HDX16 */
	SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x3620,
		      "HP dv6", STAC_HP_DV5),
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3061,
		      "HP dv6", STAC_HP_DV5), /* HP dv6-1110ax */
	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x363e,
		      "HP DV6", STAC_HP_DV5),
	SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x7010,
		      "HP", STAC_HP_DV5),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0233,
				"unknown Dell", STAC_DELL_M4_1),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0234,
				"unknown Dell", STAC_DELL_M4_1),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0250,
				"unknown Dell", STAC_DELL_M4_1),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x024f,
				"unknown Dell", STAC_DELL_M4_1),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x024d,
				"unknown Dell", STAC_DELL_M4_1),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0251,
				"unknown Dell", STAC_DELL_M4_1),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0277,
				"unknown Dell", STAC_DELL_M4_1),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0263,
				"unknown Dell", STAC_DELL_M4_2),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0265,
				"unknown Dell", STAC_DELL_M4_2),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0262,
				"unknown Dell", STAC_DELL_M4_2),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0264,
				"unknown Dell", STAC_DELL_M4_2),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02aa,
				"unknown Dell", STAC_DELL_M4_3),
	{} /* terminator */
};

/* STAC922x family: pin default configurations (10 pins) */
static const unsigned int ref922x_pin_configs[10] = {
	0x01014010, 0x01016011, 0x01012012, 0x0221401f,
	0x01813122, 0x01011014, 0x01441030, 0x01c41030,
	0x40000100, 0x40000100,
};

/*
    STAC 922X pin configs for
    102801A7
    102801AB
    102801A9
    102801D1
    102801D2
*/
static const unsigned int dell_922x_d81_pin_configs[10] = {
	0x02214030, 0x01a19021, 0x01111012, 0x01114010,
	0x02a19020, 0x01117011, 0x400001f0, 0x400001f1,
	0x01813122, 0x400001f2,
};

/*
    STAC 922X pin configs for
    102801AC
    102801D0
*/
static const unsigned int dell_922x_d82_pin_configs[10] = {
	0x02214030, 0x01a19021, 0x01111012, 0x01114010,
	0x02a19020, 0x01117011, 0x01451140, 0x400001f0,
	0x01813122, 0x400001f1,
};

/*
    STAC 922X pin configs for
    102801BF
*/
static const unsigned int dell_922x_m81_pin_configs[10] = {
	0x0321101f, 0x01112024, 0x01111222, 0x91174220,
	0x03a11050, 0x01116221, 0x90a70330, 0x01452340,
	0x40C003f1, 0x405003f0,
};

/*
    STAC 9221 A1 pin configs for
    102801D7 (Dell XPS M1210)
*/
static const unsigned int dell_922x_m82_pin_configs[10] = {
	0x02211211, 0x408103ff, 0x02a1123e, 0x90100310,
	0x408003f1, 0x0221121f, 0x03451340, 0x40c003f2,
	0x508003f3, 0x405003f4,
};

static const unsigned int d945gtp3_pin_configs[10] = {
	0x0221401f, 0x01a19022, 0x01813021, 0x01014010,
	0x40000100, 0x40000100, 0x40000100, 0x40000100,
	0x02a19120, 0x40000100,
};

static const unsigned int d945gtp5_pin_configs[10] = {
	0x0221401f, 0x01011012, 0x01813024, 0x01014010,
	0x01a19021, 0x01016011, 0x01452130, 0x40000100,
	0x02a19320, 0x40000100,
};

static const unsigned int intel_mac_v1_pin_configs[10] = {
	0x0121e21f, 0x400000ff, 0x9017e110, 0x400000fd,
	0x400000fe, 0x0181e020, 0x1145e030, 0x11c5e240,
	0x400000fc, 0x400000fb,
};

static const unsigned int intel_mac_v2_pin_configs[10] = {
	0x0121e21f, 0x90a7012e, 0x9017e110, 0x400000fd,
	0x400000fe, 0x0181e020, 0x1145e230, 0x500000fa,
	0x400000fc, 0x400000fb,
};

static const unsigned int intel_mac_v3_pin_configs[10] = {
	0x0121e21f, 0x90a7012e, 0x9017e110, 0x400000fd,
	0x400000fe, 0x0181e020, 0x1145e230, 0x11c5e240,
	0x400000fc, 0x400000fb,
};

static const unsigned int intel_mac_v4_pin_configs[10] = {
	0x0321e21f, 0x03a1e02e, 0x9017e110, 0x9017e11f,
	0x400000fe, 0x0381e020, 0x1345e230, 0x13c5e240,
	0x400000fc, 0x400000fb,
};

static const unsigned int intel_mac_v5_pin_configs[10] = {
	0x0321e21f, 0x03a1e02e, 0x9017e110, 0x9017e11f,
	0x400000fe, 0x0381e020, 0x1345e230, 0x13c5e240,
	0x400000fc, 0x400000fb,
};

static const unsigned int ecs202_pin_configs[10] = {
	0x0221401f, 0x02a19020, 0x01a19020, 0x01114010,
	0x408000f0, 0x01813022, 0x074510a0, 0x40c400f1,
	0x9037012e, 0x40e000f2,
};

/* per-model pin-config table; several Mac models alias the same configs */
static const unsigned int *stac922x_brd_tbl[STAC_922X_MODELS] = {
	[STAC_D945_REF] = ref922x_pin_configs,
	[STAC_D945GTP3] = d945gtp3_pin_configs,
	[STAC_D945GTP5] = d945gtp5_pin_configs,
	[STAC_INTEL_MAC_V1] = intel_mac_v1_pin_configs,
	[STAC_INTEL_MAC_V2] = intel_mac_v2_pin_configs,
	[STAC_INTEL_MAC_V3] = intel_mac_v3_pin_configs,
	[STAC_INTEL_MAC_V4] = intel_mac_v4_pin_configs,
	[STAC_INTEL_MAC_V5] = intel_mac_v5_pin_configs,
	[STAC_INTEL_MAC_AUTO] = intel_mac_v3_pin_configs,
	/* for backward compatibility */
	[STAC_MACMINI] = intel_mac_v3_pin_configs,
	[STAC_MACBOOK] = intel_mac_v5_pin_configs,
	[STAC_MACBOOK_PRO_V1] = intel_mac_v3_pin_configs,
	[STAC_MACBOOK_PRO_V2] = intel_mac_v3_pin_configs,
	[STAC_IMAC_INTEL] = intel_mac_v2_pin_configs,
	[STAC_IMAC_INTEL_20] = intel_mac_v3_pin_configs,
	[STAC_ECS_202] = ecs202_pin_configs,
	[STAC_922X_DELL_D81] = dell_922x_d81_pin_configs,
	[STAC_922X_DELL_D82] = dell_922x_d82_pin_configs,
	[STAC_922X_DELL_M81] = dell_922x_m81_pin_configs,
	[STAC_922X_DELL_M82] = dell_922x_m82_pin_configs,
};

/* model name strings, indexed by model ID */
static const char * const stac922x_models[STAC_922X_MODELS] = {
	[STAC_922X_AUTO] = "auto",
	[STAC_D945_REF]	= "ref",
	[STAC_D945GTP5]	= "5stack",
	[STAC_D945GTP3]	= "3stack",
	[STAC_INTEL_MAC_V1] = "intel-mac-v1",
	[STAC_INTEL_MAC_V2] = "intel-mac-v2",
	[STAC_INTEL_MAC_V3] = "intel-mac-v3",
	[STAC_INTEL_MAC_V4] = "intel-mac-v4",
	[STAC_INTEL_MAC_V5] = "intel-mac-v5",
	[STAC_INTEL_MAC_AUTO] = "intel-mac-auto",
	/* for backward compatibility */
	[STAC_MACMINI]	= "macmini",
	[STAC_MACBOOK]	= "macbook",
	[STAC_MACBOOK_PRO_V1]	= "macbook-pro-v1",
	[STAC_MACBOOK_PRO_V2]	= "macbook-pro",
	[STAC_IMAC_INTEL] = "imac-intel",
	[STAC_IMAC_INTEL_20] = "imac-intel-20",
	[STAC_ECS_202] = "ecs202",
	[STAC_922X_DELL_D81] = "dell-d81",
	[STAC_922X_DELL_D82] = "dell-d82",
	[STAC_922X_DELL_M81] = "dell-m81",
	[STAC_922X_DELL_M82] = "dell-m82",
};

/* STAC922x: board model looked up by PCI subsystem vendor/device ID */
static const struct snd_pci_quirk stac922x_cfg_tbl[] = {
	/* SigmaTel reference board */
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668,
		      "DFI LanParty", STAC_D945_REF),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101,
		      "DFI LanParty", STAC_D945_REF),
	/* Intel 945G based systems */
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0101,
		      "Intel D945G", STAC_D945GTP3),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0202,
		      "Intel D945G", STAC_D945GTP3),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0606,
		      "Intel D945G", STAC_D945GTP3),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0601,
		      "Intel D945G", STAC_D945GTP3),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0111,
		      "Intel D945G", STAC_D945GTP3),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x1115,
		      "Intel D945G", STAC_D945GTP3),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x1116,
		      "Intel D945G", STAC_D945GTP3),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x1117,
		      "Intel D945G", STAC_D945GTP3),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x1118,
		      "Intel D945G", STAC_D945GTP3),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x1119,
		      "Intel D945G",
STAC_D945GTP3),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x8826,
		      "Intel D945G", STAC_D945GTP3),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5049,
		      "Intel D945G", STAC_D945GTP3),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5055,
		      "Intel D945G", STAC_D945GTP3),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5048,
		      "Intel D945G", STAC_D945GTP3),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0110,
		      "Intel D945G", STAC_D945GTP3),
	/* Intel D945G 5-stack systems */
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0404,
		      "Intel D945G", STAC_D945GTP5),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0303,
		      "Intel D945G", STAC_D945GTP5),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0013,
		      "Intel D945G", STAC_D945GTP5),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0417,
		      "Intel D945G", STAC_D945GTP5),
	/* Intel 945P based systems */
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0b0b,
		      "Intel D945P", STAC_D945GTP3),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0112,
		      "Intel D945P", STAC_D945GTP3),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0d0d,
		      "Intel D945P", STAC_D945GTP3),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0909,
		      "Intel D945P", STAC_D945GTP3),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0505,
		      "Intel D945P", STAC_D945GTP3),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0707,
		      "Intel D945P", STAC_D945GTP5),
	/* other intel */
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0204,
		      "Intel D945", STAC_D945_REF),
	/* other systems  */

	/* Apple Intel Mac (Mac Mini, MacBook, MacBook Pro...) */
	SND_PCI_QUIRK(0x8384, 0x7680, "Mac", STAC_INTEL_MAC_AUTO),

	/* Dell systems  */
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01a7,
		      "unknown Dell", STAC_922X_DELL_D81),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01a9,
		      "unknown Dell", STAC_922X_DELL_D81),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ab,
		      "unknown Dell", STAC_922X_DELL_D81),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ac,
		      "unknown Dell", STAC_922X_DELL_D82),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01bf,
		      "unknown Dell", STAC_922X_DELL_M81),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01d0,
		      "unknown Dell", STAC_922X_DELL_D82),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01d1,
		      "unknown Dell", STAC_922X_DELL_D81),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01d2,
		      "unknown Dell", STAC_922X_DELL_D81),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01d7,
		      "Dell XPS M1210", STAC_922X_DELL_M82),
	/* ECS/PC Chips boards */
	SND_PCI_QUIRK_MASK(0x1019, 0xf000, 0x2000,
		      "ECS/PC chips", STAC_ECS_202),
	{} /* terminator */
};

/* STAC927x family: pin default configurations (14 pins) */
static const unsigned int ref927x_pin_configs[14] = {
	0x02214020, 0x02a19080, 0x0181304e, 0x01014010,
	0x01a19040, 0x01011012, 0x01016011, 0x0101201f,
	0x183301f0, 0x18a001f0, 0x18a001f0, 0x01442070,
	0x01c42190, 0x40000100,
};

static const unsigned int d965_3st_pin_configs[14] = {
	0x0221401f, 0x02a19120, 0x40000100, 0x01014011,
	0x01a19021, 0x01813024, 0x40000100, 0x40000100,
	0x40000100, 0x40000100, 0x40000100, 0x40000100,
	0x40000100, 0x40000100
};

static const unsigned int d965_5st_pin_configs[14] = {
	0x02214020, 0x02a19080, 0x0181304e, 0x01014010,
	0x01a19040, 0x01011012, 0x01016011, 0x40000100,
	0x40000100, 0x40000100, 0x40000100, 0x01442070,
	0x40000100, 0x40000100
};

static const unsigned int d965_5st_no_fp_pin_configs[14] = {
	0x40000100, 0x40000100, 0x0181304e, 0x01014010,
	0x01a19040, 0x01011012, 0x01016011, 0x40000100,
	0x40000100, 0x40000100, 0x40000100, 0x01442070,
	0x40000100, 0x40000100
};

static const unsigned int dell_3st_pin_configs[14] = {
	0x02211230, 0x02a11220, 0x01a19040, 0x01114210,
	0x01111212, 0x01116211, 0x01813050, 0x01112214,
	0x403003fa, 0x90a60040, 0x90a60040, 0x404003fb,
	0x40c003fc, 0x40000100
};

/* per-model pin-config table; NULL means no fixed pin overrides */
static const unsigned int *stac927x_brd_tbl[STAC_927X_MODELS] = {
	[STAC_D965_REF_NO_JD] = ref927x_pin_configs,
	[STAC_D965_REF]  = ref927x_pin_configs,
	[STAC_D965_3ST]  = d965_3st_pin_configs,
	[STAC_D965_5ST]  = d965_5st_pin_configs,
	[STAC_D965_5ST_NO_FP]  = d965_5st_no_fp_pin_configs,
	[STAC_DELL_3ST]  = dell_3st_pin_configs,
	[STAC_DELL_BIOS] = NULL,
	[STAC_927X_VOLKNOB] = NULL,
};

/* model name strings, indexed by model ID */
static const char * const stac927x_models[STAC_927X_MODELS] = {
	[STAC_927X_AUTO]	= "auto",
	[STAC_D965_REF_NO_JD]	= "ref-no-jd",
	[STAC_D965_REF]		= "ref",
	[STAC_D965_3ST]		= "3stack",
	[STAC_D965_5ST]		= "5stack",
	[STAC_D965_5ST_NO_FP]	= "5stack-no-fp",
	[STAC_DELL_3ST]		= "dell-3stack",
	[STAC_DELL_BIOS]	= "dell-bios",
	[STAC_927X_VOLKNOB]	= "volknob",
};

/* STAC927x: board model looked up by PCI subsystem vendor/device ID */
static const struct snd_pci_quirk stac927x_cfg_tbl[] = {
	/* SigmaTel reference board */
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668,
		      "DFI LanParty", STAC_D965_REF),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101,
		      "DFI LanParty", STAC_D965_REF),
	 /* Intel 946 based systems */
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x3d01, "Intel D946", STAC_D965_3ST),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0xa301, "Intel D946", STAC_D965_3ST),
	/* 965 based 3 stack systems */
	SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_INTEL, 0xff00, 0x2100,
			   "Intel D965", STAC_D965_3ST),
	SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_INTEL, 0xff00, 0x2000,
			   "Intel D965", STAC_D965_3ST),
	/* Dell 3 stack systems */
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x01dd, "Dell Dimension E520", STAC_DELL_3ST),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x01ed, "Dell     ", STAC_DELL_3ST),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x01f4, "Dell     ", STAC_DELL_3ST),
	/* Dell 3 stack systems with verb table in BIOS */
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x01f3, "Dell Inspiron 1420", STAC_DELL_BIOS),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x01f7, "Dell XPS M1730", STAC_DELL_BIOS),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x0227, "Dell Vostro 1400  ", STAC_DELL_BIOS),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x022e, "Dell     ", STAC_DELL_BIOS),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x022f, "Dell Inspiron 1525", STAC_DELL_BIOS),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x0242, "Dell     ", STAC_DELL_BIOS),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x0243, "Dell     ", STAC_DELL_BIOS),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x02ff, "Dell     ", STAC_DELL_BIOS),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x0209, "Dell XPS 1330", STAC_DELL_BIOS),
	/* 965 based 5 stack systems */
	SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_INTEL, 0xff00, 0x2300,
			   "Intel D965", STAC_D965_5ST),
	SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_INTEL, 0xff00, 0x2500,
			   "Intel D965", STAC_D965_5ST),
	/* volume-knob fixes */
	SND_PCI_QUIRK_VENDOR(0x10cf, "FSC", STAC_927X_VOLKNOB),
	{} /* terminator */
};

/* STAC9205 family: pin default configurations (12 pins) */
static const unsigned int ref9205_pin_configs[12] = {
	0x40000100, 0x40000100, 0x01016011, 0x01014010,
	0x01813122, 0x01a19021, 0x01019020, 0x40000100,
	0x90a000f0, 0x90a000f0, 0x01441030, 0x01c41030
};

/*
    STAC 9205 pin configs for
    102801F1
    102801F2
    102801FC
    102801FD
    10280204
    1028021F
    10280228 (Dell Vostro 1500)
    10280229 (Dell Vostro 1700)
*/
static const unsigned int dell_9205_m42_pin_configs[12] = {
	0x0321101F, 0x03A11020, 0x400003FA, 0x90170310,
	0x400003FB, 0x400003FC, 0x400003FD, 0x40F000F9,
	0x90A60330, 0x400003FF, 0x0144131F, 0x40C003FE,
};

/*
    STAC 9205 pin configs for
    102801F9
    102801FA
    102801FE
    102801FF (Dell Precision M4300)
    10280206
    10280200
    10280201
*/
static const unsigned int dell_9205_m43_pin_configs[12] = {
	0x0321101f, 0x03a11020, 0x90a70330, 0x90170310,
	0x400000fe, 0x400000ff, 0x400000fd, 0x40f000f9,
	0x400000fa, 0x400000fc, 0x0144131f, 0x40c003f8,
};

static const unsigned int dell_9205_m44_pin_configs[12] = {
	0x0421101f, 0x04a11020, 0x400003fa, 0x90170310,
	0x400003fb, 0x400003fc, 0x400003fd, 0x400003f9,
	0x90a60330, 0x400003ff, 0x01441340, 0x40c003fe,
};

/* per-model pin-config table; NULL means no fixed pin overrides */
static const unsigned int *stac9205_brd_tbl[STAC_9205_MODELS] = {
	[STAC_9205_REF] = ref9205_pin_configs,
	[STAC_9205_DELL_M42] = dell_9205_m42_pin_configs,
	[STAC_9205_DELL_M43] = dell_9205_m43_pin_configs,
	[STAC_9205_DELL_M44] = dell_9205_m44_pin_configs,
	[STAC_9205_EAPD] = NULL,
};

/* model name strings, indexed by model ID */
static const char * const stac9205_models[STAC_9205_MODELS] = {
	[STAC_9205_AUTO] = "auto",
	[STAC_9205_REF] = "ref",
	[STAC_9205_DELL_M42] = "dell-m42",
	[STAC_9205_DELL_M43] = "dell-m43",
	[STAC_9205_DELL_M44] = "dell-m44",
	[STAC_9205_EAPD] = "eapd",
};

/* STAC9205: board model looked up by PCI subsystem vendor/device ID */
static const struct snd_pci_quirk stac9205_cfg_tbl[] = {
	/* SigmaTel reference board */
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668,
		      "DFI LanParty", STAC_9205_REF),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0xfb30,
		      "SigmaTel", STAC_9205_REF),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101,
		      "DFI LanParty", STAC_9205_REF),
	/* Dell */
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f1,
		      "unknown Dell", STAC_9205_DELL_M42),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f2,
		      "unknown Dell", STAC_9205_DELL_M42),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f8,
		      "Dell Precision", STAC_9205_DELL_M43),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f9,
		      "Dell Precision", STAC_9205_DELL_M43),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01fa,
		      "Dell Precision", STAC_9205_DELL_M43),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01fc,
		      "unknown Dell", STAC_9205_DELL_M42),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01fd,
		      "unknown Dell", STAC_9205_DELL_M42),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01fe,
		      "Dell Precision", STAC_9205_DELL_M43),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ff,
		      "Dell Precision M4300", STAC_9205_DELL_M43),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0204,
		      "unknown Dell", STAC_9205_DELL_M42),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0206,
		      "Dell Precision", STAC_9205_DELL_M43),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x021b,
		      "Dell Precision", STAC_9205_DELL_M43),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x021c,
		      "Dell Precision", STAC_9205_DELL_M43),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x021f,
		      "Dell Inspiron", STAC_9205_DELL_M44),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0228,
		      "Dell Vostro 1500", STAC_9205_DELL_M42),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0229,
		      "Dell Vostro 1700", STAC_9205_DELL_M42),
	/* Gateway */
	SND_PCI_QUIRK(0x107b, 0x0560, "Gateway T6834c", STAC_9205_EAPD),
	SND_PCI_QUIRK(0x107b, 0x0565, "Gateway T1616", STAC_9205_EAPD),
	{} /* terminator */
};

/*
 * Apply a board's pin default configs to the codec.  Entries that are
 * zero, and pins whose NID is zero, are left untouched, so partially
 * filled tables (e.g. intel_dg45id_pin_configs) only override the pins
 * they name.
 */
static void stac92xx_set_config_regs(struct hda_codec *codec,
				     const unsigned int *pincfgs)
{
	int i;
	struct sigmatel_spec *spec = codec->spec;

	if (!pincfgs)
		return;

	for (i = 0; i < spec->num_pins; i++)
		if (spec->pin_nids[i] && pincfgs[i])
			snd_hda_codec_set_pincfg(codec, spec->pin_nids[i],
						 pincfgs[i]);
}

/*
 * Analog playback callbacks
 */
/* open: optionally delay (spec->stream_delay ms) before handing off to
 * the generic multi-out open helper */
static int stac92xx_playback_pcm_open(struct hda_pcm_stream *hinfo,
				      struct hda_codec *codec,
				      struct snd_pcm_substream *substream)
{
	struct sigmatel_spec *spec = codec->spec;
	if (spec->stream_delay)
		msleep(spec->stream_delay);
	return snd_hda_multi_out_analog_open(codec, &spec->multiout, substream,
					     hinfo);
}

static int stac92xx_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
					 struct hda_codec *codec,
					 unsigned int stream_tag,
					 unsigned int format,
					 struct snd_pcm_substream *substream)
{
	struct sigmatel_spec *spec = codec->spec;
	return snd_hda_multi_out_analog_prepare(codec, &spec->multiout,
						stream_tag, format, substream);
}

static int stac92xx_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
					struct hda_codec *codec,
					struct snd_pcm_substream *substream)
{
	struct sigmatel_spec *spec = codec->spec;
	return snd_hda_multi_out_analog_cleanup(codec, &spec->multiout);
}

/*
 * Digital playback callbacks
 */
static int stac92xx_dig_playback_pcm_open(struct hda_pcm_stream *hinfo,
					  struct hda_codec *codec,
					  struct snd_pcm_substream *substream)
{
	struct sigmatel_spec *spec = codec->spec;
	return snd_hda_multi_out_dig_open(codec, &spec->multiout);
}

static int stac92xx_dig_playback_pcm_close(struct hda_pcm_stream *hinfo,
					   struct hda_codec *codec,
					   struct snd_pcm_substream *substream)
{
	struct sigmatel_spec *spec = codec->spec;
	return snd_hda_multi_out_dig_close(codec, &spec->multiout);
}

static int stac92xx_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
					 struct hda_codec *codec,
					 unsigned int stream_tag,
					 unsigned int format,
					 struct snd_pcm_substream *substream)
{
struct sigmatel_spec *spec = codec->spec; return snd_hda_multi_out_dig_prepare(codec, &spec->multiout, stream_tag, format, substream); } static int stac92xx_dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct sigmatel_spec *spec = codec->spec; return snd_hda_multi_out_dig_cleanup(codec, &spec->multiout); } /* * Analog capture callbacks */ static int stac92xx_capture_pcm_prepare(struct hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, unsigned int format, struct snd_pcm_substream *substream) { struct sigmatel_spec *spec = codec->spec; hda_nid_t nid = spec->adc_nids[substream->number]; if (spec->powerdown_adcs) { msleep(40); snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_POWER_STATE, AC_PWRST_D0); } snd_hda_codec_setup_stream(codec, nid, stream_tag, 0, format); return 0; } static int stac92xx_capture_pcm_cleanup(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct sigmatel_spec *spec = codec->spec; hda_nid_t nid = spec->adc_nids[substream->number]; snd_hda_codec_cleanup_stream(codec, nid); if (spec->powerdown_adcs) snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_POWER_STATE, AC_PWRST_D3); return 0; } static const struct hda_pcm_stream stac92xx_pcm_digital_playback = { .substreams = 1, .channels_min = 2, .channels_max = 2, /* NID is set in stac92xx_build_pcms */ .ops = { .open = stac92xx_dig_playback_pcm_open, .close = stac92xx_dig_playback_pcm_close, .prepare = stac92xx_dig_playback_pcm_prepare, .cleanup = stac92xx_dig_playback_pcm_cleanup }, }; static const struct hda_pcm_stream stac92xx_pcm_digital_capture = { .substreams = 1, .channels_min = 2, .channels_max = 2, /* NID is set in stac92xx_build_pcms */ }; static const struct hda_pcm_stream stac92xx_pcm_analog_playback = { .substreams = 1, .channels_min = 2, .channels_max = 8, .nid = 0x02, /* NID to query formats and rates */ .ops = { .open = stac92xx_playback_pcm_open, 
.prepare = stac92xx_playback_pcm_prepare, .cleanup = stac92xx_playback_pcm_cleanup }, }; static const struct hda_pcm_stream stac92xx_pcm_analog_alt_playback = { .substreams = 1, .channels_min = 2, .channels_max = 2, .nid = 0x06, /* NID to query formats and rates */ .ops = { .open = stac92xx_playback_pcm_open, .prepare = stac92xx_playback_pcm_prepare, .cleanup = stac92xx_playback_pcm_cleanup }, }; static const struct hda_pcm_stream stac92xx_pcm_analog_capture = { .channels_min = 2, .channels_max = 2, /* NID + .substreams is set in stac92xx_build_pcms */ .ops = { .prepare = stac92xx_capture_pcm_prepare, .cleanup = stac92xx_capture_pcm_cleanup }, }; static int stac92xx_build_pcms(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; struct hda_pcm *info = spec->pcm_rec; codec->num_pcms = 1; codec->pcm_info = info; info->name = "STAC92xx Analog"; info->stream[SNDRV_PCM_STREAM_PLAYBACK] = stac92xx_pcm_analog_playback; info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->multiout.dac_nids[0]; info->stream[SNDRV_PCM_STREAM_CAPTURE] = stac92xx_pcm_analog_capture; info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adc_nids[0]; info->stream[SNDRV_PCM_STREAM_CAPTURE].substreams = spec->num_adcs; if (spec->alt_switch) { codec->num_pcms++; info++; info->name = "STAC92xx Analog Alt"; info->stream[SNDRV_PCM_STREAM_PLAYBACK] = stac92xx_pcm_analog_alt_playback; } if (spec->multiout.dig_out_nid || spec->dig_in_nid) { codec->num_pcms++; info++; info->name = "STAC92xx Digital"; info->pcm_type = spec->autocfg.dig_out_type[0]; if (spec->multiout.dig_out_nid) { info->stream[SNDRV_PCM_STREAM_PLAYBACK] = stac92xx_pcm_digital_playback; info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->multiout.dig_out_nid; } if (spec->dig_in_nid) { info->stream[SNDRV_PCM_STREAM_CAPTURE] = stac92xx_pcm_digital_capture; info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->dig_in_nid; } } return 0; } static unsigned int stac92xx_get_default_vref(struct hda_codec *codec, hda_nid_t nid) { unsigned 
int pincap = snd_hda_query_pin_caps(codec, nid);

	/* isolate the VREF capability bits and return the highest level */
	pincap = (pincap & AC_PINCAP_VREF) >> AC_PINCAP_VREF_SHIFT;
	if (pincap & AC_PINCAP_VREF_100)
		return AC_PINCTL_VREF_100;
	if (pincap & AC_PINCAP_VREF_80)
		return AC_PINCTL_VREF_80;
	if (pincap & AC_PINCAP_VREF_50)
		return AC_PINCTL_VREF_50;
	if (pincap & AC_PINCAP_VREF_GRD)
		return AC_PINCTL_VREF_GRD;
	return 0; /* no VREF support advertised */
}

/* Set a pin's widget-control verb (cached write, so it is restored on resume). */
static void stac92xx_auto_set_pinctl(struct hda_codec *codec, hda_nid_t nid, int pin_type)

{
	snd_hda_codec_write_cache(codec, nid, 0,
				AC_VERB_SET_PIN_WIDGET_CONTROL,
				pin_type);
}

#define stac92xx_hp_switch_info		snd_ctl_boolean_mono_info

/* "Headphone as Line Out" switch: report whether a pin is configured. */
static int stac92xx_hp_switch_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct sigmatel_spec *spec = codec->spec;

	ucontrol->value.integer.value[0] = !!spec->hp_switch;
	return 0;
}

static void stac_issue_unsol_event(struct hda_codec *codec, hda_nid_t nid);

/* Store the HP-as-line-out pin (or 0) and re-trigger jack detection
 * (body continues in the next chunk). */
static int stac92xx_hp_switch_put(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct sigmatel_spec *spec = codec->spec;
	int nid = kcontrol->private_value;

	spec->hp_switch = ucontrol->value.integer.value[0] ?
nid : 0;

	/* check to be sure that the ports are up to date with
	 * switch changes
	 */
	stac_issue_unsol_event(codec, nid);

	return 1;
}

/* DC bias enum control: 3 items when the pin is a line/mic switch pin
 * (it can also be an output), otherwise only the 2 input modes. */
static int stac92xx_dc_bias_info(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_info *uinfo)
{
	int i;
	static const char * const texts[] = {
		"Mic In", "Line In", "Line Out"
	};

	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct sigmatel_spec *spec = codec->spec;
	hda_nid_t nid = kcontrol->private_value;

	if (nid == spec->mic_switch || nid == spec->line_switch)
		i = 3;
	else
		i = 2;

	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->value.enumerated.items = i;
	uinfo->count = 1;
	if (uinfo->value.enumerated.item >= i)
		uinfo->value.enumerated.item = i-1;
	strcpy(uinfo->value.enumerated.name,
		texts[uinfo->value.enumerated.item]);

	return 0;
}

/* Map the pin's current VREF back to the enum index:
 * default VREF -> 0 (Mic In), GRD -> 1 (Line In), HIZ -> 2 (Line Out). */
static int stac92xx_dc_bias_get(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	hda_nid_t nid = kcontrol->private_value;
	unsigned int vref = stac92xx_vref_get(codec, nid);

	if (vref == stac92xx_get_default_vref(codec, nid))
		ucontrol->value.enumerated.item[0] = 0;
	else if (vref == AC_PINCTL_VREF_GRD)
		ucontrol->value.enumerated.item[0] = 1;
	else if (vref == AC_PINCTL_VREF_HIZ)
		ucontrol->value.enumerated.item[0] = 2;

	return 0;
}

/* Apply the selected DC bias; only touches hardware when the value changes. */
static int stac92xx_dc_bias_put(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	unsigned int new_vref = 0;
	int error;
	hda_nid_t nid = kcontrol->private_value;

	if (ucontrol->value.enumerated.item[0] == 0)
		new_vref = stac92xx_get_default_vref(codec, nid);
	else if (ucontrol->value.enumerated.item[0] == 1)
		new_vref = AC_PINCTL_VREF_GRD;
	else if (ucontrol->value.enumerated.item[0] == 2)
		new_vref = AC_PINCTL_VREF_HIZ;
	else
		return 0;

	if (new_vref != stac92xx_vref_get(codec, nid)) {
		error = stac92xx_vref_set(codec, nid, new_vref);
		return error;
	}

	return 0;
}

/* Input/output jack-mode enum (body continues in the next chunk). */
static int stac92xx_io_switch_info(struct snd_kcontrol *kcontrol, struct
snd_ctl_elem_info *uinfo) {
	char *texts[2];
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct sigmatel_spec *spec = codec->spec;

	/* item 0 reflects what kind of input this pin is; item 1 is always out */
	if (kcontrol->private_value == spec->line_switch)
		texts[0] = "Line In";
	else
		texts[0] = "Mic In";
	texts[1] = "Line Out";
	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->value.enumerated.items = 2;
	uinfo->count = 1;
	if (uinfo->value.enumerated.item >= 2)
		uinfo->value.enumerated.item = 1;
	strcpy(uinfo->value.enumerated.name,
		texts[uinfo->value.enumerated.item]);

	return 0;
}

/* Report the cached in/out direction; io_switch[1] is the mic pin,
 * io_switch[0] the line pin. */
static int stac92xx_io_switch_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct sigmatel_spec *spec = codec->spec;
	hda_nid_t nid = kcontrol->private_value;
	int io_idx = (nid == spec->mic_switch) ? 1 : 0;

	ucontrol->value.enumerated.item[0] = spec->io_switch[io_idx];
	return 0;
}

/* Flip the pin between input and output mode
 * (body continues in the next chunk). */
static int stac92xx_io_switch_put(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct sigmatel_spec *spec = codec->spec;
	hda_nid_t nid = kcontrol->private_value;
	int io_idx = (nid == spec->mic_switch) ?
1 : 0;
	unsigned short val = !!ucontrol->value.enumerated.item[0];

	spec->io_switch[io_idx] = val;

	if (val)
		stac92xx_auto_set_pinctl(codec, nid, AC_PINCTL_OUT_EN);
	else {
		unsigned int pinctl = AC_PINCTL_IN_EN;
		if (io_idx) /* set VREF for mic */
			pinctl |= stac92xx_get_default_vref(codec, nid);
		stac92xx_auto_set_pinctl(codec, nid, pinctl);
	}

	/* check the auto-mute again: we need to mute/unmute the speaker
	 * appropriately according to the pin direction
	 */
	if (spec->hp_detect)
		stac_issue_unsol_event(codec, nid);

	return 1;
}

#define stac92xx_clfe_switch_info snd_ctl_boolean_mono_info

/* Report whether center and LFE channels are currently swapped. */
static int stac92xx_clfe_switch_get(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct sigmatel_spec *spec = codec->spec;

	ucontrol->value.integer.value[0] = spec->clfe_swap;
	return 0;
}

/* Toggle center/LFE swap via the EAPD/BTL-enable verb
 * (body continues in the next chunk). */
static int stac92xx_clfe_switch_put(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct sigmatel_spec *spec = codec->spec;
	hda_nid_t nid = kcontrol->private_value & 0xff;
	unsigned int val = !!ucontrol->value.integer.value[0];

	if (spec->clfe_swap == val)
		return 0;

	spec->clfe_swap = val;

	snd_hda_codec_write_cache(codec, nid, 0, AC_VERB_SET_EAPD_BTLENABLE,
		spec->clfe_swap ?
0x4 : 0x0);

	return 1;
}

/* Template: boolean "HP as line-out" switch. */
#define STAC_CODEC_HP_SWITCH(xname) \
	{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
	  .name = xname, \
	  .index = 0, \
	  .info = stac92xx_hp_switch_info, \
	  .get = stac92xx_hp_switch_get, \
	  .put = stac92xx_hp_switch_put, \
	}

/* Template: input/output jack-mode enum; xpval is the pin NID. */
#define STAC_CODEC_IO_SWITCH(xname, xpval) \
	{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
	  .name = xname, \
	  .index = 0, \
          .info = stac92xx_io_switch_info, \
          .get = stac92xx_io_switch_get, \
          .put = stac92xx_io_switch_put, \
          .private_value = xpval, \
	}

/* Template: center/LFE swap switch; xpval carries the DAC NID. */
#define STAC_CODEC_CLFE_SWITCH(xname, xpval) \
	{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
	  .name = xname, \
	  .index = 0, \
	  .info = stac92xx_clfe_switch_info, \
	  .get = stac92xx_clfe_switch_get, \
	  .put = stac92xx_clfe_switch_put, \
	  .private_value = xpval, \
	}

/* Indices into stac92xx_control_templates[] below. */
enum {
	STAC_CTL_WIDGET_VOL,
	STAC_CTL_WIDGET_MUTE,
	STAC_CTL_WIDGET_MUTE_BEEP,
	STAC_CTL_WIDGET_MONO_MUX,
	STAC_CTL_WIDGET_HP_SWITCH,
	STAC_CTL_WIDGET_IO_SWITCH,
	STAC_CTL_WIDGET_CLFE_SWITCH,
	STAC_CTL_WIDGET_DC_BIAS
};

/* Blank templates cloned by stac_control_new(); name/value are filled later. */
static const struct snd_kcontrol_new stac92xx_control_templates[] = {
	HDA_CODEC_VOLUME(NULL, 0, 0, 0),
	HDA_CODEC_MUTE(NULL, 0, 0, 0),
	HDA_CODEC_MUTE_BEEP(NULL, 0, 0, 0),
	STAC_MONO_MUX,
	STAC_CODEC_HP_SWITCH(NULL),
	STAC_CODEC_IO_SWITCH(NULL, 0),
	STAC_CODEC_CLFE_SWITCH(NULL, 0),
	DC_BIAS(NULL, 0, 0),
};

/* add dynamic controls */
/* Clone a control template into spec->kctls with a kstrdup'ed name.
 * Returns the new element, or NULL on allocation failure (the slot is
 * rolled back so the array stays consistent). */
static struct snd_kcontrol_new *
stac_control_new(struct sigmatel_spec *spec,
		 const struct snd_kcontrol_new *ktemp,
		 const char *name,
		 unsigned int subdev)
{
	struct snd_kcontrol_new *knew;

	snd_array_init(&spec->kctls, sizeof(*knew), 32);
	knew = snd_array_new(&spec->kctls);
	if (!knew)
		return NULL;
	*knew = *ktemp;
	knew->name = kstrdup(name, GFP_KERNEL);
	if (!knew->name) {
		/* rollback */
		memset(knew, 0, sizeof(*knew));
		spec->kctls.alloced--;
		return NULL;
	}
	knew->subdevice = subdev;
	return knew;
}

/* Clone a template and set its index/private_value
 * (body continues in the next chunk). */
static int stac92xx_add_control_temp(struct sigmatel_spec *spec,
				     const struct snd_kcontrol_new *ktemp,
				     int idx, const char *name,
				     unsigned long val)
{
	struct snd_kcontrol_new *knew = stac_control_new(spec, ktemp, name,
HDA_SUBDEV_AMP_FLAG);
	if (!knew)
		return -ENOMEM;
	knew->index = idx;
	knew->private_value = val;
	return 0;
}

/* Convenience wrapper: add a control from the standard template table. */
static inline int stac92xx_add_control_idx(struct sigmatel_spec *spec,
					   int type, int idx, const char *name,
					   unsigned long val)
{
	return stac92xx_add_control_temp(spec,
					 &stac92xx_control_templates[type],
					 idx, name, val);
}


/* add dynamic controls */
static inline int stac92xx_add_control(struct sigmatel_spec *spec, int type,
				       const char *name, unsigned long val)
{
	return stac92xx_add_control_idx(spec, type, 0, name, val);
}

/* Template for the shared "Input Source" mux control. */
static const struct snd_kcontrol_new stac_input_src_temp = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Input Source",
	.info = stac92xx_mux_enum_info,
	.get = stac92xx_mux_enum_get,
	.put = stac92xx_mux_enum_put,
};

/* Add a "... Jack Mode" control for an external pin: an IO switch when the
 * pin's default VREF is ground (or it is the mic pin), otherwise a DC-bias
 * enum when the pin supports a ground VREF. Internal pins get nothing. */
static inline int stac92xx_add_jack_mode_control(struct hda_codec *codec,
						hda_nid_t nid, int idx)
{
	int def_conf = snd_hda_codec_get_pincfg(codec, nid);
	int control = 0;
	struct sigmatel_spec *spec = codec->spec;
	char name[22];

	if (snd_hda_get_input_pin_attr(def_conf) != INPUT_PIN_ATTR_INT) {
		if (stac92xx_get_default_vref(codec, nid) == AC_PINCTL_VREF_GRD
			&& nid == spec->line_switch)
			control = STAC_CTL_WIDGET_IO_SWITCH;
		else if (snd_hda_query_pin_caps(codec, nid)
			& (AC_PINCAP_VREF_GRD << AC_PINCAP_VREF_SHIFT))
			control = STAC_CTL_WIDGET_DC_BIAS;
		else if (nid == spec->mic_switch)
			control = STAC_CTL_WIDGET_IO_SWITCH;
	}

	if (control) {
		snd_hda_get_pin_label(codec, nid, &spec->autocfg,
				      name, sizeof(name), NULL);
		return stac92xx_add_control(codec->spec, control,
					strcat(name, " Jack Mode"), nid);
	}

	return 0;
}

/* Add the "Input Source" mux control unless auto-mic is active or there
 * is at most one input (body continues in the next chunk). */
static int stac92xx_add_input_source(struct sigmatel_spec *spec)
{
	struct snd_kcontrol_new *knew;
	struct hda_input_mux *imux = &spec->private_imux;

	if (spec->auto_mic)
		return 0; /* no need for input source */
	if (!spec->num_adcs || imux->num_items <= 1)
		return 0; /* no need for input source control */
	knew = stac_control_new(spec, &stac_input_src_temp,
				stac_input_src_temp.name, 0);
	if (!knew)
		return -ENOMEM;
	knew->count =
spec->num_adcs;
	return 0;
}

/* check whether the line-input can be used as line-out */
static hda_nid_t check_line_out_switch(struct hda_codec *codec)
{
	struct sigmatel_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	hda_nid_t nid;
	unsigned int pincap;
	int i;

	if (cfg->line_out_type != AUTO_PIN_LINE_OUT)
		return 0;
	for (i = 0; i < cfg->num_inputs; i++) {
		if (cfg->inputs[i].type == AUTO_PIN_LINE_IN) {
			nid = cfg->inputs[i].pin;
			pincap = snd_hda_query_pin_caps(codec, nid);
			if (pincap & AC_PINCAP_OUT)
				return nid;
		}
	}
	return 0;
}

static hda_nid_t get_unassigned_dac(struct hda_codec *codec, hda_nid_t nid);

/* check whether the mic-input can be used as line-out */
/* Returns the pin NID and stores the free DAC in *dac; both 0 when no
 * suitable external mic pin with output capability is found. */
static hda_nid_t check_mic_out_switch(struct hda_codec *codec, hda_nid_t *dac)
{
	struct sigmatel_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	unsigned int def_conf, pincap;
	int i;

	*dac = 0;
	if (cfg->line_out_type != AUTO_PIN_LINE_OUT)
		return 0;
	for (i = 0; i < cfg->num_inputs; i++) {
		hda_nid_t nid = cfg->inputs[i].pin;
		if (cfg->inputs[i].type != AUTO_PIN_MIC)
			continue;
		def_conf = snd_hda_codec_get_pincfg(codec, nid);
		/* some laptops have an internal analog microphone
		 * which can't be used as an output */
		if (snd_hda_get_input_pin_attr(def_conf) != INPUT_PIN_ATTR_INT) {
			pincap = snd_hda_query_pin_caps(codec, nid);
			if (pincap & AC_PINCAP_OUT) {
				*dac = get_unassigned_dac(codec, nid);
				if (*dac)
					return nid;
			}
		}
	}
	return 0;
}

/* Is the DAC already listed in multiout.dac_nids[]? */
static int is_in_dac_nids(struct sigmatel_spec *spec, hda_nid_t nid)
{
	int i;

	for (i = 0; i < spec->multiout.num_dacs; i++) {
		if (spec->multiout.dac_nids[i] == nid)
			return 1;
	}

	return 0;
}

/* Is the DAC claimed by any output (line-out, HP, or speaker)? */
static int check_all_dac_nids(struct sigmatel_spec *spec, hda_nid_t nid)
{
	int i;
	if (is_in_dac_nids(spec, nid))
		return 1;
	for (i = 0; i < spec->autocfg.hp_outs; i++)
		if (spec->hp_dacs[i] == nid)
			return 1;
	for (i = 0; i < spec->autocfg.speaker_outs; i++)
		if (spec->speaker_dacs[i] == nid)
			return 1;
	return 0;
}

/* Find a free DAC reachable from the given pin and select it in the pin's
 * input mux (body continues in the next chunk). */
static hda_nid_t get_unassigned_dac(struct hda_codec *codec,
hda_nid_t nid)
{
	struct sigmatel_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	int j, conn_len;
	hda_nid_t conn[HDA_MAX_CONNECTIONS], fallback_dac;
	unsigned int wcaps, wtype;

	conn_len = snd_hda_get_connections(codec, nid, conn,
					   HDA_MAX_CONNECTIONS);
	/* 92HD88: trace back up the link of nids to find the DAC */
	while (conn_len == 1 && (get_wcaps_type(get_wcaps(codec, conn[0]))
					!= AC_WID_AUD_OUT)) {
		nid = conn[0];
		conn_len = snd_hda_get_connections(codec, nid, conn,
			HDA_MAX_CONNECTIONS);
	}
	for (j = 0; j < conn_len; j++) {
		wcaps = get_wcaps(codec, conn[j]);
		wtype = get_wcaps_type(wcaps);
		/* we check only analog outputs */
		if (wtype != AC_WID_AUD_OUT || (wcaps & AC_WCAP_DIGITAL))
			continue;
		/* if this route has a free DAC, assign it */
		if (!check_all_dac_nids(spec, conn[j])) {
			if (conn_len > 1) {
				/* select this DAC in the pin's input mux */
				snd_hda_codec_write_cache(codec, nid, 0,
						  AC_VERB_SET_CONNECT_SEL, j);
			}
			return conn[j];
		}
	}

	/* if all DACs are already assigned, connect to the primary DAC,
	   unless we're assigning a secondary headphone */
	fallback_dac = spec->multiout.dac_nids[0];
	if (spec->multiout.hp_nid) {
		for (j = 0; j < cfg->hp_outs; j++)
			if (cfg->hp_pins[j] == nid) {
				fallback_dac = spec->multiout.hp_nid;
				break;
			}
	}

	if (conn_len > 1) {
		for (j = 0; j < conn_len; j++) {
			if (conn[j] == fallback_dac) {
				snd_hda_codec_write_cache(codec, nid, 0,
						  AC_VERB_SET_CONNECT_SEL, j);
				break;
			}
		}
	}
	/* 0: no new DAC was assigned (fallback routing only) */
	return 0;
}

static int add_spec_dacs(struct sigmatel_spec *spec, hda_nid_t nid);
static int add_spec_extra_dacs(struct sigmatel_spec *spec, hda_nid_t nid);

/*
 * Fill in the dac_nids table from the parsed pin configuration
 * This function only works when every pin in line_out_pins[]
 * contains at least one DAC in its connection list. Some 92xx
 * codecs are not connected directly to a DAC, such as the 9200
 * and 9202/925x. For those, dac_nids[] must be hard-coded.
 */
static int stac92xx_auto_fill_dac_nids(struct hda_codec *codec)
{
	struct sigmatel_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	int i;
	hda_nid_t nid, dac;

	for (i = 0; i < cfg->line_outs; i++) {
		nid = cfg->line_out_pins[i];
		dac = get_unassigned_dac(codec, nid);
		if (!dac) {
			if (spec->multiout.num_dacs > 0) {
				/* we have already working output pins,
				 * so let's drop the broken ones again
				 */
				cfg->line_outs = spec->multiout.num_dacs;
				break;
			}
			/* error out, no available DAC found */
			snd_printk(KERN_ERR
				   "%s: No available DAC for pin 0x%x\n",
				   __func__, nid);
			return -ENODEV;
		}
		add_spec_dacs(spec, dac);
	}

	for (i = 0; i < cfg->hp_outs; i++) {
		nid = cfg->hp_pins[i];
		dac = get_unassigned_dac(codec, nid);
		if (dac) {
			if (!spec->multiout.hp_nid)
				spec->multiout.hp_nid = dac;
			else
				add_spec_extra_dacs(spec, dac);
		}
		spec->hp_dacs[i] = dac;
	}

	for (i = 0; i < cfg->speaker_outs; i++) {
		nid = cfg->speaker_pins[i];
		dac = get_unassigned_dac(codec, nid);
		if (dac)
			add_spec_extra_dacs(spec, dac);
		spec->speaker_dacs[i] = dac;
	}

	/* add line-in as output */
	nid = check_line_out_switch(codec);
	if (nid) {
		dac = get_unassigned_dac(codec, nid);
		if (dac) {
			snd_printdd("STAC: Add line-in 0x%x as output %d\n",
				    nid, cfg->line_outs);
			cfg->line_out_pins[cfg->line_outs] = nid;
			cfg->line_outs++;
			spec->line_switch = nid;
			add_spec_dacs(spec, dac);
		}
	}
	/* add mic as output */
	nid = check_mic_out_switch(codec, &dac);
	if (nid && dac) {
		snd_printdd("STAC: Add mic-in 0x%x as output %d\n",
			    nid, cfg->line_outs);
		cfg->line_out_pins[cfg->line_outs] = nid;
		cfg->line_outs++;
		spec->mic_switch = nid;
		add_spec_dacs(spec, dac);
	}

	snd_printd("stac92xx: dac_nids=%d (0x%x/0x%x/0x%x/0x%x/0x%x)\n",
		   spec->multiout.num_dacs,
		   spec->multiout.dac_nids[0],
		   spec->multiout.dac_nids[1],
		   spec->multiout.dac_nids[2],
		   spec->multiout.dac_nids[3],
		   spec->multiout.dac_nids[4]);

	return 0;
}

/* create volume control/switch for the given prefix type */
/* (body continues in the next chunk) */
static int create_controls_idx(struct hda_codec *codec, const char *pfx,
int idx, hda_nid_t nid, int chs)
{
	struct sigmatel_spec *spec = codec->spec;
	char name[32];
	int err;

	if (!spec->check_volume_offset) {
		unsigned int caps, step, nums, db_scale;
		caps = query_amp_caps(codec, nid, HDA_OUTPUT);
		step = (caps & AC_AMPCAP_STEP_SIZE) >>
			AC_AMPCAP_STEP_SIZE_SHIFT;
		step = (step + 1) * 25; /* in .01dB unit */
		nums = (caps & AC_AMPCAP_NUM_STEPS) >>
			AC_AMPCAP_NUM_STEPS_SHIFT;
		db_scale = nums * step;
		/* if dB scale is over -64dB, and finer enough,
		 * let's reduce it to half
		 */
		if (db_scale > 6400 && nums >= 0x1f)
			spec->volume_offset = nums / 2;
		spec->check_volume_offset = 1;
	}

	sprintf(name, "%s Playback Volume", pfx);
	err = stac92xx_add_control_idx(spec, STAC_CTL_WIDGET_VOL, idx, name,
		HDA_COMPOSE_AMP_VAL_OFS(nid, chs, 0, HDA_OUTPUT,
					spec->volume_offset));
	if (err < 0)
		return err;
	sprintf(name, "%s Playback Switch", pfx);
	err = stac92xx_add_control_idx(spec, STAC_CTL_WIDGET_MUTE, idx, name,
				   HDA_COMPOSE_AMP_VAL(nid, chs, 0, HDA_OUTPUT));
	if (err < 0)
		return err;
	return 0;
}

#define create_controls(codec, pfx, nid, chs) \
	create_controls_idx(codec, pfx, 0, nid, chs)

/* Append a DAC to multiout.dac_nids[]; warns and returns 1 when the
 * five-slot table is full. */
static int add_spec_dacs(struct sigmatel_spec *spec, hda_nid_t nid)
{
	if (spec->multiout.num_dacs > 4) {
		printk(KERN_WARNING "stac92xx: No space for DAC 0x%x\n", nid);
		return 1;
	} else {
		snd_BUG_ON(spec->multiout.dac_nids != spec->dac_nids);
		spec->dac_nids[spec->multiout.num_dacs] = nid;
		spec->multiout.num_dacs++;
	}
	return 0;
}

/* Put a DAC into the first free extra_out_nid[] slot; warns and
 * returns 1 when none is free. */
static int add_spec_extra_dacs(struct sigmatel_spec *spec, hda_nid_t nid)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(spec->multiout.extra_out_nid); i++) {
		if (!spec->multiout.extra_out_nid[i]) {
			spec->multiout.extra_out_nid[i] = nid;
			return 0;
		}
	}
	printk(KERN_WARNING "stac92xx: No space for extra DAC 0x%x\n", nid);
	return 1;
}

/* Create output controls
 * The mixer elements are named depending on the given type (AUTO_PIN_XXX_OUT)
 */
/* (body continues in the next chunk) */
static int create_multi_out_ctls(struct hda_codec *codec, int num_outs,
				 const hda_nid_t *pins,
				 const hda_nid_t *dac_nids,
				 int type)
{
	struct
sigmatel_spec *spec = codec->spec;
	static const char * const chname[4] = {
		"Front", "Surround", NULL /*CLFE*/, "Side"
	};
	hda_nid_t nid;
	int i, err;
	unsigned int wid_caps;

	for (i = 0; i < num_outs && i < ARRAY_SIZE(chname); i++) {
		if (type == AUTO_PIN_HP_OUT && !spec->hp_detect) {
			if (is_jack_detectable(codec, pins[i]))
				spec->hp_detect = 1;
		}
		nid = dac_nids[i];
		if (!nid)
			continue;
		if (type != AUTO_PIN_HP_OUT && i == 2) {
			/* Center/LFE */
			err = create_controls(codec, "Center", nid, 1);
			if (err < 0)
				return err;
			err = create_controls(codec, "LFE", nid, 2);
			if (err < 0)
				return err;

			wid_caps = get_wcaps(codec, nid);

			if (wid_caps & AC_WCAP_LR_SWAP) {
				err = stac92xx_add_control(spec,
					STAC_CTL_WIDGET_CLFE_SWITCH,
					"Swap Center/LFE Playback Switch", nid);

				if (err < 0)
					return err;
			}

		} else {
			const char *name;
			int idx;
			switch (type) {
			case AUTO_PIN_HP_OUT:
				name = "Headphone";
				idx = i;
				break;
			case AUTO_PIN_SPEAKER_OUT:
				name = "Speaker";
				idx = i;
				break;
			default:
				name = chname[i];
				idx = 0;
				break;
			}
			err = create_controls_idx(codec, name, idx, nid, 3);
			if (err < 0)
				return err;
		}
	}
	return 0;
}

/* Add a Capture Volume + Capture Switch pair at the given control index. */
static int stac92xx_add_capvol_ctls(struct hda_codec *codec, unsigned long vol,
				    unsigned long sw, int idx)
{
	int err;

	err = stac92xx_add_control_idx(codec->spec, STAC_CTL_WIDGET_VOL, idx,
				       "Capture Volume", vol);
	if (err < 0)
		return err;
	err = stac92xx_add_control_idx(codec->spec, STAC_CTL_WIDGET_MUTE, idx,
				       "Capture Switch", sw);
	if (err < 0)
		return err;
	return 0;
}

/* add playback controls from the parsed DAC table */
/* (body continues in the next chunk) */
static int stac92xx_auto_create_multi_out_ctls(struct hda_codec *codec,
					       const struct auto_pin_cfg *cfg)
{
	struct sigmatel_spec *spec = codec->spec;
	hda_nid_t nid;
	int err;
	int idx;

	err = create_multi_out_ctls(codec, cfg->line_outs, cfg->line_out_pins,
				    spec->multiout.dac_nids,
				    cfg->line_out_type);
	if (err < 0)
		return err;

	if (cfg->hp_outs > 1 && cfg->line_out_type == AUTO_PIN_LINE_OUT) {
		err = stac92xx_add_control(spec,
			STAC_CTL_WIDGET_HP_SWITCH,
			"Headphone as Line Out Switch",
cfg->hp_pins[cfg->hp_outs - 1]);
		if (err < 0)
			return err;
	}

	for (idx = 0; idx < cfg->num_inputs; idx++) {
		if (cfg->inputs[idx].type > AUTO_PIN_LINE_IN)
			break;
		nid = cfg->inputs[idx].pin;
		err = stac92xx_add_jack_mode_control(codec, nid, idx);
		if (err < 0)
			return err;
	}

	return 0;
}

/* add playback controls for Speaker and HP outputs */
static int stac92xx_auto_create_hp_ctls(struct hda_codec *codec,
					struct auto_pin_cfg *cfg)
{
	struct sigmatel_spec *spec = codec->spec;
	int err;

	err = create_multi_out_ctls(codec, cfg->hp_outs, cfg->hp_pins,
				    spec->hp_dacs, AUTO_PIN_HP_OUT);
	if (err < 0)
		return err;

	err = create_multi_out_ctls(codec, cfg->speaker_outs, cfg->speaker_pins,
				    spec->speaker_dacs, AUTO_PIN_SPEAKER_OUT);
	if (err < 0)
		return err;

	return 0;
}

/* labels for mono mux outputs */
static const char * const stac92xx_mono_labels[4] = {
	"DAC0", "DAC1", "Mixer", "DAC2"
};

/* create mono mux for mono out on capable codecs */
static int stac92xx_auto_create_mono_output_ctls(struct hda_codec *codec)
{
	struct sigmatel_spec *spec = codec->spec;
	struct hda_input_mux *mono_mux = &spec->private_mono_mux;
	int i, num_cons;
	hda_nid_t con_lst[ARRAY_SIZE(stac92xx_mono_labels)];

	num_cons = snd_hda_get_connections(codec,
				spec->mono_nid,
				con_lst,
				HDA_MAX_NUM_INPUTS);
	if (num_cons <= 0 || num_cons > ARRAY_SIZE(stac92xx_mono_labels))
		return -EINVAL;

	for (i = 0; i < num_cons; i++)
		snd_hda_add_imux_item(mono_mux, stac92xx_mono_labels[i], i,
				      NULL);

	return stac92xx_add_control(spec, STAC_CTL_WIDGET_MONO_MUX,
				"Mono Mux", spec->mono_nid);
}

/* create PC beep volume controls */
/* Adds a mute switch (plain mute for the analog beep NID, beep-style mute
 * otherwise) and, when the amp has volume steps, a volume control
 * (body continues in the next chunk). */
static int stac92xx_auto_create_beep_ctls(struct hda_codec *codec,
						hda_nid_t nid)
{
	struct sigmatel_spec *spec = codec->spec;
	u32 caps = query_amp_caps(codec, nid, HDA_OUTPUT);
	int err, type = STAC_CTL_WIDGET_MUTE_BEEP;

	if (spec->anabeep_nid == nid)
		type = STAC_CTL_WIDGET_MUTE;

	/* check for mute support for the amp */
	if ((caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT) {
		err = stac92xx_add_control(spec, type,
			"Beep 
Playback Switch",
			HDA_COMPOSE_AMP_VAL(nid, 1, 0, HDA_OUTPUT));
		if (err < 0)
			return err;
	}

	/* check to see if there is volume support for the amp */
	if ((caps & AC_AMPCAP_NUM_STEPS) >> AC_AMPCAP_NUM_STEPS_SHIFT) {
		err = stac92xx_add_control(spec, STAC_CTL_WIDGET_VOL,
			"Beep Playback Volume",
			HDA_COMPOSE_AMP_VAL(nid, 1, 0, HDA_OUTPUT));
		if (err < 0)
			return err;
	}
	return 0;
}

#ifdef CONFIG_SND_HDA_INPUT_BEEP
#define stac92xx_dig_beep_switch_info snd_ctl_boolean_mono_info

/* Report whether the digital beep device is enabled. */
static int stac92xx_dig_beep_switch_get(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	ucontrol->value.integer.value[0] = codec->beep->enabled;
	return 0;
}

/* Enable/disable the digital beep device. */
static int stac92xx_dig_beep_switch_put(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	return snd_hda_enable_beep_device(codec,
					  ucontrol->value.integer.value[0]);
}

static const struct snd_kcontrol_new stac92xx_dig_beep_ctrl = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.info = stac92xx_dig_beep_switch_info,
	.get = stac92xx_dig_beep_switch_get,
	.put = stac92xx_dig_beep_switch_put,
};

/* Add a software "Beep Playback Switch" when the beep amp has no mute. */
static int stac92xx_beep_switch_ctl(struct hda_codec *codec)
{
	return stac92xx_add_control_temp(codec->spec, &stac92xx_dig_beep_ctrl,
					 0, "Beep Playback Switch", 0);
}
#endif

/* Add "Mux Capture Volume" controls for mux widgets with an output amp
 * (body continues in the next chunk). */
static int stac92xx_auto_create_mux_input_ctls(struct hda_codec *codec)
{
	struct sigmatel_spec *spec = codec->spec;
	int i, j, err = 0;

	for (i = 0; i < spec->num_muxes; i++) {
		hda_nid_t nid;
		unsigned int wcaps;
		unsigned long val;

		nid = spec->mux_nids[i];
		wcaps = get_wcaps(codec, nid);
		if (!(wcaps & AC_WCAP_OUT_AMP))
			continue;

		/* check whether already the same control was created as
		 * normal Capture Volume.
 */
		val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
		for (j = 0; j < spec->num_caps; j++) {
			if (spec->capvols[j] == val)
				break;
		}
		if (j < spec->num_caps)
			continue; /* already created */

		err = stac92xx_add_control_idx(spec, STAC_CTL_WIDGET_VOL, i,
					       "Mux Capture Volume", val);
		if (err < 0)
			return err;
	}
	return 0;
};

static const char * const stac92xx_spdif_labels[3] = {
	"Digital Playback", "Analog Mux 1", "Analog Mux 2",
};

/* Populate the S/PDIF mux item list from the smux widget's connections. */
static int stac92xx_auto_create_spdif_mux_ctls(struct hda_codec *codec)
{
	struct sigmatel_spec *spec = codec->spec;
	struct hda_input_mux *spdif_mux = &spec->private_smux;
	const char * const *labels = spec->spdif_labels;
	int i, num_cons;
	hda_nid_t con_lst[HDA_MAX_NUM_INPUTS];

	num_cons = snd_hda_get_connections(codec,
				spec->smux_nids[0],
				con_lst,
				HDA_MAX_NUM_INPUTS);
	if (num_cons <= 0)
		return -EINVAL;

	if (!labels)
		labels = stac92xx_spdif_labels;

	for (i = 0; i < num_cons; i++)
		snd_hda_add_imux_item(spdif_mux, labels[i], i, NULL);

	return 0;
}

/* labels for dmic mux inputs */
static const char * const stac92xx_dmic_labels[5] = {
	"Analog Inputs", "Digital Mic 1", "Digital Mic 2",
	"Digital Mic 3", "Digital Mic 4"
};

/* Return the idx-th connection of a mux widget, or 0 when out of range. */
static hda_nid_t get_connected_node(struct hda_codec *codec, hda_nid_t mux,
				    int idx)
{
	hda_nid_t conn[HDA_MAX_NUM_INPUTS];
	int nums;

	nums = snd_hda_get_connections(codec, mux, conn, ARRAY_SIZE(conn));
	if (idx >= 0 && idx < nums)
		return conn[idx];
	return 0;
}

/* look for NID recursively */
#define get_connection_index(codec, mux, nid) \
	snd_hda_get_conn_index(codec, mux, nid, 1)

/* create a volume assigned to the given pin (only if supported) */
/* return 1 if the volume control is created */
/* (body continues in the next chunk) */
static int create_elem_capture_vol(struct hda_codec *codec, hda_nid_t nid,
				   const char *label, int idx, int direction)
{
	unsigned int caps, nums;
	char name[32];
	int err;

	if (direction == HDA_OUTPUT)
		caps = AC_WCAP_OUT_AMP;
	else
		caps = AC_WCAP_IN_AMP;
	if (!(get_wcaps(codec, nid) & caps))
		return 0;
	caps = query_amp_caps(codec, nid, direction);
	nums = (caps & AC_AMPCAP_NUM_STEPS)
>> AC_AMPCAP_NUM_STEPS_SHIFT;
	if (!nums)
		return 0; /* amp has no volume steps */
	snprintf(name, sizeof(name), "%s Capture Volume", label);
	err = stac92xx_add_control_idx(codec->spec, STAC_CTL_WIDGET_VOL, idx,
				       name,
				       HDA_COMPOSE_AMP_VAL(nid, 3, 0,
							   direction));
	if (err < 0)
		return err;
	return 1;
}

/* create playback/capture controls for input pins on dmic capable codecs */
static int stac92xx_auto_create_dmic_input_ctls(struct hda_codec *codec,
						const struct auto_pin_cfg *cfg)
{
	struct sigmatel_spec *spec = codec->spec;
	struct hda_input_mux *imux = &spec->private_imux;
	struct hda_input_mux *dimux = &spec->private_dimux;
	int err, i;
	unsigned int def_conf;

	snd_hda_add_imux_item(dimux, stac92xx_dmic_labels[0], 0, NULL);

	for (i = 0; i < spec->num_dmics; i++) {
		hda_nid_t nid;
		int index, type_idx;
		char label[32];

		nid = spec->dmic_nids[i];
		if (get_wcaps_type(get_wcaps(codec, nid)) != AC_WID_PIN)
			continue;
		def_conf = snd_hda_codec_get_pincfg(codec, nid);
		if (get_defcfg_connect(def_conf) == AC_JACK_PORT_NONE)
			continue;

		index = get_connection_index(codec, spec->dmux_nids[0], nid);
		if (index < 0)
			continue;

		snd_hda_get_pin_label(codec, nid, &spec->autocfg,
				      label, sizeof(label), NULL);
		snd_hda_add_imux_item(dimux, label, index, &type_idx);
		if (snd_hda_get_bool_hint(codec, "separate_dmux") != 1)
			snd_hda_add_imux_item(imux, label, index, &type_idx);

		err = create_elem_capture_vol(codec, nid, label, type_idx,
					      HDA_INPUT);
		if (err < 0)
			return err;
		if (!err) {
			/* no input amp: try an output amp on the pin, then
			 * fall back to the input amp of the connected node */
			err = create_elem_capture_vol(codec, nid, label,
						      type_idx, HDA_OUTPUT);
			if (err < 0)
				return err;
			if (!err) {
				nid = get_connected_node(codec,
						spec->dmux_nids[0], index);
				if (nid)
					err = create_elem_capture_vol(codec,
							nid, label,
							type_idx, HDA_INPUT);
				if (err < 0)
					return err;
			}
		}
	}

	return 0;
}

/* Classify a mic pin as fixed/dock/external; returns non-zero when the
 * slot is already taken or the jack type is unsuitable
 * (body continues in the next chunk). */
static int check_mic_pin(struct hda_codec *codec, hda_nid_t nid,
			 hda_nid_t *fixed, hda_nid_t *ext, hda_nid_t *dock)
{
	unsigned int cfg;
	unsigned int type;

	if (!nid)
		return 0;
	cfg = snd_hda_codec_get_pincfg(codec, nid);
	type = get_defcfg_device(cfg);
	switch
(snd_hda_get_input_pin_attr(cfg)) {
	case INPUT_PIN_ATTR_INT:
		if (*fixed)
			return 1; /* already occupied */
		if (type != AC_JACK_MIC_IN)
			return 1; /* invalid type */
		*fixed = nid;
		break;
	case INPUT_PIN_ATTR_UNUSED:
		break;
	case INPUT_PIN_ATTR_DOCK:
		if (*dock)
			return 1; /* already occupied */
		if (type != AC_JACK_MIC_IN && type != AC_JACK_LINE_IN)
			return 1; /* invalid type */
		*dock = nid;
		break;
	default:
		if (*ext)
			return 1; /* already occupied */
		if (type != AC_JACK_MIC_IN)
			return 1; /* invalid type */
		*ext = nid;
		break;
	}
	return 0;
}

/* Resolve the mux/dmux indices routing the given mic pin; for an analog
 * pin the dmux index points at the analog mux, and vice versa for a
 * digital pin. Returns -1 when no route exists. */
static int set_mic_route(struct hda_codec *codec,
			 struct sigmatel_mic_route *mic,
			 hda_nid_t pin)
{
	struct sigmatel_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	int i;

	mic->pin = pin;
	if (pin == 0)
		return 0;
	for (i = 0; i < cfg->num_inputs; i++) {
		if (pin == cfg->inputs[i].pin)
			break;
	}
	if (i < cfg->num_inputs && cfg->inputs[i].type == AUTO_PIN_MIC) {
		/* analog pin */
		i = get_connection_index(codec, spec->mux_nids[0], pin);
		if (i < 0)
			return -1;
		mic->mux_idx = i;
		mic->dmux_idx = -1;
		if (spec->dmux_nids)
			mic->dmux_idx = get_connection_index(codec,
							     spec->dmux_nids[0],
							     spec->mux_nids[0]);
	} else if (spec->dmux_nids) {
		/* digital pin */
		i = get_connection_index(codec, spec->dmux_nids[0], pin);
		if (i < 0)
			return -1;
		mic->dmux_idx = i;
		mic->mux_idx = -1;
		if (spec->mux_nids)
			mic->mux_idx = get_connection_index(codec,
							    spec->mux_nids[0],
							    spec->dmux_nids[0]);
	}
	return 0;
}

/* return non-zero if the device is for automatic mic switch */
/* (body continues in the next chunk) */
static int stac_check_auto_mic(struct hda_codec *codec)
{
	struct sigmatel_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	hda_nid_t fixed, ext, dock;
	int i;

	fixed = ext = dock = 0;
	for (i = 0; i < cfg->num_inputs; i++)
		if (check_mic_pin(codec, cfg->inputs[i].pin,
		    &fixed, &ext, &dock))
			return 0;
	for (i = 0; i < spec->num_dmics; i++)
		if (check_mic_pin(codec, spec->dmic_nids[i],
		    &fixed, &ext, &dock))
			return 0;
	if (!fixed || (!ext && !dock))
		return 0; /* no input to switch */
	if
(!is_jack_detectable(codec, ext))
		return 0; /* no unsol support */
	if (set_mic_route(codec, &spec->ext_mic, ext) ||
	    set_mic_route(codec, &spec->int_mic, fixed) ||
	    set_mic_route(codec, &spec->dock_mic, dock))
		return 0; /* something is wrong */
	return 1;
}

/* create playback/capture controls for input pins */
static int stac92xx_auto_create_analog_input_ctls(struct hda_codec *codec, const struct auto_pin_cfg *cfg)
{
	struct sigmatel_spec *spec = codec->spec;
	struct hda_input_mux *imux = &spec->private_imux;
	int i, j;
	const char *label;

	for (i = 0; i < cfg->num_inputs; i++) {
		hda_nid_t nid = cfg->inputs[i].pin;
		int index, err, type_idx;

		/* find the first mux that reaches this pin */
		index = -1;
		for (j = 0; j < spec->num_muxes; j++) {
			index = get_connection_index(codec, spec->mux_nids[j],
						     nid);
			if (index >= 0)
				break;
		}
		if (index < 0)
			continue;

		label = hda_get_autocfg_input_label(codec, cfg, i);
		snd_hda_add_imux_item(imux, label, index, &type_idx);

		err = create_elem_capture_vol(codec, nid,
					      label, type_idx,
					      HDA_INPUT);
		if (err < 0)
			return err;
	}
	spec->num_analog_muxes = imux->num_items;

	if (imux->num_items) {
		/*
		 * Set the current input for the muxes.
		 * The STAC9221 has two input muxes with identical source
		 * NID lists. Hopefully this won't get confused.
 */
		for (i = 0; i < spec->num_muxes; i++) {
			snd_hda_codec_write_cache(codec, spec->mux_nids[i], 0,
						  AC_VERB_SET_CONNECT_SEL,
						  imux->items[0].index);
		}
	}

	return 0;
}

/* Enable output mode on every parsed line-out pin. */
static void stac92xx_auto_init_multi_out(struct hda_codec *codec)
{
	struct sigmatel_spec *spec = codec->spec;
	int i;

	for (i = 0; i < spec->autocfg.line_outs; i++) {
		hda_nid_t nid = spec->autocfg.line_out_pins[i];
		stac92xx_auto_set_pinctl(codec, nid, AC_PINCTL_OUT_EN);
	}
}

/* Enable HP-out mode on headphone pins and output mode on speaker pins. */
static void stac92xx_auto_init_hp_out(struct hda_codec *codec)
{
	struct sigmatel_spec *spec = codec->spec;
	int i;

	for (i = 0; i < spec->autocfg.hp_outs; i++) {
		hda_nid_t pin;
		pin = spec->autocfg.hp_pins[i];
		if (pin) /* connect to front */
			stac92xx_auto_set_pinctl(codec, pin, AC_PINCTL_OUT_EN
						 | AC_PINCTL_HP_EN);
	}
	for (i = 0; i < spec->autocfg.speaker_outs; i++) {
		hda_nid_t pin;
		pin = spec->autocfg.speaker_pins[i];
		if (pin) /* connect to front */
			stac92xx_auto_set_pinctl(codec, pin,
						 AC_PINCTL_OUT_EN);
	}
}

/* True when the codec has no real line-out but multiple HP jacks that are
 * not "separate"-located — these should be treated as multi-channel outs. */
static int is_dual_headphones(struct hda_codec *codec)
{
	struct sigmatel_spec *spec = codec->spec;
	int i, valid_hps;

	if (spec->autocfg.line_out_type != AUTO_PIN_SPEAKER_OUT ||
	    spec->autocfg.hp_outs <= 1)
		return 0;
	valid_hps = 0;
	for (i = 0; i < spec->autocfg.hp_outs; i++) {
		hda_nid_t nid = spec->autocfg.hp_pins[i];
		unsigned int cfg = snd_hda_codec_get_pincfg(codec, nid);
		if (get_defcfg_location(cfg) & AC_JACK_LOC_SEPARATE)
			continue;
		valid_hps++;
	}
	return (valid_hps > 1);
}

/* Parse the BIOS pin configuration and build all controls/routing
 * (body continues in the next chunks). */
static int stac92xx_parse_auto_config(struct hda_codec *codec)
{
	struct sigmatel_spec *spec = codec->spec;
	hda_nid_t dig_out = 0, dig_in = 0;
	int hp_swap = 0;
	int i, err;

	if ((err = snd_hda_parse_pin_def_config(codec,
						&spec->autocfg,
						spec->dmic_nids)) < 0)
		return err;
	if (! spec->autocfg.line_outs)
		return 0; /* can't find valid pin config */

	/* If we have no real line-out pin and multiple hp-outs, HPs should
	 * be set up as multi-channel outputs.
*/ if (is_dual_headphones(codec)) { /* Copy hp_outs to line_outs, backup line_outs in * speaker_outs so that the following routines can handle * HP pins as primary outputs. */ snd_printdd("stac92xx: Enabling multi-HPs workaround\n"); memcpy(spec->autocfg.speaker_pins, spec->autocfg.line_out_pins, sizeof(spec->autocfg.line_out_pins)); spec->autocfg.speaker_outs = spec->autocfg.line_outs; memcpy(spec->autocfg.line_out_pins, spec->autocfg.hp_pins, sizeof(spec->autocfg.hp_pins)); spec->autocfg.line_outs = spec->autocfg.hp_outs; spec->autocfg.line_out_type = AUTO_PIN_HP_OUT; spec->autocfg.hp_outs = 0; hp_swap = 1; } if (spec->autocfg.mono_out_pin) { int dir = get_wcaps(codec, spec->autocfg.mono_out_pin) & (AC_WCAP_OUT_AMP | AC_WCAP_IN_AMP); u32 caps = query_amp_caps(codec, spec->autocfg.mono_out_pin, dir); hda_nid_t conn_list[1]; /* get the mixer node and then the mono mux if it exists */ if (snd_hda_get_connections(codec, spec->autocfg.mono_out_pin, conn_list, 1) && snd_hda_get_connections(codec, conn_list[0], conn_list, 1) > 0) { int wcaps = get_wcaps(codec, conn_list[0]); int wid_type = get_wcaps_type(wcaps); /* LR swap check, some stac925x have a mux that * changes the DACs output path instead of the * mono-mux path. */ if (wid_type == AC_WID_AUD_SEL && !(wcaps & AC_WCAP_LR_SWAP)) spec->mono_nid = conn_list[0]; } if (dir) { hda_nid_t nid = spec->autocfg.mono_out_pin; /* most mono outs have a least a mute/unmute switch */ dir = (dir & AC_WCAP_OUT_AMP) ? 
HDA_OUTPUT : HDA_INPUT; err = stac92xx_add_control(spec, STAC_CTL_WIDGET_MUTE, "Mono Playback Switch", HDA_COMPOSE_AMP_VAL(nid, 1, 0, dir)); if (err < 0) return err; /* check for volume support for the amp */ if ((caps & AC_AMPCAP_NUM_STEPS) >> AC_AMPCAP_NUM_STEPS_SHIFT) { err = stac92xx_add_control(spec, STAC_CTL_WIDGET_VOL, "Mono Playback Volume", HDA_COMPOSE_AMP_VAL(nid, 1, 0, dir)); if (err < 0) return err; } } stac92xx_auto_set_pinctl(codec, spec->autocfg.mono_out_pin, AC_PINCTL_OUT_EN); } if (!spec->multiout.num_dacs) { err = stac92xx_auto_fill_dac_nids(codec); if (err < 0) return err; err = stac92xx_auto_create_multi_out_ctls(codec, &spec->autocfg); if (err < 0) return err; } /* setup analog beep controls */ if (spec->anabeep_nid > 0) { err = stac92xx_auto_create_beep_ctls(codec, spec->anabeep_nid); if (err < 0) return err; } /* setup digital beep controls and input device */ #ifdef CONFIG_SND_HDA_INPUT_BEEP if (spec->digbeep_nid > 0) { hda_nid_t nid = spec->digbeep_nid; unsigned int caps; err = stac92xx_auto_create_beep_ctls(codec, nid); if (err < 0) return err; err = snd_hda_attach_beep_device(codec, nid); if (err < 0) return err; if (codec->beep) { /* IDT/STAC codecs have linear beep tone parameter */ codec->beep->linear_tone = spec->linear_tone_beep; /* if no beep switch is available, make its own one */ caps = query_amp_caps(codec, nid, HDA_OUTPUT); if (!(caps & AC_AMPCAP_MUTE)) { err = stac92xx_beep_switch_ctl(codec); if (err < 0) return err; } } } #endif err = stac92xx_auto_create_hp_ctls(codec, &spec->autocfg); if (err < 0) return err; /* All output parsing done, now restore the swapped hp pins */ if (hp_swap) { memcpy(spec->autocfg.hp_pins, spec->autocfg.line_out_pins, sizeof(spec->autocfg.hp_pins)); spec->autocfg.hp_outs = spec->autocfg.line_outs; spec->autocfg.line_out_type = AUTO_PIN_HP_OUT; spec->autocfg.line_outs = 0; } if (stac_check_auto_mic(codec)) { spec->auto_mic = 1; /* only one capture for auto-mic */ spec->num_adcs = 1; spec->num_caps = 
1; spec->num_muxes = 1; } for (i = 0; i < spec->num_caps; i++) { err = stac92xx_add_capvol_ctls(codec, spec->capvols[i], spec->capsws[i], i); if (err < 0) return err; } err = stac92xx_auto_create_analog_input_ctls(codec, &spec->autocfg); if (err < 0) return err; if (spec->mono_nid > 0) { err = stac92xx_auto_create_mono_output_ctls(codec); if (err < 0) return err; } if (spec->num_dmics > 0 && !spec->dinput_mux) if ((err = stac92xx_auto_create_dmic_input_ctls(codec, &spec->autocfg)) < 0) return err; if (spec->num_muxes > 0) { err = stac92xx_auto_create_mux_input_ctls(codec); if (err < 0) return err; } if (spec->num_smuxes > 0) { err = stac92xx_auto_create_spdif_mux_ctls(codec); if (err < 0) return err; } err = stac92xx_add_input_source(spec); if (err < 0) return err; spec->multiout.max_channels = spec->multiout.num_dacs * 2; if (spec->multiout.max_channels > 2) spec->surr_switch = 1; /* find digital out and in converters */ for (i = codec->start_nid; i < codec->start_nid + codec->num_nodes; i++) { unsigned int wid_caps = get_wcaps(codec, i); if (wid_caps & AC_WCAP_DIGITAL) { switch (get_wcaps_type(wid_caps)) { case AC_WID_AUD_OUT: if (!dig_out) dig_out = i; break; case AC_WID_AUD_IN: if (!dig_in) dig_in = i; break; } } } if (spec->autocfg.dig_outs) spec->multiout.dig_out_nid = dig_out; if (dig_in && spec->autocfg.dig_in_pin) spec->dig_in_nid = dig_in; if (spec->kctls.list) spec->mixers[spec->num_mixers++] = spec->kctls.list; spec->input_mux = &spec->private_imux; if (!spec->dinput_mux) spec->dinput_mux = &spec->private_dimux; spec->sinput_mux = &spec->private_smux; spec->mono_mux = &spec->private_mono_mux; return 1; } /* add playback controls for HP output */ static int stac9200_auto_create_hp_ctls(struct hda_codec *codec, struct auto_pin_cfg *cfg) { struct sigmatel_spec *spec = codec->spec; hda_nid_t pin = cfg->hp_pins[0]; if (! 
pin) return 0; if (is_jack_detectable(codec, pin)) spec->hp_detect = 1; return 0; } /* add playback controls for LFE output */ static int stac9200_auto_create_lfe_ctls(struct hda_codec *codec, struct auto_pin_cfg *cfg) { struct sigmatel_spec *spec = codec->spec; int err; hda_nid_t lfe_pin = 0x0; int i; /* * search speaker outs and line outs for a mono speaker pin * with an amp. If one is found, add LFE controls * for it. */ for (i = 0; i < spec->autocfg.speaker_outs && lfe_pin == 0x0; i++) { hda_nid_t pin = spec->autocfg.speaker_pins[i]; unsigned int wcaps = get_wcaps(codec, pin); wcaps &= (AC_WCAP_STEREO | AC_WCAP_OUT_AMP); if (wcaps == AC_WCAP_OUT_AMP) /* found a mono speaker with an amp, must be lfe */ lfe_pin = pin; } /* if speaker_outs is 0, then speakers may be in line_outs */ if (lfe_pin == 0 && spec->autocfg.speaker_outs == 0) { for (i = 0; i < spec->autocfg.line_outs && lfe_pin == 0x0; i++) { hda_nid_t pin = spec->autocfg.line_out_pins[i]; unsigned int defcfg; defcfg = snd_hda_codec_get_pincfg(codec, pin); if (get_defcfg_device(defcfg) == AC_JACK_SPEAKER) { unsigned int wcaps = get_wcaps(codec, pin); wcaps &= (AC_WCAP_STEREO | AC_WCAP_OUT_AMP); if (wcaps == AC_WCAP_OUT_AMP) /* found a mono speaker with an amp, must be lfe */ lfe_pin = pin; } } } if (lfe_pin) { err = create_controls(codec, "LFE", lfe_pin, 1); if (err < 0) return err; } return 0; } static int stac9200_parse_auto_config(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; int err; if ((err = snd_hda_parse_pin_def_config(codec, &spec->autocfg, NULL)) < 0) return err; if ((err = stac92xx_auto_create_analog_input_ctls(codec, &spec->autocfg)) < 0) return err; if ((err = stac9200_auto_create_hp_ctls(codec, &spec->autocfg)) < 0) return err; if ((err = stac9200_auto_create_lfe_ctls(codec, &spec->autocfg)) < 0) return err; if (spec->num_muxes > 0) { err = stac92xx_auto_create_mux_input_ctls(codec); if (err < 0) return err; } err = stac92xx_add_input_source(spec); if (err < 0) return 
err;

	if (spec->autocfg.dig_outs)
		spec->multiout.dig_out_nid = 0x05;
	if (spec->autocfg.dig_in_pin)
		spec->dig_in_nid = 0x04;

	if (spec->kctls.list)
		spec->mixers[spec->num_mixers++] = spec->kctls.list;

	spec->input_mux = &spec->private_imux;
	spec->dinput_mux = &spec->private_dimux;

	return 1;
}

/*
 * Early 2006 Intel Macintoshes with STAC9220X5 codecs seem to have a
 * funky external mute control using GPIO pins.
 */

/* program the codec AFG GPIO mask/direction/data registers.
 * The snd_hda_codec_read() calls on SET verbs are intentional: the
 * read forces a round-trip so the write is flushed (see the "sync"
 * comments).
 */
static void stac_gpio_set(struct hda_codec *codec, unsigned int mask,
			  unsigned int dir_mask, unsigned int data)
{
	unsigned int gpiostate, gpiomask, gpiodir;

	snd_printdd("%s msk %x dir %x gpio %x\n", __func__, mask, dir_mask, data);

	/* merge the requested data bits into the current GPIO state */
	gpiostate = snd_hda_codec_read(codec, codec->afg, 0,
				       AC_VERB_GET_GPIO_DATA, 0);
	gpiostate = (gpiostate & ~dir_mask) | (data & dir_mask);

	gpiomask = snd_hda_codec_read(codec, codec->afg, 0,
				      AC_VERB_GET_GPIO_MASK, 0);
	gpiomask |= mask;

	gpiodir = snd_hda_codec_read(codec, codec->afg, 0,
				     AC_VERB_GET_GPIO_DIRECTION, 0);
	gpiodir |= dir_mask;

	/* Configure GPIOx as CMOS */
	snd_hda_codec_write(codec, codec->afg, 0, 0x7e7, 0);

	snd_hda_codec_write(codec, codec->afg, 0,
			    AC_VERB_SET_GPIO_MASK, gpiomask);
	snd_hda_codec_read(codec, codec->afg, 0,
			   AC_VERB_SET_GPIO_DIRECTION, gpiodir); /* sync */

	msleep(1);

	snd_hda_codec_read(codec, codec->afg, 0,
			   AC_VERB_SET_GPIO_DATA, gpiostate); /* sync */
}

/* register an unsolicited-event entry for the given pin NID */
static int stac_add_event(struct hda_codec *codec, hda_nid_t nid,
			  unsigned char type, int data)
{
	struct hda_jack_tbl *event;

	event = snd_hda_jack_tbl_new(codec, nid);
	if (!event)
		return -ENOMEM;
	event->action = type;
	event->private_data = data;

	return 0;
}

/* check if given nid is a valid pin and no other events are assigned
 * to it. If OK, assign the event, set the unsol flag, and returns 1.
 * Otherwise, returns zero.
*/ static int enable_pin_detect(struct hda_codec *codec, hda_nid_t nid, unsigned int type) { struct hda_jack_tbl *event; if (!is_jack_detectable(codec, nid)) return 0; event = snd_hda_jack_tbl_new(codec, nid); if (!event) return -ENOMEM; if (event->action && event->action != type) return 0; event->action = type; snd_hda_jack_detect_enable(codec, nid, 0); return 1; } static int is_nid_out_jack_pin(struct auto_pin_cfg *cfg, hda_nid_t nid) { int i; for (i = 0; i < cfg->hp_outs; i++) if (cfg->hp_pins[i] == nid) return 1; /* nid is a HP-Out */ for (i = 0; i < cfg->line_outs; i++) if (cfg->line_out_pins[i] == nid) return 1; /* nid is a line-Out */ return 0; /* nid is not a HP-Out */ }; static void stac92xx_power_down(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; /* power down inactive DACs */ const hda_nid_t *dac; for (dac = spec->dac_list; *dac; dac++) if (!check_all_dac_nids(spec, *dac)) snd_hda_codec_write(codec, *dac, 0, AC_VERB_SET_POWER_STATE, AC_PWRST_D3); } static void stac_toggle_power_map(struct hda_codec *codec, hda_nid_t nid, int enable); static inline int get_int_hint(struct hda_codec *codec, const char *key, int *valp) { const char *p; p = snd_hda_get_hint(codec, key); if (p) { unsigned long val; if (!strict_strtoul(p, 0, &val)) { *valp = val; return 1; } } return 0; } /* override some hints from the hwdep entry */ static void stac_store_hints(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; int val; val = snd_hda_get_bool_hint(codec, "hp_detect"); if (val >= 0) spec->hp_detect = val; if (get_int_hint(codec, "gpio_mask", &spec->gpio_mask)) { spec->eapd_mask = spec->gpio_dir = spec->gpio_data = spec->gpio_mask; } if (get_int_hint(codec, "gpio_dir", &spec->gpio_dir)) spec->gpio_mask &= spec->gpio_mask; if (get_int_hint(codec, "gpio_data", &spec->gpio_data)) spec->gpio_dir &= spec->gpio_mask; if (get_int_hint(codec, "eapd_mask", &spec->eapd_mask)) spec->eapd_mask &= spec->gpio_mask; if (get_int_hint(codec, 
"gpio_mute", &spec->gpio_mute)) spec->gpio_mute &= spec->gpio_mask; val = snd_hda_get_bool_hint(codec, "eapd_switch"); if (val >= 0) spec->eapd_switch = val; get_int_hint(codec, "gpio_led_polarity", &spec->gpio_led_polarity); if (get_int_hint(codec, "gpio_led", &spec->gpio_led)) { spec->gpio_mask |= spec->gpio_led; spec->gpio_dir |= spec->gpio_led; if (spec->gpio_led_polarity) spec->gpio_data |= spec->gpio_led; } } static void stac_issue_unsol_events(struct hda_codec *codec, int num_pins, const hda_nid_t *pins) { while (num_pins--) stac_issue_unsol_event(codec, *pins++); } /* fake event to set up pins */ static void stac_fake_hp_events(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; if (spec->autocfg.hp_outs) stac_issue_unsol_events(codec, spec->autocfg.hp_outs, spec->autocfg.hp_pins); if (spec->autocfg.line_outs && spec->autocfg.line_out_pins[0] != spec->autocfg.hp_pins[0]) stac_issue_unsol_events(codec, spec->autocfg.line_outs, spec->autocfg.line_out_pins); } static int stac92xx_init(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->autocfg; unsigned int gpio; int i; snd_hda_sequence_write(codec, spec->init); /* power down adcs initially */ if (spec->powerdown_adcs) for (i = 0; i < spec->num_adcs; i++) snd_hda_codec_write(codec, spec->adc_nids[i], 0, AC_VERB_SET_POWER_STATE, AC_PWRST_D3); /* override some hints */ stac_store_hints(codec); /* set up GPIO */ gpio = spec->gpio_data; /* turn on EAPD statically when spec->eapd_switch isn't set. 
* otherwise, unsol event will turn it on/off dynamically */ if (!spec->eapd_switch) gpio |= spec->eapd_mask; stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, gpio); /* set up pins */ if (spec->hp_detect) { /* Enable unsolicited responses on the HP widget */ for (i = 0; i < cfg->hp_outs; i++) { hda_nid_t nid = cfg->hp_pins[i]; enable_pin_detect(codec, nid, STAC_HP_EVENT); } if (cfg->line_out_type == AUTO_PIN_LINE_OUT && cfg->speaker_outs > 0) { /* enable pin-detect for line-outs as well */ for (i = 0; i < cfg->line_outs; i++) { hda_nid_t nid = cfg->line_out_pins[i]; enable_pin_detect(codec, nid, STAC_LO_EVENT); } } /* force to enable the first line-out; the others are set up * in unsol_event */ stac92xx_auto_set_pinctl(codec, spec->autocfg.line_out_pins[0], AC_PINCTL_OUT_EN); /* fake event to set up pins */ stac_fake_hp_events(codec); } else { stac92xx_auto_init_multi_out(codec); stac92xx_auto_init_hp_out(codec); for (i = 0; i < cfg->hp_outs; i++) stac_toggle_power_map(codec, cfg->hp_pins[i], 1); } if (spec->auto_mic) { /* initialize connection to analog input */ if (spec->dmux_nids) snd_hda_codec_write_cache(codec, spec->dmux_nids[0], 0, AC_VERB_SET_CONNECT_SEL, 0); if (enable_pin_detect(codec, spec->ext_mic.pin, STAC_MIC_EVENT)) stac_issue_unsol_event(codec, spec->ext_mic.pin); if (enable_pin_detect(codec, spec->dock_mic.pin, STAC_MIC_EVENT)) stac_issue_unsol_event(codec, spec->dock_mic.pin); } for (i = 0; i < cfg->num_inputs; i++) { hda_nid_t nid = cfg->inputs[i].pin; int type = cfg->inputs[i].type; unsigned int pinctl, conf; if (type == AUTO_PIN_MIC) { /* for mic pins, force to initialize */ pinctl = stac92xx_get_default_vref(codec, nid); pinctl |= AC_PINCTL_IN_EN; stac92xx_auto_set_pinctl(codec, nid, pinctl); } else { pinctl = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_PIN_WIDGET_CONTROL, 0); /* if PINCTL already set then skip */ /* Also, if both INPUT and OUTPUT are set, * it must be a BIOS bug; need to override, too */ if (!(pinctl & AC_PINCTL_IN_EN) 
|| (pinctl & AC_PINCTL_OUT_EN)) { pinctl &= ~AC_PINCTL_OUT_EN; pinctl |= AC_PINCTL_IN_EN; stac92xx_auto_set_pinctl(codec, nid, pinctl); } } conf = snd_hda_codec_get_pincfg(codec, nid); if (get_defcfg_connect(conf) != AC_JACK_PORT_FIXED) { if (enable_pin_detect(codec, nid, STAC_INSERT_EVENT)) stac_issue_unsol_event(codec, nid); } } for (i = 0; i < spec->num_dmics; i++) stac92xx_auto_set_pinctl(codec, spec->dmic_nids[i], AC_PINCTL_IN_EN); if (cfg->dig_out_pins[0]) stac92xx_auto_set_pinctl(codec, cfg->dig_out_pins[0], AC_PINCTL_OUT_EN); if (cfg->dig_in_pin) stac92xx_auto_set_pinctl(codec, cfg->dig_in_pin, AC_PINCTL_IN_EN); for (i = 0; i < spec->num_pwrs; i++) { hda_nid_t nid = spec->pwr_nids[i]; int pinctl, def_conf; /* power on when no jack detection is available */ /* or when the VREF is used for controlling LED */ if (!spec->hp_detect || spec->vref_mute_led_nid == nid) { stac_toggle_power_map(codec, nid, 1); continue; } if (is_nid_out_jack_pin(cfg, nid)) continue; /* already has an unsol event */ pinctl = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_PIN_WIDGET_CONTROL, 0); /* outputs are only ports capable of power management * any attempts on powering down a input port cause the * referenced VREF to act quirky. 
*/ if (pinctl & AC_PINCTL_IN_EN) { stac_toggle_power_map(codec, nid, 1); continue; } def_conf = snd_hda_codec_get_pincfg(codec, nid); def_conf = get_defcfg_connect(def_conf); /* skip any ports that don't have jacks since presence * detection is useless */ if (def_conf != AC_JACK_PORT_NONE && !is_jack_detectable(codec, nid)) { stac_toggle_power_map(codec, nid, 1); continue; } if (enable_pin_detect(codec, nid, STAC_PWR_EVENT)) { stac_issue_unsol_event(codec, nid); continue; } /* none of the above, turn the port OFF */ stac_toggle_power_map(codec, nid, 0); } snd_hda_jack_report_sync(codec); /* sync mute LED */ snd_hda_sync_vmaster_hook(&spec->vmaster_mute); if (spec->dac_list) stac92xx_power_down(codec); return 0; } static void stac92xx_free_kctls(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; if (spec->kctls.list) { struct snd_kcontrol_new *kctl = spec->kctls.list; int i; for (i = 0; i < spec->kctls.used; i++) kfree(kctl[i].name); } snd_array_free(&spec->kctls); } static void stac92xx_shutup_pins(struct hda_codec *codec) { unsigned int i, def_conf; if (codec->bus->shutdown) return; for (i = 0; i < codec->init_pins.used; i++) { struct hda_pincfg *pin = snd_array_elem(&codec->init_pins, i); def_conf = snd_hda_codec_get_pincfg(codec, pin->nid); if (get_defcfg_connect(def_conf) != AC_JACK_PORT_NONE) snd_hda_codec_write(codec, pin->nid, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0); } } static void stac92xx_shutup(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; stac92xx_shutup_pins(codec); if (spec->eapd_mask) stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, spec->gpio_data & ~spec->eapd_mask); } static void stac92xx_free(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; if (! 
spec) return; stac92xx_shutup(codec); kfree(spec); snd_hda_detach_beep_device(codec); } static void stac92xx_set_pinctl(struct hda_codec *codec, hda_nid_t nid, unsigned int flag) { unsigned int old_ctl, pin_ctl; pin_ctl = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_PIN_WIDGET_CONTROL, 0x00); if (pin_ctl & AC_PINCTL_IN_EN) { /* * we need to check the current set-up direction of * shared input pins since they can be switched via * "xxx as Output" mixer switch */ struct sigmatel_spec *spec = codec->spec; if (nid == spec->line_switch || nid == spec->mic_switch) return; } old_ctl = pin_ctl; /* if setting pin direction bits, clear the current direction bits first */ if (flag & (AC_PINCTL_IN_EN | AC_PINCTL_OUT_EN)) pin_ctl &= ~(AC_PINCTL_IN_EN | AC_PINCTL_OUT_EN); pin_ctl |= flag; if (old_ctl != pin_ctl) snd_hda_codec_write_cache(codec, nid, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, pin_ctl); } static void stac92xx_reset_pinctl(struct hda_codec *codec, hda_nid_t nid, unsigned int flag) { unsigned int pin_ctl = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_PIN_WIDGET_CONTROL, 0x00); if (pin_ctl & flag) snd_hda_codec_write_cache(codec, nid, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, pin_ctl & ~flag); } static inline int get_pin_presence(struct hda_codec *codec, hda_nid_t nid) { if (!nid) return 0; return snd_hda_jack_detect(codec, nid); } static void stac92xx_line_out_detect(struct hda_codec *codec, int presence) { struct sigmatel_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->autocfg; int i; for (i = 0; i < cfg->line_outs; i++) { if (presence) break; presence = get_pin_presence(codec, cfg->line_out_pins[i]); if (presence) { unsigned int pinctl; pinctl = snd_hda_codec_read(codec, cfg->line_out_pins[i], 0, AC_VERB_GET_PIN_WIDGET_CONTROL, 0); if (pinctl & AC_PINCTL_IN_EN) presence = 0; /* mic- or line-input */ } } if (presence) { /* disable speakers */ for (i = 0; i < cfg->speaker_outs; i++) stac92xx_reset_pinctl(codec, cfg->speaker_pins[i], AC_PINCTL_OUT_EN); if 
(spec->eapd_mask && spec->eapd_switch) stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, spec->gpio_data & ~spec->eapd_mask); } else { /* enable speakers */ for (i = 0; i < cfg->speaker_outs; i++) stac92xx_set_pinctl(codec, cfg->speaker_pins[i], AC_PINCTL_OUT_EN); if (spec->eapd_mask && spec->eapd_switch) stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, spec->gpio_data | spec->eapd_mask); } } /* return non-zero if the hp-pin of the given array index isn't * a jack-detection target */ static int no_hp_sensing(struct sigmatel_spec *spec, int i) { struct auto_pin_cfg *cfg = &spec->autocfg; /* ignore sensing of shared line and mic jacks */ if (cfg->hp_pins[i] == spec->line_switch) return 1; if (cfg->hp_pins[i] == spec->mic_switch) return 1; /* ignore if the pin is set as line-out */ if (cfg->hp_pins[i] == spec->hp_switch) return 1; return 0; } static void stac92xx_hp_detect(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->autocfg; int i, presence; presence = 0; if (spec->gpio_mute) presence = !(snd_hda_codec_read(codec, codec->afg, 0, AC_VERB_GET_GPIO_DATA, 0) & spec->gpio_mute); for (i = 0; i < cfg->hp_outs; i++) { if (presence) break; if (no_hp_sensing(spec, i)) continue; presence = get_pin_presence(codec, cfg->hp_pins[i]); if (presence) { unsigned int pinctl; pinctl = snd_hda_codec_read(codec, cfg->hp_pins[i], 0, AC_VERB_GET_PIN_WIDGET_CONTROL, 0); if (pinctl & AC_PINCTL_IN_EN) presence = 0; /* mic- or line-input */ } } if (presence) { /* disable lineouts */ if (spec->hp_switch) stac92xx_reset_pinctl(codec, spec->hp_switch, AC_PINCTL_OUT_EN); for (i = 0; i < cfg->line_outs; i++) stac92xx_reset_pinctl(codec, cfg->line_out_pins[i], AC_PINCTL_OUT_EN); } else { /* enable lineouts */ if (spec->hp_switch) stac92xx_set_pinctl(codec, spec->hp_switch, AC_PINCTL_OUT_EN); for (i = 0; i < cfg->line_outs; i++) stac92xx_set_pinctl(codec, cfg->line_out_pins[i], AC_PINCTL_OUT_EN); } stac92xx_line_out_detect(codec, 
presence); /* toggle hp outs */ for (i = 0; i < cfg->hp_outs; i++) { unsigned int val = AC_PINCTL_OUT_EN | AC_PINCTL_HP_EN; if (no_hp_sensing(spec, i)) continue; if (1 /*presence*/) stac92xx_set_pinctl(codec, cfg->hp_pins[i], val); #if 0 /* FIXME */ /* Resetting the pinctl like below may lead to (a sort of) regressions * on some devices since they use the HP pin actually for line/speaker * outs although the default pin config shows a different pin (that is * wrong and useless). * * So, it's basically a problem of default pin configs, likely a BIOS issue. * But, disabling the code below just works around it, and I'm too tired of * bug reports with such devices... */ else stac92xx_reset_pinctl(codec, cfg->hp_pins[i], val); #endif /* FIXME */ } } static void stac_toggle_power_map(struct hda_codec *codec, hda_nid_t nid, int enable) { struct sigmatel_spec *spec = codec->spec; unsigned int idx, val; for (idx = 0; idx < spec->num_pwrs; idx++) { if (spec->pwr_nids[idx] == nid) break; } if (idx >= spec->num_pwrs) return; idx = 1 << idx; val = snd_hda_codec_read(codec, codec->afg, 0, 0x0fec, 0x0) & 0xff; if (enable) val &= ~idx; else val |= idx; /* power down unused output ports */ snd_hda_codec_write(codec, codec->afg, 0, 0x7ec, val); } static void stac92xx_pin_sense(struct hda_codec *codec, hda_nid_t nid) { stac_toggle_power_map(codec, nid, get_pin_presence(codec, nid)); } /* get the pin connection (fixed, none, etc) */ static unsigned int stac_get_defcfg_connect(struct hda_codec *codec, int idx) { struct sigmatel_spec *spec = codec->spec; unsigned int cfg; cfg = snd_hda_codec_get_pincfg(codec, spec->pin_nids[idx]); return get_defcfg_connect(cfg); } static int stac92xx_connected_ports(struct hda_codec *codec, const hda_nid_t *nids, int num_nids) { struct sigmatel_spec *spec = codec->spec; int idx, num; unsigned int def_conf; for (num = 0; num < num_nids; num++) { for (idx = 0; idx < spec->num_pins; idx++) if (spec->pin_nids[idx] == nids[num]) break; if (idx >= 
spec->num_pins) break; def_conf = stac_get_defcfg_connect(codec, idx); if (def_conf == AC_JACK_PORT_NONE) break; } return num; } static void stac92xx_mic_detect(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; struct sigmatel_mic_route *mic; if (get_pin_presence(codec, spec->ext_mic.pin)) mic = &spec->ext_mic; else if (get_pin_presence(codec, spec->dock_mic.pin)) mic = &spec->dock_mic; else mic = &spec->int_mic; if (mic->dmux_idx >= 0) snd_hda_codec_write_cache(codec, spec->dmux_nids[0], 0, AC_VERB_SET_CONNECT_SEL, mic->dmux_idx); if (mic->mux_idx >= 0) snd_hda_codec_write_cache(codec, spec->mux_nids[0], 0, AC_VERB_SET_CONNECT_SEL, mic->mux_idx); } static void handle_unsol_event(struct hda_codec *codec, struct hda_jack_tbl *event) { struct sigmatel_spec *spec = codec->spec; int data; switch (event->action) { case STAC_HP_EVENT: case STAC_LO_EVENT: stac92xx_hp_detect(codec); break; case STAC_MIC_EVENT: stac92xx_mic_detect(codec); break; } switch (event->action) { case STAC_HP_EVENT: case STAC_LO_EVENT: case STAC_MIC_EVENT: case STAC_INSERT_EVENT: case STAC_PWR_EVENT: if (spec->num_pwrs > 0) stac92xx_pin_sense(codec, event->nid); switch (codec->subsystem_id) { case 0x103c308f: if (event->nid == 0xb) { int pin = AC_PINCTL_IN_EN; if (get_pin_presence(codec, 0xa) && get_pin_presence(codec, 0xb)) pin |= AC_PINCTL_VREF_80; if (!get_pin_presence(codec, 0xb)) pin |= AC_PINCTL_VREF_80; /* toggle VREF state based on mic + hp pin * status */ stac92xx_auto_set_pinctl(codec, 0x0a, pin); } } break; case STAC_VREF_EVENT: data = snd_hda_codec_read(codec, codec->afg, 0, AC_VERB_GET_GPIO_DATA, 0); /* toggle VREF state based on GPIOx status */ snd_hda_codec_write(codec, codec->afg, 0, 0x7e0, !!(data & (1 << event->private_data))); break; } } static void stac_issue_unsol_event(struct hda_codec *codec, hda_nid_t nid) { struct hda_jack_tbl *event = snd_hda_jack_tbl_get(codec, nid); if (!event) return; handle_unsol_event(codec, event); } static void 
stac92xx_unsol_event(struct hda_codec *codec, unsigned int res) { struct hda_jack_tbl *event; int tag; tag = (res >> 26) & 0x7f; event = snd_hda_jack_tbl_get_from_tag(codec, tag); if (!event) return; event->jack_dirty = 1; handle_unsol_event(codec, event); snd_hda_jack_report_sync(codec); } static int hp_blike_system(u32 subsystem_id); static void set_hp_led_gpio(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; unsigned int gpio; if (spec->gpio_led) return; gpio = snd_hda_param_read(codec, codec->afg, AC_PAR_GPIO_CAP); gpio &= AC_GPIO_IO_COUNT; if (gpio > 3) spec->gpio_led = 0x08; /* GPIO 3 */ else spec->gpio_led = 0x01; /* GPIO 0 */ } /* * This method searches for the mute LED GPIO configuration * provided as OEM string in SMBIOS. The format of that string * is HP_Mute_LED_P_G or HP_Mute_LED_P * where P can be 0 or 1 and defines mute LED GPIO control state (low/high) * that corresponds to the NOT muted state of the master volume * and G is the index of the GPIO to use as the mute LED control (0..9) * If _G portion is missing it is assigned based on the codec ID * * So, HP B-series like systems may have HP_Mute_LED_0 (current models) * or HP_Mute_LED_0_3 (future models) OEM SMBIOS strings * * * The dv-series laptops don't seem to have the HP_Mute_LED* strings in * SMBIOS - at least the ones I have seen do not have them - which include * my own system (HP Pavilion dv6-1110ax) and my cousin's * HP Pavilion dv9500t CTO. * Need more information on whether it is true across the entire series. 
* -- kunal */ static int find_mute_led_cfg(struct hda_codec *codec, int default_polarity) { struct sigmatel_spec *spec = codec->spec; const struct dmi_device *dev = NULL; if ((codec->subsystem_id >> 16) == PCI_VENDOR_ID_HP) { while ((dev = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, NULL, dev))) { if (sscanf(dev->name, "HP_Mute_LED_%d_%x", &spec->gpio_led_polarity, &spec->gpio_led) == 2) { unsigned int max_gpio; max_gpio = snd_hda_param_read(codec, codec->afg, AC_PAR_GPIO_CAP); max_gpio &= AC_GPIO_IO_COUNT; if (spec->gpio_led < max_gpio) spec->gpio_led = 1 << spec->gpio_led; else spec->vref_mute_led_nid = spec->gpio_led; return 1; } if (sscanf(dev->name, "HP_Mute_LED_%d", &spec->gpio_led_polarity) == 1) { set_hp_led_gpio(codec); return 1; } /* BIOS bug: unfilled OEM string */ if (strstr(dev->name, "HP_Mute_LED_P_G")) { set_hp_led_gpio(codec); switch (codec->subsystem_id) { case 0x103c148a: spec->gpio_led_polarity = 0; break; default: spec->gpio_led_polarity = 1; break; } return 1; } } /* * Fallback case - if we don't find the DMI strings, * we statically set the GPIO - if not a B-series system * and default polarity is provided */ if (!hp_blike_system(codec->subsystem_id) && (default_polarity == 0 || default_polarity == 1)) { set_hp_led_gpio(codec); spec->gpio_led_polarity = default_polarity; return 1; } } return 0; } static int hp_blike_system(u32 subsystem_id) { switch (subsystem_id) { case 0x103c1520: case 0x103c1521: case 0x103c1523: case 0x103c1524: case 0x103c1525: case 0x103c1722: case 0x103c1723: case 0x103c1724: case 0x103c1725: case 0x103c1726: case 0x103c1727: case 0x103c1728: case 0x103c1729: case 0x103c172a: case 0x103c172b: case 0x103c307e: case 0x103c307f: case 0x103c3080: case 0x103c3081: case 0x103c7007: case 0x103c7008: return 1; } return 0; } #ifdef CONFIG_PROC_FS static void stac92hd_proc_hook(struct snd_info_buffer *buffer, struct hda_codec *codec, hda_nid_t nid) { if (nid == codec->afg) snd_iprintf(buffer, "Power-Map: 0x%02x\n", 
snd_hda_codec_read(codec, nid, 0, 0x0fec, 0x0)); } static void analog_loop_proc_hook(struct snd_info_buffer *buffer, struct hda_codec *codec, unsigned int verb) { snd_iprintf(buffer, "Analog Loopback: 0x%02x\n", snd_hda_codec_read(codec, codec->afg, 0, verb, 0)); } /* stac92hd71bxx, stac92hd73xx */ static void stac92hd7x_proc_hook(struct snd_info_buffer *buffer, struct hda_codec *codec, hda_nid_t nid) { stac92hd_proc_hook(buffer, codec, nid); if (nid == codec->afg) analog_loop_proc_hook(buffer, codec, 0xfa0); } static void stac9205_proc_hook(struct snd_info_buffer *buffer, struct hda_codec *codec, hda_nid_t nid) { if (nid == codec->afg) analog_loop_proc_hook(buffer, codec, 0xfe0); } static void stac927x_proc_hook(struct snd_info_buffer *buffer, struct hda_codec *codec, hda_nid_t nid) { if (nid == codec->afg) analog_loop_proc_hook(buffer, codec, 0xfeb); } #else #define stac92hd_proc_hook NULL #define stac92hd7x_proc_hook NULL #define stac9205_proc_hook NULL #define stac927x_proc_hook NULL #endif #ifdef CONFIG_PM static int stac92xx_resume(struct hda_codec *codec) { stac92xx_init(codec); snd_hda_codec_resume_amp(codec); snd_hda_codec_resume_cache(codec); /* fake event to set up pins again to override cached values */ stac_fake_hp_events(codec); return 0; } static int stac92xx_suspend(struct hda_codec *codec, pm_message_t state) { stac92xx_shutup(codec); return 0; } static int stac92xx_pre_resume(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; /* sync mute LED */ if (spec->vref_mute_led_nid) stac_vrefout_set(codec, spec->vref_mute_led_nid, spec->vref_led); else if (spec->gpio_led) stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, spec->gpio_data); return 0; } static void stac92xx_set_power_state(struct hda_codec *codec, hda_nid_t fg, unsigned int power_state) { unsigned int afg_power_state = power_state; struct sigmatel_spec *spec = codec->spec; if (power_state == AC_PWRST_D3) { if (spec->vref_mute_led_nid) { /* with vref-out pin used for mute 
led control * codec AFG is prevented from D3 state */ afg_power_state = AC_PWRST_D1; } /* this delay seems necessary to avoid click noise at power-down */ msleep(100); } snd_hda_codec_read(codec, fg, 0, AC_VERB_SET_POWER_STATE, afg_power_state); snd_hda_codec_set_power_to_all(codec, fg, power_state, true); } #else #define stac92xx_suspend NULL #define stac92xx_resume NULL #define stac92xx_pre_resume NULL #define stac92xx_set_power_state NULL #endif /* CONFIG_PM */ /* update mute-LED accoring to the master switch */ static void stac92xx_update_led_status(struct hda_codec *codec, int enabled) { struct sigmatel_spec *spec = codec->spec; int muted = !enabled; if (!spec->gpio_led) return; /* LED state is inverted on these systems */ if (spec->gpio_led_polarity) muted = !muted; if (!spec->vref_mute_led_nid) { if (muted) spec->gpio_data |= spec->gpio_led; else spec->gpio_data &= ~spec->gpio_led; stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, spec->gpio_data); } else { spec->vref_led = muted ? 
AC_PINCTL_VREF_50 : AC_PINCTL_VREF_GRD;
		stac_vrefout_set(codec, spec->vref_mute_led_nid,
				 spec->vref_led);
	}
}

/* common patch ops shared by all SigmaTel/IDT codec patch entries */
static const struct hda_codec_ops stac92xx_patch_ops = {
	.build_controls = stac92xx_build_controls,
	.build_pcms = stac92xx_build_pcms,
	.init = stac92xx_init,
	.free = stac92xx_free,
	.unsol_event = stac92xx_unsol_event,
#ifdef CONFIG_PM
	.suspend = stac92xx_suspend,
	.resume = stac92xx_resume,
#endif
	.reboot_notify = stac92xx_shutup,
};

/* patch entry for STAC9200: set up the spec, apply board quirks and
 * parse the BIOS pin configuration */
static int patch_stac9200(struct hda_codec *codec)
{
	struct sigmatel_spec *spec;
	int err;

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (spec == NULL)
		return -ENOMEM;

	codec->no_trigger_sense = 1;
	codec->spec = spec;
	spec->linear_tone_beep = 1;
	spec->num_pins = ARRAY_SIZE(stac9200_pin_nids);
	spec->pin_nids = stac9200_pin_nids;
	/* match the PCI SSID against the known board table */
	spec->board_config = snd_hda_check_board_config(codec, STAC_9200_MODELS,
							stac9200_models,
							stac9200_cfg_tbl);
	if (spec->board_config < 0)
		snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
			    codec->chip_name);
	else
		stac92xx_set_config_regs(codec,
				stac9200_brd_tbl[spec->board_config]);

	spec->multiout.max_channels = 2;
	spec->multiout.num_dacs = 1;
	spec->multiout.dac_nids = stac9200_dac_nids;
	spec->adc_nids = stac9200_adc_nids;
	spec->mux_nids = stac9200_mux_nids;
	spec->num_muxes = 1;
	spec->num_dmics = 0;
	spec->num_adcs = 1;
	spec->num_pwrs = 0;

	/* some boards need the EAPD-enabling init sequence */
	if (spec->board_config == STAC_9200_M4 ||
	    spec->board_config == STAC_9200_M4_2 ||
	    spec->board_config == STAC_9200_OQO)
		spec->init = stac9200_eapd_init;
	else
		spec->init = stac9200_core_init;
	spec->mixer = stac9200_mixer;

	if (spec->board_config == STAC_9200_PANASONIC) {
		spec->gpio_mask = spec->gpio_dir = 0x09;
		spec->gpio_data = 0x00;
	}

	err = stac9200_parse_auto_config(codec);
	if (err < 0) {
		stac92xx_free(codec);
		return err;
	}

	/* CF-74 has no headphone detection, and the driver should *NOT*
	 * do detection and HP/speaker toggle because the hardware does it.
*/ if (spec->board_config == STAC_9200_PANASONIC) spec->hp_detect = 0; codec->patch_ops = stac92xx_patch_ops; return 0; } static int patch_stac925x(struct hda_codec *codec) { struct sigmatel_spec *spec; int err; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (spec == NULL) return -ENOMEM; codec->no_trigger_sense = 1; codec->spec = spec; spec->linear_tone_beep = 1; spec->num_pins = ARRAY_SIZE(stac925x_pin_nids); spec->pin_nids = stac925x_pin_nids; /* Check first for codec ID */ spec->board_config = snd_hda_check_board_codec_sid_config(codec, STAC_925x_MODELS, stac925x_models, stac925x_codec_id_cfg_tbl); /* Now checks for PCI ID, if codec ID is not found */ if (spec->board_config < 0) spec->board_config = snd_hda_check_board_config(codec, STAC_925x_MODELS, stac925x_models, stac925x_cfg_tbl); again: if (spec->board_config < 0) snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n", codec->chip_name); else stac92xx_set_config_regs(codec, stac925x_brd_tbl[spec->board_config]); spec->multiout.max_channels = 2; spec->multiout.num_dacs = 1; spec->multiout.dac_nids = stac925x_dac_nids; spec->adc_nids = stac925x_adc_nids; spec->mux_nids = stac925x_mux_nids; spec->num_muxes = 1; spec->num_adcs = 1; spec->num_pwrs = 0; switch (codec->vendor_id) { case 0x83847632: /* STAC9202 */ case 0x83847633: /* STAC9202D */ case 0x83847636: /* STAC9251 */ case 0x83847637: /* STAC9251D */ spec->num_dmics = STAC925X_NUM_DMICS; spec->dmic_nids = stac925x_dmic_nids; spec->num_dmuxes = ARRAY_SIZE(stac925x_dmux_nids); spec->dmux_nids = stac925x_dmux_nids; break; default: spec->num_dmics = 0; break; } spec->init = stac925x_core_init; spec->mixer = stac925x_mixer; spec->num_caps = 1; spec->capvols = stac925x_capvols; spec->capsws = stac925x_capsws; err = stac92xx_parse_auto_config(codec); if (!err) { if (spec->board_config < 0) { printk(KERN_WARNING "hda_codec: No auto-config is " "available, default to model=ref\n"); spec->board_config = STAC_925x_REF; goto again; } err = -EINVAL; } if (err < 
0) { stac92xx_free(codec); return err; } codec->patch_ops = stac92xx_patch_ops; return 0; } static int patch_stac92hd73xx(struct hda_codec *codec) { struct sigmatel_spec *spec; hda_nid_t conn[STAC92HD73_DAC_COUNT + 2]; int err = 0; int num_dacs; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (spec == NULL) return -ENOMEM; codec->no_trigger_sense = 1; codec->spec = spec; spec->linear_tone_beep = 0; codec->slave_dig_outs = stac92hd73xx_slave_dig_outs; spec->num_pins = ARRAY_SIZE(stac92hd73xx_pin_nids); spec->pin_nids = stac92hd73xx_pin_nids; spec->board_config = snd_hda_check_board_config(codec, STAC_92HD73XX_MODELS, stac92hd73xx_models, stac92hd73xx_cfg_tbl); /* check codec subsystem id if not found */ if (spec->board_config < 0) spec->board_config = snd_hda_check_board_codec_sid_config(codec, STAC_92HD73XX_MODELS, stac92hd73xx_models, stac92hd73xx_codec_id_cfg_tbl); again: if (spec->board_config < 0) snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n", codec->chip_name); else stac92xx_set_config_regs(codec, stac92hd73xx_brd_tbl[spec->board_config]); num_dacs = snd_hda_get_connections(codec, 0x0a, conn, STAC92HD73_DAC_COUNT + 2) - 1; if (num_dacs < 3 || num_dacs > 5) { printk(KERN_WARNING "hda_codec: Could not determine " "number of channels defaulting to DAC count\n"); num_dacs = STAC92HD73_DAC_COUNT; } spec->init = stac92hd73xx_core_init; switch (num_dacs) { case 0x3: /* 6 Channel */ spec->aloopback_ctl = stac92hd73xx_6ch_loopback; break; case 0x4: /* 8 Channel */ spec->aloopback_ctl = stac92hd73xx_8ch_loopback; break; case 0x5: /* 10 Channel */ spec->aloopback_ctl = stac92hd73xx_10ch_loopback; break; } spec->multiout.dac_nids = spec->dac_nids; spec->aloopback_mask = 0x01; spec->aloopback_shift = 8; spec->digbeep_nid = 0x1c; spec->mux_nids = stac92hd73xx_mux_nids; spec->adc_nids = stac92hd73xx_adc_nids; spec->dmic_nids = stac92hd73xx_dmic_nids; spec->dmux_nids = stac92hd73xx_dmux_nids; spec->smux_nids = stac92hd73xx_smux_nids; spec->num_muxes = 
ARRAY_SIZE(stac92hd73xx_mux_nids); spec->num_adcs = ARRAY_SIZE(stac92hd73xx_adc_nids); spec->num_dmuxes = ARRAY_SIZE(stac92hd73xx_dmux_nids); spec->num_caps = STAC92HD73XX_NUM_CAPS; spec->capvols = stac92hd73xx_capvols; spec->capsws = stac92hd73xx_capsws; switch (spec->board_config) { case STAC_DELL_EQ: spec->init = dell_eq_core_init; /* fallthru */ case STAC_DELL_M6_AMIC: case STAC_DELL_M6_DMIC: case STAC_DELL_M6_BOTH: spec->num_smuxes = 0; spec->eapd_switch = 0; switch (spec->board_config) { case STAC_DELL_M6_AMIC: /* Analog Mics */ snd_hda_codec_set_pincfg(codec, 0x0b, 0x90A70170); spec->num_dmics = 0; break; case STAC_DELL_M6_DMIC: /* Digital Mics */ snd_hda_codec_set_pincfg(codec, 0x13, 0x90A60160); spec->num_dmics = 1; break; case STAC_DELL_M6_BOTH: /* Both */ snd_hda_codec_set_pincfg(codec, 0x0b, 0x90A70170); snd_hda_codec_set_pincfg(codec, 0x13, 0x90A60160); spec->num_dmics = 1; break; } break; case STAC_ALIENWARE_M17X: spec->num_dmics = STAC92HD73XX_NUM_DMICS; spec->num_smuxes = ARRAY_SIZE(stac92hd73xx_smux_nids); spec->eapd_switch = 0; break; default: spec->num_dmics = STAC92HD73XX_NUM_DMICS; spec->num_smuxes = ARRAY_SIZE(stac92hd73xx_smux_nids); spec->eapd_switch = 1; break; } if (spec->board_config != STAC_92HD73XX_REF) { /* GPIO0 High = Enable EAPD */ spec->eapd_mask = spec->gpio_mask = spec->gpio_dir = 0x1; spec->gpio_data = 0x01; } spec->num_pwrs = ARRAY_SIZE(stac92hd73xx_pwr_nids); spec->pwr_nids = stac92hd73xx_pwr_nids; err = stac92xx_parse_auto_config(codec); if (!err) { if (spec->board_config < 0) { printk(KERN_WARNING "hda_codec: No auto-config is " "available, default to model=ref\n"); spec->board_config = STAC_92HD73XX_REF; goto again; } err = -EINVAL; } if (err < 0) { stac92xx_free(codec); return err; } if (spec->board_config == STAC_92HD73XX_NO_JD) spec->hp_detect = 0; codec->patch_ops = stac92xx_patch_ops; codec->proc_widget_hook = stac92hd7x_proc_hook; return 0; } static int hp_bnb2011_with_dock(struct hda_codec *codec) { if 
(codec->vendor_id != 0x111d7605 &&
	    codec->vendor_id != 0x111d76d1)
		return 0;

	/* HP notebooks with a docking station, matched by PCI SSID */
	switch (codec->subsystem_id) {
	case 0x103c1618:
	case 0x103c1619:
	case 0x103c161a:
	case 0x103c161b:
	case 0x103c161c:
	case 0x103c161d:
	case 0x103c161e:
	case 0x103c161f:
	case 0x103c162a:
	case 0x103c162b:
	case 0x103c1630:
	case 0x103c1631:
	case 0x103c1633:
	case 0x103c1634:
	case 0x103c1635:
	case 0x103c3587:
	case 0x103c3588:
	case 0x103c3589:
	case 0x103c358a:
	case 0x103c3667:
	case 0x103c3668:
	case 0x103c3669:
		return 1;
	}
	return 0;
}

/* record a pin NID for the auto-parser; connected mic-in pins that are
 * listed in stac92hd83xxx_dmic_nids are also collected as digital mics */
static void stac92hd8x_add_pin(struct hda_codec *codec, hda_nid_t nid)
{
	struct sigmatel_spec *spec = codec->spec;
	unsigned int def_conf = snd_hda_codec_get_pincfg(codec, nid);
	int i;

	spec->auto_pin_nids[spec->auto_pin_cnt] = nid;
	spec->auto_pin_cnt++;

	if (get_defcfg_device(def_conf) == AC_JACK_MIC_IN &&
	    get_defcfg_connect(def_conf) != AC_JACK_PORT_NONE) {
		for (i = 0; i < ARRAY_SIZE(stac92hd83xxx_dmic_nids); i++) {
			if (nid == stac92hd83xxx_dmic_nids[i]) {
				spec->auto_dmic_nids[spec->auto_dmic_cnt] = nid;
				spec->auto_dmic_cnt++;
			}
		}
	}
}

/* record an analog ADC NID for the auto-parser */
static void stac92hd8x_add_adc(struct hda_codec *codec, hda_nid_t nid)
{
	struct sigmatel_spec *spec = codec->spec;

	spec->auto_adc_nids[spec->auto_adc_cnt] = nid;
	spec->auto_adc_cnt++;
}

/* attach a selector (mux) widget to the ADC it feeds; the first mux
 * found per ADC also provides the capture volume, and a mux that can
 * select a digital mic doubles as that ADC's dmux */
static void stac92hd8x_add_mux(struct hda_codec *codec, hda_nid_t nid)
{
	int i, j;
	struct sigmatel_spec *spec = codec->spec;

	for (i = 0; i < spec->auto_adc_cnt; i++) {
		if (get_connection_index(codec,
				spec->auto_adc_nids[i], nid) >= 0) {
			/* mux and volume for adc_nids[i] */
			if (!spec->auto_mux_nids[i]) {
				spec->auto_mux_nids[i] = nid;
				/* 92hd codecs capture volume is in mux */
				spec->auto_capvols[i] = HDA_COMPOSE_AMP_VAL(nid,
							3, 0, HDA_OUTPUT);
			}
			for (j = 0; j < spec->auto_dmic_cnt; j++) {
				if (get_connection_index(codec, nid,
					spec->auto_dmic_nids[j]) >= 0) {
					/* dmux for adc_nids[i] */
					if (!spec->auto_dmux_nids[i])
						spec->auto_dmux_nids[i] = nid;
					break;
				}
			}
			break;
		}
	}
}

/* walk all widgets of the codec and fill the auto-detected pin / ADC /
 * mux tables in the spec */
static void stac92hd8x_fill_auto_spec(struct hda_codec *codec)
{
	hda_nid_t nid, end_nid;
unsigned int wid_caps, wid_type; struct sigmatel_spec *spec = codec->spec; end_nid = codec->start_nid + codec->num_nodes; for (nid = codec->start_nid; nid < end_nid; nid++) { wid_caps = get_wcaps(codec, nid); wid_type = get_wcaps_type(wid_caps); if (wid_type == AC_WID_PIN) stac92hd8x_add_pin(codec, nid); if (wid_type == AC_WID_AUD_IN && !(wid_caps & AC_WCAP_DIGITAL)) stac92hd8x_add_adc(codec, nid); } for (nid = codec->start_nid; nid < end_nid; nid++) { wid_caps = get_wcaps(codec, nid); wid_type = get_wcaps_type(wid_caps); if (wid_type == AC_WID_AUD_SEL) stac92hd8x_add_mux(codec, nid); } spec->pin_nids = spec->auto_pin_nids; spec->num_pins = spec->auto_pin_cnt; spec->adc_nids = spec->auto_adc_nids; spec->num_adcs = spec->auto_adc_cnt; spec->capvols = spec->auto_capvols; spec->capsws = spec->auto_capvols; spec->num_caps = spec->auto_adc_cnt; spec->mux_nids = spec->auto_mux_nids; spec->num_muxes = spec->auto_adc_cnt; spec->dmux_nids = spec->auto_dmux_nids; spec->num_dmuxes = spec->auto_adc_cnt; spec->dmic_nids = spec->auto_dmic_nids; spec->num_dmics = spec->auto_dmic_cnt; } static int patch_stac92hd83xxx(struct hda_codec *codec) { struct sigmatel_spec *spec; int err; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (spec == NULL) return -ENOMEM; if (hp_bnb2011_with_dock(codec)) { snd_hda_codec_set_pincfg(codec, 0xa, 0x2101201f); snd_hda_codec_set_pincfg(codec, 0xf, 0x2181205e); } codec->no_trigger_sense = 1; codec->spec = spec; stac92hd8x_fill_auto_spec(codec); spec->linear_tone_beep = 0; codec->slave_dig_outs = stac92hd83xxx_slave_dig_outs; spec->digbeep_nid = 0x21; spec->pwr_nids = stac92hd83xxx_pwr_nids; spec->num_pwrs = ARRAY_SIZE(stac92hd83xxx_pwr_nids); spec->multiout.dac_nids = spec->dac_nids; spec->init = stac92hd83xxx_core_init; spec->board_config = snd_hda_check_board_config(codec, STAC_92HD83XXX_MODELS, stac92hd83xxx_models, stac92hd83xxx_cfg_tbl); /* check codec subsystem id if not found */ if (spec->board_config < 0) spec->board_config = 
snd_hda_check_board_codec_sid_config(codec, STAC_92HD83XXX_MODELS, stac92hd83xxx_models, stac92hd83xxx_codec_id_cfg_tbl); again: if (spec->board_config < 0) snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n", codec->chip_name); else stac92xx_set_config_regs(codec, stac92hd83xxx_brd_tbl[spec->board_config]); codec->patch_ops = stac92xx_patch_ops; switch (spec->board_config) { case STAC_HP_ZEPHYR: spec->init = stac92hd83xxx_hp_zephyr_init; break; } if (find_mute_led_cfg(codec, -1/*no default cfg*/)) snd_printd("mute LED gpio %d polarity %d\n", spec->gpio_led, spec->gpio_led_polarity); if (spec->gpio_led) { if (!spec->vref_mute_led_nid) { spec->gpio_mask |= spec->gpio_led; spec->gpio_dir |= spec->gpio_led; spec->gpio_data |= spec->gpio_led; } else { codec->patch_ops.set_power_state = stac92xx_set_power_state; } #ifdef CONFIG_PM codec->patch_ops.pre_resume = stac92xx_pre_resume; #endif } err = stac92xx_parse_auto_config(codec); if (!err) { if (spec->board_config < 0) { printk(KERN_WARNING "hda_codec: No auto-config is " "available, default to model=ref\n"); spec->board_config = STAC_92HD83XXX_REF; goto again; } err = -EINVAL; } if (err < 0) { stac92xx_free(codec); return err; } codec->proc_widget_hook = stac92hd_proc_hook; return 0; } static int stac92hd71bxx_connected_smuxes(struct hda_codec *codec, hda_nid_t dig0pin) { struct sigmatel_spec *spec = codec->spec; int idx; for (idx = 0; idx < spec->num_pins; idx++) if (spec->pin_nids[idx] == dig0pin) break; if ((idx + 2) >= spec->num_pins) return 0; /* dig1pin case */ if (stac_get_defcfg_connect(codec, idx + 1) != AC_JACK_PORT_NONE) return 2; /* dig0pin + dig2pin case */ if (stac_get_defcfg_connect(codec, idx + 2) != AC_JACK_PORT_NONE) return 2; if (stac_get_defcfg_connect(codec, idx) != AC_JACK_PORT_NONE) return 1; else return 0; } /* HP dv7 bass switch - GPIO5 */ #define stac_hp_bass_gpio_info snd_ctl_boolean_mono_info static int stac_hp_bass_gpio_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value 
*ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct sigmatel_spec *spec = codec->spec;

	/* report the current state of the bass GPIO bit (0x20 = GPIO5) */
	ucontrol->value.integer.value[0] = !!(spec->gpio_data & 0x20);
	return 0;
}

/* set the bass-speaker GPIO5 bit; returns 1 when the value changed */
static int stac_hp_bass_gpio_put(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct sigmatel_spec *spec = codec->spec;
	unsigned int gpio_data;

	gpio_data = (spec->gpio_data & ~0x20) |
			(ucontrol->value.integer.value[0] ? 0x20 : 0);

	if (gpio_data == spec->gpio_data)
		return 0;
	spec->gpio_data = gpio_data;
	stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, spec->gpio_data);

	return 1;
}

static const struct snd_kcontrol_new stac_hp_bass_sw_ctrl = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.info = stac_hp_bass_gpio_info,
	.get = stac_hp_bass_gpio_get,
	.put = stac_hp_bass_gpio_put,
};

/* create the "Bass Speaker Playback Switch" control and claim GPIO5 */
static int stac_add_hp_bass_switch(struct hda_codec *codec)
{
	struct sigmatel_spec *spec = codec->spec;

	if (!stac_control_new(spec, &stac_hp_bass_sw_ctrl,
			      "Bass Speaker Playback Switch", 0))
		return -ENOMEM;

	spec->gpio_mask |= 0x20;
	spec->gpio_dir |= 0x20;
	spec->gpio_data |= 0x20;

	return 0;
}

/* patch entry for 92HD71Bxx / 92HD75Bx family */
static int patch_stac92hd71bxx(struct hda_codec *codec)
{
	struct sigmatel_spec *spec;
	const struct hda_verb *unmute_init = stac92hd71bxx_unmute_core_init;
	unsigned int pin_cfg;
	int err = 0;

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (spec == NULL)
		return -ENOMEM;

	codec->no_trigger_sense = 1;
	codec->spec = spec;
	spec->linear_tone_beep = 0;
	codec->patch_ops = stac92xx_patch_ops;
	spec->num_pins = STAC92HD71BXX_NUM_PINS;
	/* the pin list depends on the exact chip variant */
	switch (codec->vendor_id) {
	case 0x111d76b6:
	case 0x111d76b7:
		spec->pin_nids = stac92hd71bxx_pin_nids_4port;
		break;
	case 0x111d7603:
	case 0x111d7608:
		/* On 92HD75Bx 0x27 isn't a pin nid */
		spec->num_pins--;
		/* fallthrough */
	default:
		spec->pin_nids = stac92hd71bxx_pin_nids_6port;
	}
	spec->num_pwrs = ARRAY_SIZE(stac92hd71bxx_pwr_nids);
	spec->board_config = snd_hda_check_board_config(codec,
							STAC_92HD71BXX_MODELS,
							stac92hd71bxx_models,
stac92hd71bxx_cfg_tbl); again: if (spec->board_config < 0) snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n", codec->chip_name); else stac92xx_set_config_regs(codec, stac92hd71bxx_brd_tbl[spec->board_config]); if (spec->board_config != STAC_92HD71BXX_REF) { /* GPIO0 = EAPD */ spec->gpio_mask = 0x01; spec->gpio_dir = 0x01; spec->gpio_data = 0x01; } spec->dmic_nids = stac92hd71bxx_dmic_nids; spec->dmux_nids = stac92hd71bxx_dmux_nids; spec->num_caps = STAC92HD71BXX_NUM_CAPS; spec->capvols = stac92hd71bxx_capvols; spec->capsws = stac92hd71bxx_capsws; switch (codec->vendor_id) { case 0x111d76b6: /* 4 Port without Analog Mixer */ case 0x111d76b7: unmute_init++; /* fallthru */ case 0x111d76b4: /* 6 Port without Analog Mixer */ case 0x111d76b5: spec->init = stac92hd71bxx_core_init; codec->slave_dig_outs = stac92hd71bxx_slave_dig_outs; spec->num_dmics = stac92xx_connected_ports(codec, stac92hd71bxx_dmic_nids, STAC92HD71BXX_NUM_DMICS); break; case 0x111d7608: /* 5 Port with Analog Mixer */ switch (spec->board_config) { case STAC_HP_M4: /* Enable VREF power saving on GPIO1 detect */ err = stac_add_event(codec, codec->afg, STAC_VREF_EVENT, 0x02); if (err < 0) return err; snd_hda_codec_write_cache(codec, codec->afg, 0, AC_VERB_SET_GPIO_UNSOLICITED_RSP_MASK, 0x02); snd_hda_jack_detect_enable(codec, codec->afg, 0); spec->gpio_mask |= 0x02; break; } if ((codec->revision_id & 0xf) == 0 || (codec->revision_id & 0xf) == 1) spec->stream_delay = 40; /* 40 milliseconds */ /* disable VSW */ spec->init = stac92hd71bxx_core_init; unmute_init++; snd_hda_codec_set_pincfg(codec, 0x0f, 0x40f000f0); snd_hda_codec_set_pincfg(codec, 0x19, 0x40f000f3); spec->dmic_nids = stac92hd71bxx_dmic_5port_nids; spec->num_dmics = stac92xx_connected_ports(codec, stac92hd71bxx_dmic_5port_nids, STAC92HD71BXX_NUM_DMICS - 1); break; case 0x111d7603: /* 6 Port with Analog Mixer */ if ((codec->revision_id & 0xf) == 1) spec->stream_delay = 40; /* 40 milliseconds */ /* fallthru */ default: spec->init = 
stac92hd71bxx_core_init; codec->slave_dig_outs = stac92hd71bxx_slave_dig_outs; spec->num_dmics = stac92xx_connected_ports(codec, stac92hd71bxx_dmic_nids, STAC92HD71BXX_NUM_DMICS); break; } if (get_wcaps(codec, 0xa) & AC_WCAP_IN_AMP) snd_hda_sequence_write_cache(codec, unmute_init); spec->aloopback_ctl = stac92hd71bxx_loopback; spec->aloopback_mask = 0x50; spec->aloopback_shift = 0; spec->powerdown_adcs = 1; spec->digbeep_nid = 0x26; spec->mux_nids = stac92hd71bxx_mux_nids; spec->adc_nids = stac92hd71bxx_adc_nids; spec->smux_nids = stac92hd71bxx_smux_nids; spec->pwr_nids = stac92hd71bxx_pwr_nids; spec->num_muxes = ARRAY_SIZE(stac92hd71bxx_mux_nids); spec->num_adcs = ARRAY_SIZE(stac92hd71bxx_adc_nids); spec->num_dmuxes = ARRAY_SIZE(stac92hd71bxx_dmux_nids); spec->num_smuxes = stac92hd71bxx_connected_smuxes(codec, 0x1e); snd_printdd("Found board config: %d\n", spec->board_config); switch (spec->board_config) { case STAC_HP_M4: /* enable internal microphone */ snd_hda_codec_set_pincfg(codec, 0x0e, 0x01813040); stac92xx_auto_set_pinctl(codec, 0x0e, AC_PINCTL_IN_EN | AC_PINCTL_VREF_80); /* fallthru */ case STAC_DELL_M4_2: spec->num_dmics = 0; spec->num_smuxes = 0; spec->num_dmuxes = 0; break; case STAC_DELL_M4_1: case STAC_DELL_M4_3: spec->num_dmics = 1; spec->num_smuxes = 0; spec->num_dmuxes = 1; break; case STAC_HP_DV4_1222NR: spec->num_dmics = 1; /* I don't know if it needs 1 or 2 smuxes - will wait for * bug reports to fix if needed */ spec->num_smuxes = 1; spec->num_dmuxes = 1; /* fallthrough */ case STAC_HP_DV4: spec->gpio_led = 0x01; /* fallthrough */ case STAC_HP_DV5: snd_hda_codec_set_pincfg(codec, 0x0d, 0x90170010); stac92xx_auto_set_pinctl(codec, 0x0d, AC_PINCTL_OUT_EN); /* HP dv6 gives the headphone pin as a line-out. Thus we * need to set hp_detect flag here to force to enable HP * detection. 
*/ spec->hp_detect = 1; break; case STAC_HP_HDX: spec->num_dmics = 1; spec->num_dmuxes = 1; spec->num_smuxes = 1; spec->gpio_led = 0x08; break; } if (hp_blike_system(codec->subsystem_id)) { pin_cfg = snd_hda_codec_get_pincfg(codec, 0x0f); if (get_defcfg_device(pin_cfg) == AC_JACK_LINE_OUT || get_defcfg_device(pin_cfg) == AC_JACK_SPEAKER || get_defcfg_device(pin_cfg) == AC_JACK_HP_OUT) { /* It was changed in the BIOS to just satisfy MS DTM. * Lets turn it back into slaved HP */ pin_cfg = (pin_cfg & (~AC_DEFCFG_DEVICE)) | (AC_JACK_HP_OUT << AC_DEFCFG_DEVICE_SHIFT); pin_cfg = (pin_cfg & (~(AC_DEFCFG_DEF_ASSOC | AC_DEFCFG_SEQUENCE))) | 0x1f; snd_hda_codec_set_pincfg(codec, 0x0f, pin_cfg); } } if (find_mute_led_cfg(codec, 1)) snd_printd("mute LED gpio %d polarity %d\n", spec->gpio_led, spec->gpio_led_polarity); if (spec->gpio_led) { if (!spec->vref_mute_led_nid) { spec->gpio_mask |= spec->gpio_led; spec->gpio_dir |= spec->gpio_led; spec->gpio_data |= spec->gpio_led; } else { codec->patch_ops.set_power_state = stac92xx_set_power_state; } #ifdef CONFIG_PM codec->patch_ops.pre_resume = stac92xx_pre_resume; #endif } spec->multiout.dac_nids = spec->dac_nids; err = stac92xx_parse_auto_config(codec); if (!err) { if (spec->board_config < 0) { printk(KERN_WARNING "hda_codec: No auto-config is " "available, default to model=ref\n"); spec->board_config = STAC_92HD71BXX_REF; goto again; } err = -EINVAL; } if (err < 0) { stac92xx_free(codec); return err; } /* enable bass on HP dv7 */ if (spec->board_config == STAC_HP_DV4 || spec->board_config == STAC_HP_DV5) { unsigned int cap; cap = snd_hda_param_read(codec, 0x1, AC_PAR_GPIO_CAP); cap &= AC_GPIO_IO_COUNT; if (cap >= 6) stac_add_hp_bass_switch(codec); } codec->proc_widget_hook = stac92hd7x_proc_hook; return 0; } static int patch_stac922x(struct hda_codec *codec) { struct sigmatel_spec *spec; int err; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (spec == NULL) return -ENOMEM; codec->no_trigger_sense = 1; codec->spec = spec; 
spec->linear_tone_beep = 1; spec->num_pins = ARRAY_SIZE(stac922x_pin_nids); spec->pin_nids = stac922x_pin_nids; spec->board_config = snd_hda_check_board_config(codec, STAC_922X_MODELS, stac922x_models, stac922x_cfg_tbl); if (spec->board_config == STAC_INTEL_MAC_AUTO) { spec->gpio_mask = spec->gpio_dir = 0x03; spec->gpio_data = 0x03; /* Intel Macs have all same PCI SSID, so we need to check * codec SSID to distinguish the exact models */ printk(KERN_INFO "hda_codec: STAC922x, Apple subsys_id=%x\n", codec->subsystem_id); switch (codec->subsystem_id) { case 0x106b0800: spec->board_config = STAC_INTEL_MAC_V1; break; case 0x106b0600: case 0x106b0700: spec->board_config = STAC_INTEL_MAC_V2; break; case 0x106b0e00: case 0x106b0f00: case 0x106b1600: case 0x106b1700: case 0x106b0200: case 0x106b1e00: spec->board_config = STAC_INTEL_MAC_V3; break; case 0x106b1a00: case 0x00000100: spec->board_config = STAC_INTEL_MAC_V4; break; case 0x106b0a00: case 0x106b2200: spec->board_config = STAC_INTEL_MAC_V5; break; default: spec->board_config = STAC_INTEL_MAC_V3; break; } } again: if (spec->board_config < 0) snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n", codec->chip_name); else stac92xx_set_config_regs(codec, stac922x_brd_tbl[spec->board_config]); spec->adc_nids = stac922x_adc_nids; spec->mux_nids = stac922x_mux_nids; spec->num_muxes = ARRAY_SIZE(stac922x_mux_nids); spec->num_adcs = ARRAY_SIZE(stac922x_adc_nids); spec->num_dmics = 0; spec->num_pwrs = 0; spec->init = stac922x_core_init; spec->num_caps = STAC922X_NUM_CAPS; spec->capvols = stac922x_capvols; spec->capsws = stac922x_capsws; spec->multiout.dac_nids = spec->dac_nids; err = stac92xx_parse_auto_config(codec); if (!err) { if (spec->board_config < 0) { printk(KERN_WARNING "hda_codec: No auto-config is " "available, default to model=ref\n"); spec->board_config = STAC_D945_REF; goto again; } err = -EINVAL; } if (err < 0) { stac92xx_free(codec); return err; } codec->patch_ops = stac92xx_patch_ops; /* Fix Mux capture 
level; max to 2 */ snd_hda_override_amp_caps(codec, 0x12, HDA_OUTPUT, (0 << AC_AMPCAP_OFFSET_SHIFT) | (2 << AC_AMPCAP_NUM_STEPS_SHIFT) | (0x27 << AC_AMPCAP_STEP_SIZE_SHIFT) | (0 << AC_AMPCAP_MUTE_SHIFT)); return 0; } static int patch_stac927x(struct hda_codec *codec) { struct sigmatel_spec *spec; int err; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (spec == NULL) return -ENOMEM; codec->no_trigger_sense = 1; codec->spec = spec; spec->linear_tone_beep = 1; codec->slave_dig_outs = stac927x_slave_dig_outs; spec->num_pins = ARRAY_SIZE(stac927x_pin_nids); spec->pin_nids = stac927x_pin_nids; spec->board_config = snd_hda_check_board_config(codec, STAC_927X_MODELS, stac927x_models, stac927x_cfg_tbl); again: if (spec->board_config < 0) snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n", codec->chip_name); else stac92xx_set_config_regs(codec, stac927x_brd_tbl[spec->board_config]); spec->digbeep_nid = 0x23; spec->adc_nids = stac927x_adc_nids; spec->num_adcs = ARRAY_SIZE(stac927x_adc_nids); spec->mux_nids = stac927x_mux_nids; spec->num_muxes = ARRAY_SIZE(stac927x_mux_nids); spec->smux_nids = stac927x_smux_nids; spec->num_smuxes = ARRAY_SIZE(stac927x_smux_nids); spec->spdif_labels = stac927x_spdif_labels; spec->dac_list = stac927x_dac_nids; spec->multiout.dac_nids = spec->dac_nids; if (spec->board_config != STAC_D965_REF) { /* GPIO0 High = Enable EAPD */ spec->eapd_mask = spec->gpio_mask = 0x01; spec->gpio_dir = spec->gpio_data = 0x01; } switch (spec->board_config) { case STAC_D965_3ST: case STAC_D965_5ST: /* GPIO0 High = Enable EAPD */ spec->num_dmics = 0; spec->init = d965_core_init; break; case STAC_DELL_BIOS: switch (codec->subsystem_id) { case 0x10280209: case 0x1028022e: /* correct the device field to SPDIF out */ snd_hda_codec_set_pincfg(codec, 0x21, 0x01442070); break; } /* configure the analog microphone on some laptops */ snd_hda_codec_set_pincfg(codec, 0x0c, 0x90a79130); /* correct the front output jack as a hp out */ snd_hda_codec_set_pincfg(codec, 0x0f, 
0x0227011f); /* correct the front input jack as a mic */ snd_hda_codec_set_pincfg(codec, 0x0e, 0x02a79130); /* fallthru */ case STAC_DELL_3ST: if (codec->subsystem_id != 0x1028022f) { /* GPIO2 High = Enable EAPD */ spec->eapd_mask = spec->gpio_mask = 0x04; spec->gpio_dir = spec->gpio_data = 0x04; } spec->dmic_nids = stac927x_dmic_nids; spec->num_dmics = STAC927X_NUM_DMICS; spec->init = dell_3st_core_init; spec->dmux_nids = stac927x_dmux_nids; spec->num_dmuxes = ARRAY_SIZE(stac927x_dmux_nids); break; case STAC_927X_VOLKNOB: spec->num_dmics = 0; spec->init = stac927x_volknob_core_init; break; default: spec->num_dmics = 0; spec->init = stac927x_core_init; break; } spec->num_caps = STAC927X_NUM_CAPS; spec->capvols = stac927x_capvols; spec->capsws = stac927x_capsws; spec->num_pwrs = 0; spec->aloopback_ctl = stac927x_loopback; spec->aloopback_mask = 0x40; spec->aloopback_shift = 0; spec->eapd_switch = 1; err = stac92xx_parse_auto_config(codec); if (!err) { if (spec->board_config < 0) { printk(KERN_WARNING "hda_codec: No auto-config is " "available, default to model=ref\n"); spec->board_config = STAC_D965_REF; goto again; } err = -EINVAL; } if (err < 0) { stac92xx_free(codec); return err; } codec->patch_ops = stac92xx_patch_ops; codec->proc_widget_hook = stac927x_proc_hook; /* * !!FIXME!! * The STAC927x seem to require fairly long delays for certain * command sequences. With too short delays (even if the answer * is set to RIRB properly), it results in the silence output * on some hardwares like Dell. * * The below flag enables the longer delay (see get_response * in hda_intel.c). 
*/ codec->bus->needs_damn_long_delay = 1; /* no jack detecion for ref-no-jd model */ if (spec->board_config == STAC_D965_REF_NO_JD) spec->hp_detect = 0; return 0; } static int patch_stac9205(struct hda_codec *codec) { struct sigmatel_spec *spec; int err; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (spec == NULL) return -ENOMEM; codec->no_trigger_sense = 1; codec->spec = spec; spec->linear_tone_beep = 1; spec->num_pins = ARRAY_SIZE(stac9205_pin_nids); spec->pin_nids = stac9205_pin_nids; spec->board_config = snd_hda_check_board_config(codec, STAC_9205_MODELS, stac9205_models, stac9205_cfg_tbl); again: if (spec->board_config < 0) snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n", codec->chip_name); else stac92xx_set_config_regs(codec, stac9205_brd_tbl[spec->board_config]); spec->digbeep_nid = 0x23; spec->adc_nids = stac9205_adc_nids; spec->num_adcs = ARRAY_SIZE(stac9205_adc_nids); spec->mux_nids = stac9205_mux_nids; spec->num_muxes = ARRAY_SIZE(stac9205_mux_nids); spec->smux_nids = stac9205_smux_nids; spec->num_smuxes = ARRAY_SIZE(stac9205_smux_nids); spec->dmic_nids = stac9205_dmic_nids; spec->num_dmics = STAC9205_NUM_DMICS; spec->dmux_nids = stac9205_dmux_nids; spec->num_dmuxes = ARRAY_SIZE(stac9205_dmux_nids); spec->num_pwrs = 0; spec->init = stac9205_core_init; spec->aloopback_ctl = stac9205_loopback; spec->num_caps = STAC9205_NUM_CAPS; spec->capvols = stac9205_capvols; spec->capsws = stac9205_capsws; spec->aloopback_mask = 0x40; spec->aloopback_shift = 0; /* Turn on/off EAPD per HP plugging */ if (spec->board_config != STAC_9205_EAPD) spec->eapd_switch = 1; spec->multiout.dac_nids = spec->dac_nids; switch (spec->board_config){ case STAC_9205_DELL_M43: /* Enable SPDIF in/out */ snd_hda_codec_set_pincfg(codec, 0x1f, 0x01441030); snd_hda_codec_set_pincfg(codec, 0x20, 0x1c410030); /* Enable unsol response for GPIO4/Dock HP connection */ err = stac_add_event(codec, codec->afg, STAC_VREF_EVENT, 0x01); if (err < 0) return err; 
snd_hda_codec_write_cache(codec, codec->afg, 0, AC_VERB_SET_GPIO_UNSOLICITED_RSP_MASK, 0x10); snd_hda_jack_detect_enable(codec, codec->afg, 0); spec->gpio_dir = 0x0b; spec->eapd_mask = 0x01; spec->gpio_mask = 0x1b; spec->gpio_mute = 0x10; /* GPIO0 High = EAPD, GPIO1 Low = Headphone Mute, * GPIO3 Low = DRM */ spec->gpio_data = 0x01; break; case STAC_9205_REF: /* SPDIF-In enabled */ break; default: /* GPIO0 High = EAPD */ spec->eapd_mask = spec->gpio_mask = spec->gpio_dir = 0x1; spec->gpio_data = 0x01; break; } err = stac92xx_parse_auto_config(codec); if (!err) { if (spec->board_config < 0) { printk(KERN_WARNING "hda_codec: No auto-config is " "available, default to model=ref\n"); spec->board_config = STAC_9205_REF; goto again; } err = -EINVAL; } if (err < 0) { stac92xx_free(codec); return err; } codec->patch_ops = stac92xx_patch_ops; codec->proc_widget_hook = stac9205_proc_hook; return 0; } /* * STAC9872 hack */ static const struct hda_verb stac9872_core_init[] = { {0x15, AC_VERB_SET_CONNECT_SEL, 0x1}, /* mic-sel: 0a,0d,14,02 */ {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, /* Mic-in -> 0x9 */ {} }; static const hda_nid_t stac9872_pin_nids[] = { 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x11, 0x13, 0x14, }; static const hda_nid_t stac9872_adc_nids[] = { 0x8 /*,0x6*/ }; static const hda_nid_t stac9872_mux_nids[] = { 0x15 }; static const unsigned long stac9872_capvols[] = { HDA_COMPOSE_AMP_VAL(0x09, 3, 0, HDA_INPUT), }; #define stac9872_capsws stac9872_capvols static const unsigned int stac9872_vaio_pin_configs[9] = { 0x03211020, 0x411111f0, 0x411111f0, 0x03a15030, 0x411111f0, 0x90170110, 0x411111f0, 0x411111f0, 0x90a7013e }; static const char * const stac9872_models[STAC_9872_MODELS] = { [STAC_9872_AUTO] = "auto", [STAC_9872_VAIO] = "vaio", }; static const unsigned int *stac9872_brd_tbl[STAC_9872_MODELS] = { [STAC_9872_VAIO] = stac9872_vaio_pin_configs, }; static const struct snd_pci_quirk stac9872_cfg_tbl[] = { SND_PCI_QUIRK_MASK(0x104d, 0xfff0, 0x81e0, "Sony VAIO 
F/S", STAC_9872_VAIO), {} /* terminator */ }; static int patch_stac9872(struct hda_codec *codec) { struct sigmatel_spec *spec; int err; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (spec == NULL) return -ENOMEM; codec->no_trigger_sense = 1; codec->spec = spec; spec->linear_tone_beep = 1; spec->num_pins = ARRAY_SIZE(stac9872_pin_nids); spec->pin_nids = stac9872_pin_nids; spec->board_config = snd_hda_check_board_config(codec, STAC_9872_MODELS, stac9872_models, stac9872_cfg_tbl); if (spec->board_config < 0) snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n", codec->chip_name); else stac92xx_set_config_regs(codec, stac9872_brd_tbl[spec->board_config]); spec->multiout.dac_nids = spec->dac_nids; spec->num_adcs = ARRAY_SIZE(stac9872_adc_nids); spec->adc_nids = stac9872_adc_nids; spec->num_muxes = ARRAY_SIZE(stac9872_mux_nids); spec->mux_nids = stac9872_mux_nids; spec->init = stac9872_core_init; spec->num_caps = 1; spec->capvols = stac9872_capvols; spec->capsws = stac9872_capsws; err = stac92xx_parse_auto_config(codec); if (err < 0) { stac92xx_free(codec); return -EINVAL; } spec->input_mux = &spec->private_imux; codec->patch_ops = stac92xx_patch_ops; return 0; } /* * patch entries */ static const struct hda_codec_preset snd_hda_preset_sigmatel[] = { { .id = 0x83847690, .name = "STAC9200", .patch = patch_stac9200 }, { .id = 0x83847882, .name = "STAC9220 A1", .patch = patch_stac922x }, { .id = 0x83847680, .name = "STAC9221 A1", .patch = patch_stac922x }, { .id = 0x83847880, .name = "STAC9220 A2", .patch = patch_stac922x }, { .id = 0x83847681, .name = "STAC9220D/9223D A2", .patch = patch_stac922x }, { .id = 0x83847682, .name = "STAC9221 A2", .patch = patch_stac922x }, { .id = 0x83847683, .name = "STAC9221D A2", .patch = patch_stac922x }, { .id = 0x83847618, .name = "STAC9227", .patch = patch_stac927x }, { .id = 0x83847619, .name = "STAC9227", .patch = patch_stac927x }, { .id = 0x83847616, .name = "STAC9228", .patch = patch_stac927x }, { .id = 0x83847617, .name = 
"STAC9228", .patch = patch_stac927x }, { .id = 0x83847614, .name = "STAC9229", .patch = patch_stac927x }, { .id = 0x83847615, .name = "STAC9229", .patch = patch_stac927x }, { .id = 0x83847620, .name = "STAC9274", .patch = patch_stac927x }, { .id = 0x83847621, .name = "STAC9274D", .patch = patch_stac927x }, { .id = 0x83847622, .name = "STAC9273X", .patch = patch_stac927x }, { .id = 0x83847623, .name = "STAC9273D", .patch = patch_stac927x }, { .id = 0x83847624, .name = "STAC9272X", .patch = patch_stac927x }, { .id = 0x83847625, .name = "STAC9272D", .patch = patch_stac927x }, { .id = 0x83847626, .name = "STAC9271X", .patch = patch_stac927x }, { .id = 0x83847627, .name = "STAC9271D", .patch = patch_stac927x }, { .id = 0x83847628, .name = "STAC9274X5NH", .patch = patch_stac927x }, { .id = 0x83847629, .name = "STAC9274D5NH", .patch = patch_stac927x }, { .id = 0x83847632, .name = "STAC9202", .patch = patch_stac925x }, { .id = 0x83847633, .name = "STAC9202D", .patch = patch_stac925x }, { .id = 0x83847634, .name = "STAC9250", .patch = patch_stac925x }, { .id = 0x83847635, .name = "STAC9250D", .patch = patch_stac925x }, { .id = 0x83847636, .name = "STAC9251", .patch = patch_stac925x }, { .id = 0x83847637, .name = "STAC9250D", .patch = patch_stac925x }, { .id = 0x83847645, .name = "92HD206X", .patch = patch_stac927x }, { .id = 0x83847646, .name = "92HD206D", .patch = patch_stac927x }, /* The following does not take into account .id=0x83847661 when subsys = * 104D0C00 which is STAC9225s. Because of this, some SZ Notebooks are * currently not fully supported. 
*/ { .id = 0x83847661, .name = "CXD9872RD/K", .patch = patch_stac9872 }, { .id = 0x83847662, .name = "STAC9872AK", .patch = patch_stac9872 }, { .id = 0x83847664, .name = "CXD9872AKD", .patch = patch_stac9872 }, { .id = 0x83847698, .name = "STAC9205", .patch = patch_stac9205 }, { .id = 0x838476a0, .name = "STAC9205", .patch = patch_stac9205 }, { .id = 0x838476a1, .name = "STAC9205D", .patch = patch_stac9205 }, { .id = 0x838476a2, .name = "STAC9204", .patch = patch_stac9205 }, { .id = 0x838476a3, .name = "STAC9204D", .patch = patch_stac9205 }, { .id = 0x838476a4, .name = "STAC9255", .patch = patch_stac9205 }, { .id = 0x838476a5, .name = "STAC9255D", .patch = patch_stac9205 }, { .id = 0x838476a6, .name = "STAC9254", .patch = patch_stac9205 }, { .id = 0x838476a7, .name = "STAC9254D", .patch = patch_stac9205 }, { .id = 0x111d7603, .name = "92HD75B3X5", .patch = patch_stac92hd71bxx}, { .id = 0x111d7604, .name = "92HD83C1X5", .patch = patch_stac92hd83xxx}, { .id = 0x111d76d4, .name = "92HD83C1C5", .patch = patch_stac92hd83xxx}, { .id = 0x111d7605, .name = "92HD81B1X5", .patch = patch_stac92hd83xxx}, { .id = 0x111d76d5, .name = "92HD81B1C5", .patch = patch_stac92hd83xxx}, { .id = 0x111d76d1, .name = "92HD87B1/3", .patch = patch_stac92hd83xxx}, { .id = 0x111d76d9, .name = "92HD87B2/4", .patch = patch_stac92hd83xxx}, { .id = 0x111d7666, .name = "92HD88B3", .patch = patch_stac92hd83xxx}, { .id = 0x111d7667, .name = "92HD88B1", .patch = patch_stac92hd83xxx}, { .id = 0x111d7668, .name = "92HD88B2", .patch = patch_stac92hd83xxx}, { .id = 0x111d7669, .name = "92HD88B4", .patch = patch_stac92hd83xxx}, { .id = 0x111d7608, .name = "92HD75B2X5", .patch = patch_stac92hd71bxx}, { .id = 0x111d7674, .name = "92HD73D1X5", .patch = patch_stac92hd73xx }, { .id = 0x111d7675, .name = "92HD73C1X5", .patch = patch_stac92hd73xx }, { .id = 0x111d7676, .name = "92HD73E1X5", .patch = patch_stac92hd73xx }, { .id = 0x111d76b0, .name = "92HD71B8X", .patch = patch_stac92hd71bxx }, { .id = 0x111d76b1, 
.name = "92HD71B8X", .patch = patch_stac92hd71bxx }, { .id = 0x111d76b2, .name = "92HD71B7X", .patch = patch_stac92hd71bxx }, { .id = 0x111d76b3, .name = "92HD71B7X", .patch = patch_stac92hd71bxx }, { .id = 0x111d76b4, .name = "92HD71B6X", .patch = patch_stac92hd71bxx }, { .id = 0x111d76b5, .name = "92HD71B6X", .patch = patch_stac92hd71bxx }, { .id = 0x111d76b6, .name = "92HD71B5X", .patch = patch_stac92hd71bxx }, { .id = 0x111d76b7, .name = "92HD71B5X", .patch = patch_stac92hd71bxx }, { .id = 0x111d76c0, .name = "92HD89C3", .patch = patch_stac92hd73xx }, { .id = 0x111d76c1, .name = "92HD89C2", .patch = patch_stac92hd73xx }, { .id = 0x111d76c2, .name = "92HD89C1", .patch = patch_stac92hd73xx }, { .id = 0x111d76c3, .name = "92HD89B3", .patch = patch_stac92hd73xx }, { .id = 0x111d76c4, .name = "92HD89B2", .patch = patch_stac92hd73xx }, { .id = 0x111d76c5, .name = "92HD89B1", .patch = patch_stac92hd73xx }, { .id = 0x111d76c6, .name = "92HD89E3", .patch = patch_stac92hd73xx }, { .id = 0x111d76c7, .name = "92HD89E2", .patch = patch_stac92hd73xx }, { .id = 0x111d76c8, .name = "92HD89E1", .patch = patch_stac92hd73xx }, { .id = 0x111d76c9, .name = "92HD89D3", .patch = patch_stac92hd73xx }, { .id = 0x111d76ca, .name = "92HD89D2", .patch = patch_stac92hd73xx }, { .id = 0x111d76cb, .name = "92HD89D1", .patch = patch_stac92hd73xx }, { .id = 0x111d76cc, .name = "92HD89F3", .patch = patch_stac92hd73xx }, { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx }, { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx }, { .id = 0x111d76df, .name = "92HD93BXX", .patch = patch_stac92hd83xxx}, { .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx}, { .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx}, { .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx}, { .id = 0x111d76e7, .name = "92HD90BXX", .patch = patch_stac92hd83xxx}, { .id = 0x111d76e8, .name = "92HD66B1X5", .patch = patch_stac92hd83xxx}, { .id = 
0x111d76e9, .name = "92HD66B2X5", .patch = patch_stac92hd83xxx}, { .id = 0x111d76ea, .name = "92HD66B3X5", .patch = patch_stac92hd83xxx}, { .id = 0x111d76eb, .name = "92HD66C1X5", .patch = patch_stac92hd83xxx}, { .id = 0x111d76ec, .name = "92HD66C2X5", .patch = patch_stac92hd83xxx}, { .id = 0x111d76ed, .name = "92HD66C3X5", .patch = patch_stac92hd83xxx}, { .id = 0x111d76ee, .name = "92HD66B1X3", .patch = patch_stac92hd83xxx}, { .id = 0x111d76ef, .name = "92HD66B2X3", .patch = patch_stac92hd83xxx}, { .id = 0x111d76f0, .name = "92HD66B3X3", .patch = patch_stac92hd83xxx}, { .id = 0x111d76f1, .name = "92HD66C1X3", .patch = patch_stac92hd83xxx}, { .id = 0x111d76f2, .name = "92HD66C2X3", .patch = patch_stac92hd83xxx}, { .id = 0x111d76f3, .name = "92HD66C3/65", .patch = patch_stac92hd83xxx}, {} /* terminator */ }; MODULE_ALIAS("snd-hda-codec-id:8384*"); MODULE_ALIAS("snd-hda-codec-id:111d*"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("IDT/Sigmatel HD-audio codec"); static struct hda_codec_preset_list sigmatel_list = { .preset = snd_hda_preset_sigmatel, .owner = THIS_MODULE, }; static int __init patch_sigmatel_init(void) { return snd_hda_add_codec_preset(&sigmatel_list); } static void __exit patch_sigmatel_exit(void) { snd_hda_delete_codec_preset(&sigmatel_list); } module_init(patch_sigmatel_init) module_exit(patch_sigmatel_exit)
gpl-2.0
youfoh/TizenProject
arch/arm/mach-s3c24xx/simtec-pm.c
4511
1559
/* linux/arch/arm/plat-s3c24xx/pm-simtec.c * * Copyright 2004 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * http://armlinux.simtec.co.uk/ * * Power Management helpers for Simtec S3C24XX implementations * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/device.h> #include <linux/io.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <mach/hardware.h> #include <mach/map.h> #include <mach/regs-gpio.h> #include <asm/mach-types.h> #include <plat/pm.h> #include "regs-mem.h" #define COPYRIGHT ", Copyright 2005 Simtec Electronics" /* pm_simtec_init * * enable the power management functions */ static __init int pm_simtec_init(void) { unsigned long gstatus4; /* check which machine we are running on */ if (!machine_is_bast() && !machine_is_vr1000() && !machine_is_anubis() && !machine_is_osiris() && !machine_is_aml_m5900()) return 0; printk(KERN_INFO "Simtec Board Power Management" COPYRIGHT "\n"); gstatus4 = (__raw_readl(S3C2410_BANKCON7) & 0x3) << 30; gstatus4 |= (__raw_readl(S3C2410_BANKCON6) & 0x3) << 28; gstatus4 |= (__raw_readl(S3C2410_BANKSIZE) & S3C2410_BANKSIZE_MASK); __raw_writel(gstatus4, S3C2410_GSTATUS4); return s3c_pm_init(); } arch_initcall(pm_simtec_init);
gpl-2.0
vwarg/ubuntutouch-kernel-jfltexx
drivers/gpu/drm/radeon/cayman_blit_shaders.c
7327
8320
/* * Copyright 2010 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Alex Deucher <alexander.deucher@amd.com> */ #include <linux/bug.h> #include <linux/types.h> #include <linux/kernel.h> /* * evergreen cards need to use the 3D engine to blit data which requires * quite a bit of hw state setup. Rather than pull the whole 3D driver * (which normally generates the 3D state) into the DRM, we opt to use * statically generated state tables. The register state and shaders * were hand generated to support blitting functionality. See the 3D * driver or documentation for descriptions of the registers and * shader instructions. 
*/ const u32 cayman_default_state[] = { 0xc0066900, 0x00000000, 0x00000060, /* DB_RENDER_CONTROL */ 0x00000000, /* DB_COUNT_CONTROL */ 0x00000000, /* DB_DEPTH_VIEW */ 0x0000002a, /* DB_RENDER_OVERRIDE */ 0x00000000, /* DB_RENDER_OVERRIDE2 */ 0x00000000, /* DB_HTILE_DATA_BASE */ 0xc0026900, 0x0000000a, 0x00000000, /* DB_STENCIL_CLEAR */ 0x00000000, /* DB_DEPTH_CLEAR */ 0xc0036900, 0x0000000f, 0x00000000, /* DB_DEPTH_INFO */ 0x00000000, /* DB_Z_INFO */ 0x00000000, /* DB_STENCIL_INFO */ 0xc0016900, 0x00000080, 0x00000000, /* PA_SC_WINDOW_OFFSET */ 0xc00d6900, 0x00000083, 0x0000ffff, /* PA_SC_CLIPRECT_RULE */ 0x00000000, /* PA_SC_CLIPRECT_0_TL */ 0x20002000, /* PA_SC_CLIPRECT_0_BR */ 0x00000000, 0x20002000, 0x00000000, 0x20002000, 0x00000000, 0x20002000, 0xaaaaaaaa, /* PA_SC_EDGERULE */ 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */ 0x0000000f, /* CB_TARGET_MASK */ 0x0000000f, /* CB_SHADER_MASK */ 0xc0226900, 0x00000094, 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */ 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */ 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x00000000, /* PA_SC_VPORT_ZMIN_0 */ 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */ 0xc0016900, 0x000000d4, 0x00000000, /* SX_MISC */ 0xc0026900, 0x000000d9, 0x00000000, /* CP_RINGID */ 0x00000000, /* CP_VMID */ 0xc0096900, 0x00000100, 0x00ffffff, /* VGT_MAX_VTX_INDX */ 0x00000000, /* VGT_MIN_VTX_INDX */ 0x00000000, /* VGT_INDX_OFFSET */ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */ 0x00000000, /* SX_ALPHA_TEST_CONTROL */ 0x00000000, /* CB_BLEND_RED */ 0x00000000, /* CB_BLEND_GREEN */ 0x00000000, /* CB_BLEND_BLUE */ 0x00000000, /* CB_BLEND_ALPHA */ 0xc0016900, 0x00000187, 0x00000100, /* SPI_VS_OUT_ID_0 */ 0xc0026900, 
0x00000191, 0x00000100, /* SPI_PS_INPUT_CNTL_0 */ 0x00000101, /* SPI_PS_INPUT_CNTL_1 */ 0xc0016900, 0x000001b1, 0x00000000, /* SPI_VS_OUT_CONFIG */ 0xc0106900, 0x000001b3, 0x20000001, /* SPI_PS_IN_CONTROL_0 */ 0x00000000, /* SPI_PS_IN_CONTROL_1 */ 0x00000000, /* SPI_INTERP_CONTROL_0 */ 0x00000000, /* SPI_INPUT_Z */ 0x00000000, /* SPI_FOG_CNTL */ 0x00100000, /* SPI_BARYC_CNTL */ 0x00000000, /* SPI_PS_IN_CONTROL_2 */ 0x00000000, /* SPI_COMPUTE_INPUT_CNTL */ 0x00000000, /* SPI_COMPUTE_NUM_THREAD_X */ 0x00000000, /* SPI_COMPUTE_NUM_THREAD_Y */ 0x00000000, /* SPI_COMPUTE_NUM_THREAD_Z */ 0x00000000, /* SPI_GPR_MGMT */ 0x00000000, /* SPI_LDS_MGMT */ 0x00000000, /* SPI_STACK_MGMT */ 0x00000000, /* SPI_WAVE_MGMT_1 */ 0x00000000, /* SPI_WAVE_MGMT_2 */ 0xc0016900, 0x000001e0, 0x00000000, /* CB_BLEND0_CONTROL */ 0xc00e6900, 0x00000200, 0x00000000, /* DB_DEPTH_CONTROL */ 0x00000000, /* DB_EQAA */ 0x00cc0010, /* CB_COLOR_CONTROL */ 0x00000210, /* DB_SHADER_CONTROL */ 0x00010000, /* PA_CL_CLIP_CNTL */ 0x00000004, /* PA_SU_SC_MODE_CNTL */ 0x00000100, /* PA_CL_VTE_CNTL */ 0x00000000, /* PA_CL_VS_OUT_CNTL */ 0x00000000, /* PA_CL_NANINF_CNTL */ 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */ 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */ 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */ 0x00000000, /* */ 0x00000000, /* */ 0xc0026900, 0x00000229, 0x00000000, /* SQ_PGM_START_FS */ 0x00000000, 0xc0016900, 0x0000023b, 0x00000000, /* SQ_LDS_ALLOC_PS */ 0xc0066900, 0x00000240, 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xc0046900, 0x00000247, 0x00000000, /* SQ_GS_VERT_ITEMSIZE */ 0x00000000, 0x00000000, 0x00000000, 0xc0116900, 0x00000280, 0x00000000, /* PA_SU_POINT_SIZE */ 0x00000000, /* PA_SU_POINT_MINMAX */ 0x00000008, /* PA_SU_LINE_CNTL */ 0x00000000, /* PA_SC_LINE_STIPPLE */ 0x00000000, /* VGT_OUTPUT_PATH_CNTL */ 0x00000000, /* VGT_HOS_CNTL */ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, /* VGT_GS_MODE */ 0xc0026900, 0x00000292, 0x00000000, /* PA_SC_MODE_CNTL_0 */ 0x00000000, /* PA_SC_MODE_CNTL_1 */ 0xc0016900, 0x000002a1, 0x00000000, /* VGT_PRIMITIVEID_EN */ 0xc0016900, 0x000002a5, 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */ 0xc0026900, 0x000002a8, 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */ 0x00000000, 0xc0026900, 0x000002ad, 0x00000000, /* VGT_REUSE_OFF */ 0x00000000, 0xc0016900, 0x000002d5, 0x00000000, /* VGT_SHADER_STAGES_EN */ 0xc0016900, 0x000002dc, 0x0000aa00, /* DB_ALPHA_TO_MASK */ 0xc0066900, 0x000002de, 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xc0026900, 0x000002e5, 0x00000000, /* VGT_STRMOUT_CONFIG */ 0x00000000, 0xc01b6900, 0x000002f5, 0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */ 0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */ 0x00000000, /* PA_SC_LINE_CNTL */ 0x00000000, /* PA_SC_AA_CONFIG */ 0x00000005, /* PA_SU_VTX_CNTL */ 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */ 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */ 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */ 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */ 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */ 0xffffffff, 0xc0026900, 0x00000316, 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 0x00000010, /* */ }; const u32 cayman_vs[] = { 0x00000004, 0x80400400, 0x0000a03c, 0x95000688, 0x00004000, 0x15000688, 0x00000000, 0x88000000, 0x04000000, 0x67961001, #ifdef __BIG_ENDIAN 0x00020000, #else 0x00000000, #endif 0x00000000, 0x04000000, 0x67961000, #ifdef __BIG_ENDIAN 0x00020008, #else 0x00000008, #endif 0x00000000, }; const u32 cayman_ps[] = { 0x00000004, 0xa00c0000, 0x00000008, 0x80400000, 0x00000000, 0x95000688, 0x00000000, 0x88000000, 0x00380400, 0x00146b10, 0x00380000, 0x20146b10, 
0x00380400, 0x40146b00, 0x80380000, 0x60146b00, 0x00000010, 0x000d1000, 0xb0800000, 0x00000000, }; const u32 cayman_ps_size = ARRAY_SIZE(cayman_ps); const u32 cayman_vs_size = ARRAY_SIZE(cayman_vs); const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
gpl-2.0
TeamBliss-Devices/android_kernel_samsung_jflte
drivers/gpu/drm/radeon/cayman_blit_shaders.c
7327
8320
/* * Copyright 2010 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Alex Deucher <alexander.deucher@amd.com> */ #include <linux/bug.h> #include <linux/types.h> #include <linux/kernel.h> /* * evergreen cards need to use the 3D engine to blit data which requires * quite a bit of hw state setup. Rather than pull the whole 3D driver * (which normally generates the 3D state) into the DRM, we opt to use * statically generated state tables. The register state and shaders * were hand generated to support blitting functionality. See the 3D * driver or documentation for descriptions of the registers and * shader instructions. 
*/ const u32 cayman_default_state[] = { 0xc0066900, 0x00000000, 0x00000060, /* DB_RENDER_CONTROL */ 0x00000000, /* DB_COUNT_CONTROL */ 0x00000000, /* DB_DEPTH_VIEW */ 0x0000002a, /* DB_RENDER_OVERRIDE */ 0x00000000, /* DB_RENDER_OVERRIDE2 */ 0x00000000, /* DB_HTILE_DATA_BASE */ 0xc0026900, 0x0000000a, 0x00000000, /* DB_STENCIL_CLEAR */ 0x00000000, /* DB_DEPTH_CLEAR */ 0xc0036900, 0x0000000f, 0x00000000, /* DB_DEPTH_INFO */ 0x00000000, /* DB_Z_INFO */ 0x00000000, /* DB_STENCIL_INFO */ 0xc0016900, 0x00000080, 0x00000000, /* PA_SC_WINDOW_OFFSET */ 0xc00d6900, 0x00000083, 0x0000ffff, /* PA_SC_CLIPRECT_RULE */ 0x00000000, /* PA_SC_CLIPRECT_0_TL */ 0x20002000, /* PA_SC_CLIPRECT_0_BR */ 0x00000000, 0x20002000, 0x00000000, 0x20002000, 0x00000000, 0x20002000, 0xaaaaaaaa, /* PA_SC_EDGERULE */ 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */ 0x0000000f, /* CB_TARGET_MASK */ 0x0000000f, /* CB_SHADER_MASK */ 0xc0226900, 0x00000094, 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */ 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */ 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x00000000, /* PA_SC_VPORT_ZMIN_0 */ 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */ 0xc0016900, 0x000000d4, 0x00000000, /* SX_MISC */ 0xc0026900, 0x000000d9, 0x00000000, /* CP_RINGID */ 0x00000000, /* CP_VMID */ 0xc0096900, 0x00000100, 0x00ffffff, /* VGT_MAX_VTX_INDX */ 0x00000000, /* VGT_MIN_VTX_INDX */ 0x00000000, /* VGT_INDX_OFFSET */ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */ 0x00000000, /* SX_ALPHA_TEST_CONTROL */ 0x00000000, /* CB_BLEND_RED */ 0x00000000, /* CB_BLEND_GREEN */ 0x00000000, /* CB_BLEND_BLUE */ 0x00000000, /* CB_BLEND_ALPHA */ 0xc0016900, 0x00000187, 0x00000100, /* SPI_VS_OUT_ID_0 */ 0xc0026900, 
0x00000191, 0x00000100, /* SPI_PS_INPUT_CNTL_0 */ 0x00000101, /* SPI_PS_INPUT_CNTL_1 */ 0xc0016900, 0x000001b1, 0x00000000, /* SPI_VS_OUT_CONFIG */ 0xc0106900, 0x000001b3, 0x20000001, /* SPI_PS_IN_CONTROL_0 */ 0x00000000, /* SPI_PS_IN_CONTROL_1 */ 0x00000000, /* SPI_INTERP_CONTROL_0 */ 0x00000000, /* SPI_INPUT_Z */ 0x00000000, /* SPI_FOG_CNTL */ 0x00100000, /* SPI_BARYC_CNTL */ 0x00000000, /* SPI_PS_IN_CONTROL_2 */ 0x00000000, /* SPI_COMPUTE_INPUT_CNTL */ 0x00000000, /* SPI_COMPUTE_NUM_THREAD_X */ 0x00000000, /* SPI_COMPUTE_NUM_THREAD_Y */ 0x00000000, /* SPI_COMPUTE_NUM_THREAD_Z */ 0x00000000, /* SPI_GPR_MGMT */ 0x00000000, /* SPI_LDS_MGMT */ 0x00000000, /* SPI_STACK_MGMT */ 0x00000000, /* SPI_WAVE_MGMT_1 */ 0x00000000, /* SPI_WAVE_MGMT_2 */ 0xc0016900, 0x000001e0, 0x00000000, /* CB_BLEND0_CONTROL */ 0xc00e6900, 0x00000200, 0x00000000, /* DB_DEPTH_CONTROL */ 0x00000000, /* DB_EQAA */ 0x00cc0010, /* CB_COLOR_CONTROL */ 0x00000210, /* DB_SHADER_CONTROL */ 0x00010000, /* PA_CL_CLIP_CNTL */ 0x00000004, /* PA_SU_SC_MODE_CNTL */ 0x00000100, /* PA_CL_VTE_CNTL */ 0x00000000, /* PA_CL_VS_OUT_CNTL */ 0x00000000, /* PA_CL_NANINF_CNTL */ 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */ 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */ 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */ 0x00000000, /* */ 0x00000000, /* */ 0xc0026900, 0x00000229, 0x00000000, /* SQ_PGM_START_FS */ 0x00000000, 0xc0016900, 0x0000023b, 0x00000000, /* SQ_LDS_ALLOC_PS */ 0xc0066900, 0x00000240, 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xc0046900, 0x00000247, 0x00000000, /* SQ_GS_VERT_ITEMSIZE */ 0x00000000, 0x00000000, 0x00000000, 0xc0116900, 0x00000280, 0x00000000, /* PA_SU_POINT_SIZE */ 0x00000000, /* PA_SU_POINT_MINMAX */ 0x00000008, /* PA_SU_LINE_CNTL */ 0x00000000, /* PA_SC_LINE_STIPPLE */ 0x00000000, /* VGT_OUTPUT_PATH_CNTL */ 0x00000000, /* VGT_HOS_CNTL */ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, /* VGT_GS_MODE */ 0xc0026900, 0x00000292, 0x00000000, /* PA_SC_MODE_CNTL_0 */ 0x00000000, /* PA_SC_MODE_CNTL_1 */ 0xc0016900, 0x000002a1, 0x00000000, /* VGT_PRIMITIVEID_EN */ 0xc0016900, 0x000002a5, 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */ 0xc0026900, 0x000002a8, 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */ 0x00000000, 0xc0026900, 0x000002ad, 0x00000000, /* VGT_REUSE_OFF */ 0x00000000, 0xc0016900, 0x000002d5, 0x00000000, /* VGT_SHADER_STAGES_EN */ 0xc0016900, 0x000002dc, 0x0000aa00, /* DB_ALPHA_TO_MASK */ 0xc0066900, 0x000002de, 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xc0026900, 0x000002e5, 0x00000000, /* VGT_STRMOUT_CONFIG */ 0x00000000, 0xc01b6900, 0x000002f5, 0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */ 0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */ 0x00000000, /* PA_SC_LINE_CNTL */ 0x00000000, /* PA_SC_AA_CONFIG */ 0x00000005, /* PA_SU_VTX_CNTL */ 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */ 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */ 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */ 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */ 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */ 0xffffffff, 0xc0026900, 0x00000316, 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 0x00000010, /* */ }; const u32 cayman_vs[] = { 0x00000004, 0x80400400, 0x0000a03c, 0x95000688, 0x00004000, 0x15000688, 0x00000000, 0x88000000, 0x04000000, 0x67961001, #ifdef __BIG_ENDIAN 0x00020000, #else 0x00000000, #endif 0x00000000, 0x04000000, 0x67961000, #ifdef __BIG_ENDIAN 0x00020008, #else 0x00000008, #endif 0x00000000, }; const u32 cayman_ps[] = { 0x00000004, 0xa00c0000, 0x00000008, 0x80400000, 0x00000000, 0x95000688, 0x00000000, 0x88000000, 0x00380400, 0x00146b10, 0x00380000, 0x20146b10, 
0x00380400, 0x40146b00, 0x80380000, 0x60146b00, 0x00000010, 0x000d1000, 0xb0800000, 0x00000000, }; const u32 cayman_ps_size = ARRAY_SIZE(cayman_ps); const u32 cayman_vs_size = ARRAY_SIZE(cayman_vs); const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
gpl-2.0
novaspirit/tf101-nv-linux
arch/cris/mm/init.c
7583
2142
/*
 *  linux/arch/cris/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000,2001  Axis Communications AB
 *
 *  Authors:  Bjorn Wesen (bjornw@axis.com)
 *
 */

#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <asm/tlb.h>

/* Page of zeroes used to back anonymous read faults.  NOTE(review): only
 * the definition is visible here; presumably initialised by the paging
 * setup code elsewhere in this architecture -- confirm. */
unsigned long empty_zero_page;

extern char _stext, _edata, _etext;	/* From linkerscript */
extern char __init_begin, __init_end;

/*
 * mem_init - hand boot memory over to the page allocator and print the
 * kernel memory banner.
 *
 * Copies the pfn limits established by setup.c into max_mapnr /
 * num_physpages, releases every bootmem page onto the buddy freelists,
 * counts the pages that remain reserved, and reports the sizes of the
 * code, data and init sections as computed from the linker symbols
 * declared above.
 */
void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	unsigned long tmp;

	BUG_ON(!mem_map);

	/* max/min_low_pfn was set by setup.c
	 * now we just copy it to some other necessary places...
	 *
	 * high_memory was also set in setup.c
	 */

	max_mapnr = num_physpages = max_low_pfn - min_low_pfn;

	/* this will put all memory onto the freelists */
	totalram_pages = free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_mapnr; tmp++) {
		/*
		 * Only count reserved RAM pages
		 */
		if (PageReserved(mem_map + tmp))
			reservedpages++;
	}

	/* section sizes in bytes, from the linker-script symbols */
	codesize = (unsigned long) &_etext - (unsigned long) &_stext;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO
	       "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, "
	       "%dk init)\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       max_mapnr << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10
	       );
}

/* free the pages occupied by initialization code
 *
 * Walks the [__init_begin, __init_end) range one page at a time,
 * clears each page's reserved flag, resets its refcount and returns it
 * to the allocator, bumping totalram_pages to match.
 */
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk (KERN_INFO "Freeing unused kernel memory: %luk freed\n",
		(unsigned long)((&__init_end - &__init_begin) >> 10));
}
gpl-2.0
dagnarf/i717morepower
drivers/staging/speakup/speakup_acntpc.c
7583
9338
/* * written by: Kirk Reiser <kirk@braille.uwo.ca> * this version considerably modified by David Borowski, david575@rogers.com * * Copyright (C) 1998-99 Kirk Reiser. * Copyright (C) 2003 David Borowski. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * this code is specifically written as a driver for the speakup screenreview * package and is not a general device driver. * This driver is for the Aicom Accent PC internal synthesizer. 
*/ #include <linux/jiffies.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/kthread.h> #include "spk_priv.h" #include "serialio.h" #include "speakup.h" #include "speakup_acnt.h" /* local header file for Accent values */ #define DRV_VERSION "2.10" #define PROCSPEECH '\r' static int synth_probe(struct spk_synth *synth); static void accent_release(void); static const char *synth_immediate(struct spk_synth *synth, const char *buf); static void do_catch_up(struct spk_synth *synth); static void synth_flush(struct spk_synth *synth); static int synth_port_control; static int port_forced; static unsigned int synth_portlist[] = { 0x2a8, 0 }; static struct var_t vars[] = { { CAPS_START, .u.s = {"\033P8" } }, { CAPS_STOP, .u.s = {"\033P5" } }, { RATE, .u.n = {"\033R%c", 9, 0, 17, 0, 0, "0123456789abcdefgh" } }, { PITCH, .u.n = {"\033P%d", 5, 0, 9, 0, 0, NULL } }, { VOL, .u.n = {"\033A%d", 5, 0, 9, 0, 0, NULL } }, { TONE, .u.n = {"\033V%d", 5, 0, 9, 0, 0, NULL } }, { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; /* * These attributes will appear in /sys/accessibility/speakup/acntpc. 
*/ static struct kobj_attribute caps_start_attribute = __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute caps_stop_attribute = __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute pitch_attribute = __ATTR(pitch, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute rate_attribute = __ATTR(rate, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute tone_attribute = __ATTR(tone, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute vol_attribute = __ATTR(vol, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute delay_time_attribute = __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute direct_attribute = __ATTR(direct, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute full_time_attribute = __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute jiffy_delta_attribute = __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute trigger_time_attribute = __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store); /* * Create a group of attributes so that we can create and destroy them all * at once. 
*/ static struct attribute *synth_attrs[] = { &caps_start_attribute.attr, &caps_stop_attribute.attr, &pitch_attribute.attr, &rate_attribute.attr, &tone_attribute.attr, &vol_attribute.attr, &delay_time_attribute.attr, &direct_attribute.attr, &full_time_attribute.attr, &jiffy_delta_attribute.attr, &trigger_time_attribute.attr, NULL, /* need to NULL terminate the list of attributes */ }; static struct spk_synth synth_acntpc = { .name = "acntpc", .version = DRV_VERSION, .long_name = "Accent PC", .init = "\033=X \033Oi\033T2\033=M\033N1\n", .procspeech = PROCSPEECH, .clear = SYNTH_CLEAR, .delay = 500, .trigger = 50, .jiffies = 50, .full = 1000, .startup = SYNTH_START, .checkval = SYNTH_CHECK, .vars = vars, .probe = synth_probe, .release = accent_release, .synth_immediate = synth_immediate, .catch_up = do_catch_up, .flush = synth_flush, .is_alive = spk_synth_is_alive_nop, .synth_adjust = NULL, .read_buff_add = NULL, .get_index = NULL, .indexing = { .command = NULL, .lowindex = 0, .highindex = 0, .currindex = 0, }, .attributes = { .attrs = synth_attrs, .name = "acntpc", }, }; static inline bool synth_writable(void) { return inb_p(synth_port_control) & SYNTH_WRITABLE; } static inline bool synth_full(void) { return inb_p(speakup_info.port_tts + UART_RX) == 'F'; } static const char *synth_immediate(struct spk_synth *synth, const char *buf) { u_char ch; while ((ch = *buf)) { int timeout = SPK_XMITR_TIMEOUT; if (ch == '\n') ch = PROCSPEECH; if (synth_full()) return buf; while (synth_writable()) { if (!--timeout) return buf; udelay(1); } outb_p(ch, speakup_info.port_tts); buf++; } return 0; } static void do_catch_up(struct spk_synth *synth) { u_char ch; unsigned long flags; unsigned long jiff_max; int timeout; int delay_time_val; int jiffy_delta_val; int full_time_val; struct var_t *delay_time; struct var_t *full_time; struct var_t *jiffy_delta; jiffy_delta = get_var(JIFFY); delay_time = get_var(DELAY); full_time = get_var(FULL); spk_lock(flags); jiffy_delta_val = 
jiffy_delta->u.n.value; spk_unlock(flags); jiff_max = jiffies + jiffy_delta_val; while (!kthread_should_stop()) { spk_lock(flags); if (speakup_info.flushing) { speakup_info.flushing = 0; spk_unlock(flags); synth->flush(synth); continue; } if (synth_buffer_empty()) { spk_unlock(flags); break; } set_current_state(TASK_INTERRUPTIBLE); full_time_val = full_time->u.n.value; spk_unlock(flags); if (synth_full()) { schedule_timeout(msecs_to_jiffies(full_time_val)); continue; } set_current_state(TASK_RUNNING); timeout = SPK_XMITR_TIMEOUT; while (synth_writable()) { if (!--timeout) break; udelay(1); } spk_lock(flags); ch = synth_buffer_getc(); spk_unlock(flags); if (ch == '\n') ch = PROCSPEECH; outb_p(ch, speakup_info.port_tts); if (jiffies >= jiff_max && ch == SPACE) { timeout = SPK_XMITR_TIMEOUT; while (synth_writable()) { if (!--timeout) break; udelay(1); } outb_p(PROCSPEECH, speakup_info.port_tts); spk_lock(flags); jiffy_delta_val = jiffy_delta->u.n.value; delay_time_val = delay_time->u.n.value; spk_unlock(flags); schedule_timeout(msecs_to_jiffies(delay_time_val)); jiff_max = jiffies+jiffy_delta_val; } } timeout = SPK_XMITR_TIMEOUT; while (synth_writable()) { if (!--timeout) break; udelay(1); } outb_p(PROCSPEECH, speakup_info.port_tts); } static void synth_flush(struct spk_synth *synth) { outb_p(SYNTH_CLEAR, speakup_info.port_tts); } static int synth_probe(struct spk_synth *synth) { unsigned int port_val = 0; int i = 0; pr_info("Probing for %s.\n", synth->long_name); if (port_forced) { speakup_info.port_tts = port_forced; pr_info("probe forced to %x by kernel command line\n", speakup_info.port_tts); if (synth_request_region(speakup_info.port_tts-1, SYNTH_IO_EXTENT)) { pr_warn("sorry, port already reserved\n"); return -EBUSY; } port_val = inw(speakup_info.port_tts-1); synth_port_control = speakup_info.port_tts-1; } else { for (i = 0; synth_portlist[i]; i++) { if (synth_request_region(synth_portlist[i], SYNTH_IO_EXTENT)) { pr_warn ("request_region: failed with 0x%x, %d\n", 
synth_portlist[i], SYNTH_IO_EXTENT); continue; } port_val = inw(synth_portlist[i]) & 0xfffc; if (port_val == 0x53fc) { /* 'S' and out&input bits */ synth_port_control = synth_portlist[i]; speakup_info.port_tts = synth_port_control+1; break; } } } port_val &= 0xfffc; if (port_val != 0x53fc) { /* 'S' and out&input bits */ pr_info("%s: not found\n", synth->long_name); synth_release_region(synth_port_control, SYNTH_IO_EXTENT); synth_port_control = 0; return -ENODEV; } pr_info("%s: %03x-%03x, driver version %s,\n", synth->long_name, synth_port_control, synth_port_control+SYNTH_IO_EXTENT-1, synth->version); synth->alive = 1; return 0; } static void accent_release(void) { if (speakup_info.port_tts) synth_release_region(speakup_info.port_tts-1, SYNTH_IO_EXTENT); speakup_info.port_tts = 0; } module_param_named(port, port_forced, int, S_IRUGO); module_param_named(start, synth_acntpc.startup, short, S_IRUGO); MODULE_PARM_DESC(port, "Set the port for the synthesizer (override probing)."); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); static int __init acntpc_init(void) { return synth_add(&synth_acntpc); } static void __exit acntpc_exit(void) { synth_remove(&synth_acntpc); } module_init(acntpc_init); module_exit(acntpc_exit); MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>"); MODULE_AUTHOR("David Borowski"); MODULE_DESCRIPTION("Speakup support for Accent PC synthesizer"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
gpl-2.0
AICP/kernel_xiaomi_cancro
drivers/staging/speakup/speakup_dectlk.c
7583
8873
/* * originally written by: Kirk Reiser <kirk@braille.uwo.ca> * this version considerably modified by David Borowski, david575@rogers.com * * Copyright (C) 1998-99 Kirk Reiser. * Copyright (C) 2003 David Borowski. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * specificly written as a driver for the speakup screenreview * s not a general device driver. 
*/ #include <linux/unistd.h> #include <linux/proc_fs.h> #include <linux/jiffies.h> #include <linux/spinlock.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/kthread.h> #include "speakup.h" #include "spk_priv.h" #include "serialio.h" #define DRV_VERSION "2.20" #define SYNTH_CLEAR 0x03 #define PROCSPEECH 0x0b static int xoff; static inline int synth_full(void) { return xoff; } static void do_catch_up(struct spk_synth *synth); static void synth_flush(struct spk_synth *synth); static void read_buff_add(u_char c); static unsigned char get_index(void); static int in_escape; static int is_flushing; static spinlock_t flush_lock; static DECLARE_WAIT_QUEUE_HEAD(flush); static struct var_t vars[] = { { CAPS_START, .u.s = {"[:dv ap 160] " } }, { CAPS_STOP, .u.s = {"[:dv ap 100 ] " } }, { RATE, .u.n = {"[:ra %d] ", 180, 75, 650, 0, 0, NULL } }, { PITCH, .u.n = {"[:dv ap %d] ", 122, 50, 350, 0, 0, NULL } }, { VOL, .u.n = {"[:dv g5 %d] ", 86, 60, 86, 0, 0, NULL } }, { PUNCT, .u.n = {"[:pu %c] ", 0, 0, 2, 0, 0, "nsa" } }, { VOICE, .u.n = {"[:n%c] ", 0, 0, 9, 0, 0, "phfdburwkv" } }, { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; /* * These attributes will appear in /sys/accessibility/speakup/dectlk. 
*/ static struct kobj_attribute caps_start_attribute = __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute caps_stop_attribute = __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute pitch_attribute = __ATTR(pitch, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute punct_attribute = __ATTR(punct, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute rate_attribute = __ATTR(rate, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute voice_attribute = __ATTR(voice, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute vol_attribute = __ATTR(vol, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute delay_time_attribute = __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute direct_attribute = __ATTR(direct, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute full_time_attribute = __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute jiffy_delta_attribute = __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute trigger_time_attribute = __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store); /* * Create a group of attributes so that we can create and destroy them all * at once. 
*/ static struct attribute *synth_attrs[] = { &caps_start_attribute.attr, &caps_stop_attribute.attr, &pitch_attribute.attr, &punct_attribute.attr, &rate_attribute.attr, &voice_attribute.attr, &vol_attribute.attr, &delay_time_attribute.attr, &direct_attribute.attr, &full_time_attribute.attr, &jiffy_delta_attribute.attr, &trigger_time_attribute.attr, NULL, /* need to NULL terminate the list of attributes */ }; static int ap_defaults[] = {122, 89, 155, 110, 208, 240, 200, 106, 306}; static int g5_defaults[] = {86, 81, 86, 84, 81, 80, 83, 83, 73}; static struct spk_synth synth_dectlk = { .name = "dectlk", .version = DRV_VERSION, .long_name = "Dectalk Express", .init = "[:error sp :name paul :rate 180 :tsr off] ", .procspeech = PROCSPEECH, .clear = SYNTH_CLEAR, .delay = 500, .trigger = 50, .jiffies = 50, .full = 40000, .startup = SYNTH_START, .checkval = SYNTH_CHECK, .vars = vars, .default_pitch = ap_defaults, .default_vol = g5_defaults, .probe = serial_synth_probe, .release = spk_serial_release, .synth_immediate = spk_synth_immediate, .catch_up = do_catch_up, .flush = synth_flush, .is_alive = spk_synth_is_alive_restart, .synth_adjust = NULL, .read_buff_add = read_buff_add, .get_index = get_index, .indexing = { .command = "[:in re %d ] ", .lowindex = 1, .highindex = 8, .currindex = 1, }, .attributes = { .attrs = synth_attrs, .name = "dectlk", }, }; static int is_indnum(u_char *ch) { if ((*ch >= '0') && (*ch <= '9')) { *ch = *ch - '0'; return 1; } return 0; } static u_char lastind; static unsigned char get_index(void) { u_char rv; rv = lastind; lastind = 0; return rv; } static void read_buff_add(u_char c) { static int ind = -1; if (c == 0x01) { unsigned long flags; spin_lock_irqsave(&flush_lock, flags); is_flushing = 0; wake_up_interruptible(&flush); spin_unlock_irqrestore(&flush_lock, flags); } else if (c == 0x13) { xoff = 1; } else if (c == 0x11) { xoff = 0; } else if (is_indnum(&c)) { if (ind == -1) ind = c; else ind = ind * 10 + c; } else if ((c > 31) && (c < 127)) { 
if (ind != -1) lastind = (u_char)ind; ind = -1; } } static void do_catch_up(struct spk_synth *synth) { int synth_full_val = 0; static u_char ch; static u_char last = '\0'; unsigned long flags; unsigned long jiff_max; unsigned long timeout = msecs_to_jiffies(4000); DEFINE_WAIT(wait); struct var_t *jiffy_delta; struct var_t *delay_time; int jiffy_delta_val; int delay_time_val; jiffy_delta = get_var(JIFFY); delay_time = get_var(DELAY); spk_lock(flags); jiffy_delta_val = jiffy_delta->u.n.value; spk_unlock(flags); jiff_max = jiffies + jiffy_delta_val; while (!kthread_should_stop()) { /* if no ctl-a in 4, send data anyway */ spin_lock_irqsave(&flush_lock, flags); while (is_flushing && timeout) { prepare_to_wait(&flush, &wait, TASK_INTERRUPTIBLE); spin_unlock_irqrestore(&flush_lock, flags); timeout = schedule_timeout(timeout); spin_lock_irqsave(&flush_lock, flags); } finish_wait(&flush, &wait); is_flushing = 0; spin_unlock_irqrestore(&flush_lock, flags); spk_lock(flags); if (speakup_info.flushing) { speakup_info.flushing = 0; spk_unlock(flags); synth->flush(synth); continue; } if (synth_buffer_empty()) { spk_unlock(flags); break; } ch = synth_buffer_peek(); set_current_state(TASK_INTERRUPTIBLE); delay_time_val = delay_time->u.n.value; synth_full_val = synth_full(); spk_unlock(flags); if (ch == '\n') ch = 0x0D; if (synth_full_val || !spk_serial_out(ch)) { schedule_timeout(msecs_to_jiffies(delay_time_val)); continue; } set_current_state(TASK_RUNNING); spk_lock(flags); synth_buffer_getc(); spk_unlock(flags); if (ch == '[') in_escape = 1; else if (ch == ']') in_escape = 0; else if (ch <= SPACE) { if (!in_escape && strchr(",.!?;:", last)) spk_serial_out(PROCSPEECH); if (jiffies >= jiff_max) { if (!in_escape) spk_serial_out(PROCSPEECH); spk_lock(flags); jiffy_delta_val = jiffy_delta->u.n.value; delay_time_val = delay_time->u.n.value; spk_unlock(flags); schedule_timeout(msecs_to_jiffies (delay_time_val)); jiff_max = jiffies + jiffy_delta_val; } } last = ch; } if (!in_escape) 
spk_serial_out(PROCSPEECH); } static void synth_flush(struct spk_synth *synth) { if (in_escape) { /* if in command output ']' so we don't get an error */ spk_serial_out(']'); } in_escape = 0; is_flushing = 1; spk_serial_out(SYNTH_CLEAR); } module_param_named(ser, synth_dectlk.ser, int, S_IRUGO); module_param_named(start, synth_dectlk.startup, short, S_IRUGO); MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based)."); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); static int __init dectlk_init(void) { return synth_add(&synth_dectlk); } static void __exit dectlk_exit(void) { synth_remove(&synth_dectlk); } module_init(dectlk_init); module_exit(dectlk_exit); MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>"); MODULE_AUTHOR("David Borowski"); MODULE_DESCRIPTION("Speakup support for DECtalk Express synthesizers"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
gpl-2.0
Red--Code/android_kernel_sony_msm8974ac
net/irda/discovery.c
7583
12834
/********************************************************************* * * Filename: discovery.c * Version: 0.1 * Description: Routines for handling discoveries at the IrLMP layer * Status: Experimental. * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Tue Apr 6 15:33:50 1999 * Modified at: Sat Oct 9 17:11:31 1999 * Modified by: Dag Brattli <dagb@cs.uit.no> * Modified at: Fri May 28 3:11 CST 1999 * Modified by: Horst von Brand <vonbrand@sleipnir.valparaiso.cl> * * Copyright (c) 1999 Dag Brattli, All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA * ********************************************************************/ #include <linux/string.h> #include <linux/socket.h> #include <linux/fs.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/export.h> #include <net/irda/irda.h> #include <net/irda/irlmp.h> #include <net/irda/discovery.h> #include <asm/unaligned.h> /* * Function irlmp_add_discovery (cachelog, discovery) * * Add a new discovery to the cachelog, and remove any old discoveries * from the same device * * Note : we try to preserve the time this device was *first* discovered * (as opposed to the time of last discovery used for cleanup). This is * used by clients waiting for discovery events to tell if the device * discovered is "new" or just the same old one. 
They can't rely there * on a binary flag (new/old), because not all discovery events are * propagated to them, and they might not always listen, so they would * miss some new devices popping up... * Jean II */ void irlmp_add_discovery(hashbin_t *cachelog, discovery_t *new) { discovery_t *discovery, *node; unsigned long flags; /* Set time of first discovery if node is new (see below) */ new->firststamp = new->timestamp; spin_lock_irqsave(&cachelog->hb_spinlock, flags); /* * Remove all discoveries of devices that has previously been * discovered on the same link with the same name (info), or the * same daddr. We do this since some devices (mostly PDAs) change * their device address between every discovery. */ discovery = (discovery_t *) hashbin_get_first(cachelog); while (discovery != NULL ) { node = discovery; /* Be sure to stay one item ahead */ discovery = (discovery_t *) hashbin_get_next(cachelog); if ((node->data.saddr == new->data.saddr) && ((node->data.daddr == new->data.daddr) || (strcmp(node->data.info, new->data.info) == 0))) { /* This discovery is a previous discovery * from the same device, so just remove it */ hashbin_remove_this(cachelog, (irda_queue_t *) node); /* Check if hints bits are unchanged */ if (get_unaligned((__u16 *)node->data.hints) == get_unaligned((__u16 *)new->data.hints)) /* Set time of first discovery for this node */ new->firststamp = node->firststamp; kfree(node); } } /* Insert the new and updated version */ hashbin_insert(cachelog, (irda_queue_t *) new, new->data.daddr, NULL); spin_unlock_irqrestore(&cachelog->hb_spinlock, flags); } /* * Function irlmp_add_discovery_log (cachelog, log) * * Merge a disovery log into the cachelog. * */ void irlmp_add_discovery_log(hashbin_t *cachelog, hashbin_t *log) { discovery_t *discovery; IRDA_DEBUG(4, "%s()\n", __func__); /* * If log is missing this means that IrLAP was unable to perform the * discovery, so restart discovery again with just the half timeout * of the normal one. */ /* Well... 
It means that there was nobody out there - Jean II */ if (log == NULL) { /* irlmp_start_discovery_timer(irlmp, 150); */ return; } /* * Locking : we are the only owner of this discovery log, so * no need to lock it. * We just need to lock the global log in irlmp_add_discovery(). */ discovery = (discovery_t *) hashbin_remove_first(log); while (discovery != NULL) { irlmp_add_discovery(cachelog, discovery); discovery = (discovery_t *) hashbin_remove_first(log); } /* Delete the now empty log */ hashbin_delete(log, (FREE_FUNC) kfree); } /* * Function irlmp_expire_discoveries (log, saddr, force) * * Go through all discoveries and expire all that has stayed too long * * Note : this assume that IrLAP won't change its saddr, which * currently is a valid assumption... */ void irlmp_expire_discoveries(hashbin_t *log, __u32 saddr, int force) { discovery_t * discovery; discovery_t * curr; unsigned long flags; discinfo_t * buffer = NULL; int n; /* Size of the full log */ int i = 0; /* How many we expired */ IRDA_ASSERT(log != NULL, return;); IRDA_DEBUG(4, "%s()\n", __func__); spin_lock_irqsave(&log->hb_spinlock, flags); discovery = (discovery_t *) hashbin_get_first(log); while (discovery != NULL) { /* Be sure to be one item ahead */ curr = discovery; discovery = (discovery_t *) hashbin_get_next(log); /* Test if it's time to expire this discovery */ if ((curr->data.saddr == saddr) && (force || ((jiffies - curr->timestamp) > DISCOVERY_EXPIRE_TIMEOUT))) { /* Create buffer as needed. * As this function get called a lot and most time * we don't have anything to put in the log (we are * quite picky), we can save a lot of overhead * by not calling kmalloc. 
Jean II */ if(buffer == NULL) { /* Create the client specific buffer */ n = HASHBIN_GET_SIZE(log); buffer = kmalloc(n * sizeof(struct irda_device_info), GFP_ATOMIC); if (buffer == NULL) { spin_unlock_irqrestore(&log->hb_spinlock, flags); return; } } /* Copy discovery information */ memcpy(&(buffer[i]), &(curr->data), sizeof(discinfo_t)); i++; /* Remove it from the log */ curr = hashbin_remove_this(log, (irda_queue_t *) curr); kfree(curr); } } /* Drop the spinlock before calling the higher layers, as * we can't guarantee they won't call us back and create a * deadlock. We will work on our own private data, so we * don't care to be interrupted. - Jean II */ spin_unlock_irqrestore(&log->hb_spinlock, flags); if(buffer == NULL) return; /* Tell IrLMP and registered clients about it */ irlmp_discovery_expiry(buffer, i); /* Free up our buffer */ kfree(buffer); } #if 0 /* * Function irlmp_dump_discoveries (log) * * Print out all discoveries in log * */ void irlmp_dump_discoveries(hashbin_t *log) { discovery_t *discovery; IRDA_ASSERT(log != NULL, return;); discovery = (discovery_t *) hashbin_get_first(log); while (discovery != NULL) { IRDA_DEBUG(0, "Discovery:\n"); IRDA_DEBUG(0, " daddr=%08x\n", discovery->data.daddr); IRDA_DEBUG(0, " saddr=%08x\n", discovery->data.saddr); IRDA_DEBUG(0, " nickname=%s\n", discovery->data.info); discovery = (discovery_t *) hashbin_get_next(log); } } #endif /* * Function irlmp_copy_discoveries (log, pn, mask) * * Copy all discoveries in a buffer * * This function implement a safe way for lmp clients to access the * discovery log. The basic problem is that we don't want the log * to change (add/remove) while the client is reading it. If the * lmp client manipulate directly the hashbin, he is sure to get * into troubles... * The idea is that we copy all the current discovery log in a buffer * which is specific to the client and pass this copy to him. As we * do this operation with the spinlock grabbed, we are safe... 
* Note : we don't want those clients to grab the spinlock, because * we have no control on how long they will hold it... * Note : we choose to copy the log in "struct irda_device_info" to * save space... * Note : the client must kfree himself() the log... * Jean II */ struct irda_device_info *irlmp_copy_discoveries(hashbin_t *log, int *pn, __u16 mask, int old_entries) { discovery_t * discovery; unsigned long flags; discinfo_t * buffer = NULL; int j_timeout = (sysctl_discovery_timeout * HZ); int n; /* Size of the full log */ int i = 0; /* How many we picked */ IRDA_ASSERT(pn != NULL, return NULL;); IRDA_ASSERT(log != NULL, return NULL;); /* Save spin lock */ spin_lock_irqsave(&log->hb_spinlock, flags); discovery = (discovery_t *) hashbin_get_first(log); while (discovery != NULL) { /* Mask out the ones we don't want : * We want to match the discovery mask, and to get only * the most recent one (unless we want old ones) */ if ((get_unaligned((__u16 *)discovery->data.hints) & mask) && ((old_entries) || ((jiffies - discovery->firststamp) < j_timeout))) { /* Create buffer as needed. * As this function get called a lot and most time * we don't have anything to put in the log (we are * quite picky), we can save a lot of overhead * by not calling kmalloc. 
Jean II */ if(buffer == NULL) { /* Create the client specific buffer */ n = HASHBIN_GET_SIZE(log); buffer = kmalloc(n * sizeof(struct irda_device_info), GFP_ATOMIC); if (buffer == NULL) { spin_unlock_irqrestore(&log->hb_spinlock, flags); return NULL; } } /* Copy discovery information */ memcpy(&(buffer[i]), &(discovery->data), sizeof(discinfo_t)); i++; } discovery = (discovery_t *) hashbin_get_next(log); } spin_unlock_irqrestore(&log->hb_spinlock, flags); /* Get the actual number of device in the buffer and return */ *pn = i; return buffer; } #ifdef CONFIG_PROC_FS static inline discovery_t *discovery_seq_idx(loff_t pos) { discovery_t *discovery; for (discovery = (discovery_t *) hashbin_get_first(irlmp->cachelog); discovery != NULL; discovery = (discovery_t *) hashbin_get_next(irlmp->cachelog)) { if (pos-- == 0) break; } return discovery; } static void *discovery_seq_start(struct seq_file *seq, loff_t *pos) { spin_lock_irq(&irlmp->cachelog->hb_spinlock); return *pos ? discovery_seq_idx(*pos - 1) : SEQ_START_TOKEN; } static void *discovery_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return (v == SEQ_START_TOKEN) ? 
(void *) hashbin_get_first(irlmp->cachelog) : (void *) hashbin_get_next(irlmp->cachelog); } static void discovery_seq_stop(struct seq_file *seq, void *v) { spin_unlock_irq(&irlmp->cachelog->hb_spinlock); } static int discovery_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_puts(seq, "IrLMP: Discovery log:\n\n"); else { const discovery_t *discovery = v; seq_printf(seq, "nickname: %s, hint: 0x%02x%02x", discovery->data.info, discovery->data.hints[0], discovery->data.hints[1]); #if 0 if ( discovery->data.hints[0] & HINT_PNP) seq_puts(seq, "PnP Compatible "); if ( discovery->data.hints[0] & HINT_PDA) seq_puts(seq, "PDA/Palmtop "); if ( discovery->data.hints[0] & HINT_COMPUTER) seq_puts(seq, "Computer "); if ( discovery->data.hints[0] & HINT_PRINTER) seq_puts(seq, "Printer "); if ( discovery->data.hints[0] & HINT_MODEM) seq_puts(seq, "Modem "); if ( discovery->data.hints[0] & HINT_FAX) seq_puts(seq, "Fax "); if ( discovery->data.hints[0] & HINT_LAN) seq_puts(seq, "LAN Access "); if ( discovery->data.hints[1] & HINT_TELEPHONY) seq_puts(seq, "Telephony "); if ( discovery->data.hints[1] & HINT_FILE_SERVER) seq_puts(seq, "File Server "); if ( discovery->data.hints[1] & HINT_COMM) seq_puts(seq, "IrCOMM "); if ( discovery->data.hints[1] & HINT_OBEX) seq_puts(seq, "IrOBEX "); #endif seq_printf(seq,", saddr: 0x%08x, daddr: 0x%08x\n\n", discovery->data.saddr, discovery->data.daddr); seq_putc(seq, '\n'); } return 0; } static const struct seq_operations discovery_seq_ops = { .start = discovery_seq_start, .next = discovery_seq_next, .stop = discovery_seq_stop, .show = discovery_seq_show, }; static int discovery_seq_open(struct inode *inode, struct file *file) { IRDA_ASSERT(irlmp != NULL, return -EINVAL;); return seq_open(file, &discovery_seq_ops); } const struct file_operations discovery_seq_fops = { .owner = THIS_MODULE, .open = discovery_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #endif
gpl-2.0
mifl/android_kernel_pantech_ef45k
arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c
7839
78145
/* * SH7757 (B0 step) Pinmux * * Copyright (C) 2009-2010 Renesas Solutions Corp. * * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com> * * Based on SH7723 Pinmux * Copyright (C) 2008 Magnus Damm * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/gpio.h> #include <cpu/sh7757.h> enum { PINMUX_RESERVED = 0, PINMUX_DATA_BEGIN, PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA, PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA, PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA, PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA, PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA, PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA, PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA, PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA, PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA, PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA, PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA, PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA, PTG7_DATA, PTG6_DATA, PTG5_DATA, PTG4_DATA, PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA, PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA, PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA, PTI7_DATA, PTI6_DATA, PTI5_DATA, PTI4_DATA, PTI3_DATA, PTI2_DATA, PTI1_DATA, PTI0_DATA, PTJ6_DATA, PTJ5_DATA, PTJ4_DATA, PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA, PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA, PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA, PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA, PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA, PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA, PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA, PTO7_DATA, PTO6_DATA, PTO5_DATA, PTO4_DATA, PTO3_DATA, PTO2_DATA, PTO1_DATA, PTO0_DATA, PTP7_DATA, PTP6_DATA, PTP5_DATA, PTP4_DATA, PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA, PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA, PTR7_DATA, PTR6_DATA, 
PTR5_DATA, PTR4_DATA, PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA, PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA, PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA, PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA, PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA, PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA, PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA, PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA, PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA, PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA, PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA, PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA, PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA, PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA, PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA, PINMUX_DATA_END, PINMUX_INPUT_BEGIN, PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN, PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN, PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN, PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN, PTC7_IN, PTC6_IN, PTC5_IN, PTC4_IN, PTC3_IN, PTC2_IN, PTC1_IN, PTC0_IN, PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN, PTD3_IN, PTD2_IN, PTD1_IN, PTD0_IN, PTE7_IN, PTE6_IN, PTE5_IN, PTE4_IN, PTE3_IN, PTE2_IN, PTE1_IN, PTE0_IN, PTF7_IN, PTF6_IN, PTF5_IN, PTF4_IN, PTF3_IN, PTF2_IN, PTF1_IN, PTF0_IN, PTG7_IN, PTG6_IN, PTG5_IN, PTG4_IN, PTG3_IN, PTG2_IN, PTG1_IN, PTG0_IN, PTH7_IN, PTH6_IN, PTH5_IN, PTH4_IN, PTH3_IN, PTH2_IN, PTH1_IN, PTH0_IN, PTI7_IN, PTI6_IN, PTI5_IN, PTI4_IN, PTI3_IN, PTI2_IN, PTI1_IN, PTI0_IN, PTJ6_IN, PTJ5_IN, PTJ4_IN, PTJ3_IN, PTJ2_IN, PTJ1_IN, PTJ0_IN, PTK7_IN, PTK6_IN, PTK5_IN, PTK4_IN, PTK3_IN, PTK2_IN, PTK1_IN, PTK0_IN, PTL6_IN, PTL5_IN, PTL4_IN, PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN, PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN, PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN, PTN6_IN, PTN5_IN, PTN4_IN, PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN, PTO7_IN, PTO6_IN, PTO5_IN, PTO4_IN, PTO3_IN, PTO2_IN, PTO1_IN, PTO0_IN, PTP7_IN, PTP6_IN, PTP5_IN, PTP4_IN, PTP3_IN, PTP2_IN, PTP1_IN, PTP0_IN, PTQ6_IN, PTQ5_IN, PTQ4_IN, PTQ3_IN, PTQ2_IN, PTQ1_IN, PTQ0_IN, 
PTR7_IN, PTR6_IN, PTR5_IN, PTR4_IN, PTR3_IN, PTR2_IN, PTR1_IN, PTR0_IN, PTS7_IN, PTS6_IN, PTS5_IN, PTS4_IN, PTS3_IN, PTS2_IN, PTS1_IN, PTS0_IN, PTT7_IN, PTT6_IN, PTT5_IN, PTT4_IN, PTT3_IN, PTT2_IN, PTT1_IN, PTT0_IN, PTU7_IN, PTU6_IN, PTU5_IN, PTU4_IN, PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN, PTV7_IN, PTV6_IN, PTV5_IN, PTV4_IN, PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN, PTW7_IN, PTW6_IN, PTW5_IN, PTW4_IN, PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN, PTX7_IN, PTX6_IN, PTX5_IN, PTX4_IN, PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN, PTY7_IN, PTY6_IN, PTY5_IN, PTY4_IN, PTY3_IN, PTY2_IN, PTY1_IN, PTY0_IN, PTZ7_IN, PTZ6_IN, PTZ5_IN, PTZ4_IN, PTZ3_IN, PTZ2_IN, PTZ1_IN, PTZ0_IN, PINMUX_INPUT_END, PINMUX_INPUT_PULLUP_BEGIN, PTA7_IN_PU, PTA6_IN_PU, PTA5_IN_PU, PTA4_IN_PU, PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU, PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU, PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU, PTD0_IN_PU, PTE7_IN_PU, PTE6_IN_PU, PTE5_IN_PU, PTE4_IN_PU, PTE3_IN_PU, PTE2_IN_PU, PTE1_IN_PU, PTE0_IN_PU, PTF7_IN_PU, PTF6_IN_PU, PTF5_IN_PU, PTF4_IN_PU, PTF3_IN_PU, PTF2_IN_PU, PTF1_IN_PU, PTF0_IN_PU, PTG7_IN_PU, PTG6_IN_PU, PTG4_IN_PU, PTH7_IN_PU, PTH6_IN_PU, PTH5_IN_PU, PTH4_IN_PU, PTH3_IN_PU, PTH2_IN_PU, PTH1_IN_PU, PTH0_IN_PU, PTI7_IN_PU, PTI6_IN_PU, PTI4_IN_PU, PTI3_IN_PU, PTI2_IN_PU, PTI1_IN_PU, PTI0_IN_PU, PTJ6_IN_PU, PTJ5_IN_PU, PTJ4_IN_PU, PTJ3_IN_PU, PTJ2_IN_PU, PTJ1_IN_PU, PTJ0_IN_PU, PTK7_IN_PU, PTK6_IN_PU, PTK5_IN_PU, PTK4_IN_PU, PTK3_IN_PU, PTK2_IN_PU, PTK1_IN_PU, PTK0_IN_PU, PTL6_IN_PU, PTL5_IN_PU, PTL4_IN_PU, PTL3_IN_PU, PTL2_IN_PU, PTL1_IN_PU, PTL0_IN_PU, PTM7_IN_PU, PTM6_IN_PU, PTM5_IN_PU, PTM4_IN_PU, PTN4_IN_PU, PTN3_IN_PU, PTN2_IN_PU, PTN1_IN_PU, PTN0_IN_PU, PTO7_IN_PU, PTO6_IN_PU, PTO5_IN_PU, PTO4_IN_PU, PTO3_IN_PU, PTO2_IN_PU, PTO1_IN_PU, PTO0_IN_PU, PTT7_IN_PU, PTT6_IN_PU, PTT5_IN_PU, PTT4_IN_PU, PTT3_IN_PU, PTT2_IN_PU, PTT1_IN_PU, PTT0_IN_PU, PTU7_IN_PU, PTU6_IN_PU, PTU5_IN_PU, PTU4_IN_PU, PTU3_IN_PU, PTU2_IN_PU, PTU1_IN_PU, PTU0_IN_PU, PTV7_IN_PU, PTV6_IN_PU, PTV5_IN_PU, 
PTV4_IN_PU, PTV3_IN_PU, PTV2_IN_PU, PTW1_IN_PU, PTW0_IN_PU, PTX7_IN_PU, PTX6_IN_PU, PTX5_IN_PU, PTX4_IN_PU, PTX3_IN_PU, PTX2_IN_PU, PTX1_IN_PU, PTX0_IN_PU, PTY7_IN_PU, PTY6_IN_PU, PTY5_IN_PU, PTY4_IN_PU, PTY3_IN_PU, PTY2_IN_PU, PTY1_IN_PU, PTY0_IN_PU, PTZ7_IN_PU, PTZ6_IN_PU, PTZ5_IN_PU, PTZ4_IN_PU, PTZ3_IN_PU, PTZ2_IN_PU, PTZ1_IN_PU, PTZ0_IN_PU, PINMUX_INPUT_PULLUP_END, PINMUX_OUTPUT_BEGIN, PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT, PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT, PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT, PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT, PTC7_OUT, PTC6_OUT, PTC5_OUT, PTC4_OUT, PTC3_OUT, PTC2_OUT, PTC1_OUT, PTC0_OUT, PTD7_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT, PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT, PTE7_OUT, PTE6_OUT, PTE5_OUT, PTE4_OUT, PTE3_OUT, PTE2_OUT, PTE1_OUT, PTE0_OUT, PTF7_OUT, PTF6_OUT, PTF5_OUT, PTF4_OUT, PTF3_OUT, PTF2_OUT, PTF1_OUT, PTF0_OUT, PTG7_OUT, PTG6_OUT, PTG5_OUT, PTG4_OUT, PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT, PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT, PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT, PTI7_OUT, PTI6_OUT, PTI5_OUT, PTI4_OUT, PTI3_OUT, PTI2_OUT, PTI1_OUT, PTI0_OUT, PTJ6_OUT, PTJ5_OUT, PTJ4_OUT, PTJ3_OUT, PTJ2_OUT, PTJ1_OUT, PTJ0_OUT, PTK7_OUT, PTK6_OUT, PTK5_OUT, PTK4_OUT, PTK3_OUT, PTK2_OUT, PTK1_OUT, PTK0_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT, PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT, PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT, PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT, PTN6_OUT, PTN5_OUT, PTN4_OUT, PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT, PTO7_OUT, PTO6_OUT, PTO5_OUT, PTO4_OUT, PTO3_OUT, PTO2_OUT, PTO1_OUT, PTO0_OUT, PTP7_OUT, PTP6_OUT, PTP5_OUT, PTP4_OUT, PTP3_OUT, PTP2_OUT, PTP1_OUT, PTP0_OUT, PTQ6_OUT, PTQ5_OUT, PTQ4_OUT, PTQ3_OUT, PTQ2_OUT, PTQ1_OUT, PTQ0_OUT, PTR7_OUT, PTR6_OUT, PTR5_OUT, PTR4_OUT, PTR3_OUT, PTR2_OUT, PTR1_OUT, PTR0_OUT, PTS7_OUT, PTS6_OUT, PTS5_OUT, PTS4_OUT, PTS3_OUT, PTS2_OUT, PTS1_OUT, PTS0_OUT, PTT7_OUT, PTT6_OUT, PTT5_OUT, PTT4_OUT, PTT3_OUT, PTT2_OUT, PTT1_OUT, PTT0_OUT, PTU7_OUT, PTU6_OUT, PTU5_OUT, PTU4_OUT, 
PTU3_OUT, PTU2_OUT, PTU1_OUT, PTU0_OUT, PTV7_OUT, PTV6_OUT, PTV5_OUT, PTV4_OUT, PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT, PTW7_OUT, PTW6_OUT, PTW5_OUT, PTW4_OUT, PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT, PTX7_OUT, PTX6_OUT, PTX5_OUT, PTX4_OUT, PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT, PTY7_OUT, PTY6_OUT, PTY5_OUT, PTY4_OUT, PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT, PTZ7_OUT, PTZ6_OUT, PTZ5_OUT, PTZ4_OUT, PTZ3_OUT, PTZ2_OUT, PTZ1_OUT, PTZ0_OUT, PINMUX_OUTPUT_END, PINMUX_FUNCTION_BEGIN, PTA7_FN, PTA6_FN, PTA5_FN, PTA4_FN, PTA3_FN, PTA2_FN, PTA1_FN, PTA0_FN, PTB7_FN, PTB6_FN, PTB5_FN, PTB4_FN, PTB3_FN, PTB2_FN, PTB1_FN, PTB0_FN, PTC7_FN, PTC6_FN, PTC5_FN, PTC4_FN, PTC3_FN, PTC2_FN, PTC1_FN, PTC0_FN, PTD7_FN, PTD6_FN, PTD5_FN, PTD4_FN, PTD3_FN, PTD2_FN, PTD1_FN, PTD0_FN, PTE7_FN, PTE6_FN, PTE5_FN, PTE4_FN, PTE3_FN, PTE2_FN, PTE1_FN, PTE0_FN, PTF7_FN, PTF6_FN, PTF5_FN, PTF4_FN, PTF3_FN, PTF2_FN, PTF1_FN, PTF0_FN, PTG7_FN, PTG6_FN, PTG5_FN, PTG4_FN, PTG3_FN, PTG2_FN, PTG1_FN, PTG0_FN, PTH7_FN, PTH6_FN, PTH5_FN, PTH4_FN, PTH3_FN, PTH2_FN, PTH1_FN, PTH0_FN, PTI7_FN, PTI6_FN, PTI5_FN, PTI4_FN, PTI3_FN, PTI2_FN, PTI1_FN, PTI0_FN, PTJ6_FN, PTJ5_FN, PTJ4_FN, PTJ3_FN, PTJ2_FN, PTJ1_FN, PTJ0_FN, PTK7_FN, PTK6_FN, PTK5_FN, PTK4_FN, PTK3_FN, PTK2_FN, PTK1_FN, PTK0_FN, PTL6_FN, PTL5_FN, PTL4_FN, PTL3_FN, PTL2_FN, PTL1_FN, PTL0_FN, PTM7_FN, PTM6_FN, PTM5_FN, PTM4_FN, PTM3_FN, PTM2_FN, PTM1_FN, PTM0_FN, PTN6_FN, PTN5_FN, PTN4_FN, PTN3_FN, PTN2_FN, PTN1_FN, PTN0_FN, PTO7_FN, PTO6_FN, PTO5_FN, PTO4_FN, PTO3_FN, PTO2_FN, PTO1_FN, PTO0_FN, PTP7_FN, PTP6_FN, PTP5_FN, PTP4_FN, PTP3_FN, PTP2_FN, PTP1_FN, PTP0_FN, PTQ6_FN, PTQ5_FN, PTQ4_FN, PTQ3_FN, PTQ2_FN, PTQ1_FN, PTQ0_FN, PTR7_FN, PTR6_FN, PTR5_FN, PTR4_FN, PTR3_FN, PTR2_FN, PTR1_FN, PTR0_FN, PTS7_FN, PTS6_FN, PTS5_FN, PTS4_FN, PTS3_FN, PTS2_FN, PTS1_FN, PTS0_FN, PTT7_FN, PTT6_FN, PTT5_FN, PTT4_FN, PTT3_FN, PTT2_FN, PTT1_FN, PTT0_FN, PTU7_FN, PTU6_FN, PTU5_FN, PTU4_FN, PTU3_FN, PTU2_FN, PTU1_FN, PTU0_FN, PTV7_FN, PTV6_FN, PTV5_FN, PTV4_FN, 
PTV3_FN, PTV2_FN, PTV1_FN, PTV0_FN, PTW7_FN, PTW6_FN, PTW5_FN, PTW4_FN, PTW3_FN, PTW2_FN, PTW1_FN, PTW0_FN, PTX7_FN, PTX6_FN, PTX5_FN, PTX4_FN, PTX3_FN, PTX2_FN, PTX1_FN, PTX0_FN, PTY7_FN, PTY6_FN, PTY5_FN, PTY4_FN, PTY3_FN, PTY2_FN, PTY1_FN, PTY0_FN, PTZ7_FN, PTZ6_FN, PTZ5_FN, PTZ4_FN, PTZ3_FN, PTZ2_FN, PTZ1_FN, PTZ0_FN, PS0_15_FN1, PS0_15_FN2, PS0_14_FN1, PS0_14_FN2, PS0_13_FN1, PS0_13_FN2, PS0_12_FN1, PS0_12_FN2, PS0_11_FN1, PS0_11_FN2, PS0_10_FN1, PS0_10_FN2, PS0_9_FN1, PS0_9_FN2, PS0_8_FN1, PS0_8_FN2, PS0_7_FN1, PS0_7_FN2, PS0_6_FN1, PS0_6_FN2, PS0_5_FN1, PS0_5_FN2, PS0_4_FN1, PS0_4_FN2, PS0_3_FN1, PS0_3_FN2, PS0_2_FN1, PS0_2_FN2, PS1_10_FN1, PS1_10_FN2, PS1_9_FN1, PS1_9_FN2, PS1_8_FN1, PS1_8_FN2, PS1_2_FN1, PS1_2_FN2, PS2_13_FN1, PS2_13_FN2, PS2_12_FN1, PS2_12_FN2, PS2_7_FN1, PS2_7_FN2, PS2_6_FN1, PS2_6_FN2, PS2_5_FN1, PS2_5_FN2, PS2_4_FN1, PS2_4_FN2, PS2_2_FN1, PS2_2_FN2, PS3_15_FN1, PS3_15_FN2, PS3_14_FN1, PS3_14_FN2, PS3_13_FN1, PS3_13_FN2, PS3_12_FN1, PS3_12_FN2, PS3_11_FN1, PS3_11_FN2, PS3_10_FN1, PS3_10_FN2, PS3_9_FN1, PS3_9_FN2, PS3_8_FN1, PS3_8_FN2, PS3_7_FN1, PS3_7_FN2, PS3_2_FN1, PS3_2_FN2, PS3_1_FN1, PS3_1_FN2, PS4_14_FN1, PS4_14_FN2, PS4_13_FN1, PS4_13_FN2, PS4_12_FN1, PS4_12_FN2, PS4_10_FN1, PS4_10_FN2, PS4_9_FN1, PS4_9_FN2, PS4_8_FN1, PS4_8_FN2, PS4_4_FN1, PS4_4_FN2, PS4_3_FN1, PS4_3_FN2, PS4_2_FN1, PS4_2_FN2, PS4_1_FN1, PS4_1_FN2, PS4_0_FN1, PS4_0_FN2, PS5_11_FN1, PS5_11_FN2, PS5_10_FN1, PS5_10_FN2, PS5_9_FN1, PS5_9_FN2, PS5_8_FN1, PS5_8_FN2, PS5_7_FN1, PS5_7_FN2, PS5_6_FN1, PS5_6_FN2, PS5_5_FN1, PS5_5_FN2, PS5_4_FN1, PS5_4_FN2, PS5_3_FN1, PS5_3_FN2, PS5_2_FN1, PS5_2_FN2, PS6_15_FN1, PS6_15_FN2, PS6_14_FN1, PS6_14_FN2, PS6_13_FN1, PS6_13_FN2, PS6_12_FN1, PS6_12_FN2, PS6_11_FN1, PS6_11_FN2, PS6_10_FN1, PS6_10_FN2, PS6_9_FN1, PS6_9_FN2, PS6_8_FN1, PS6_8_FN2, PS6_7_FN1, PS6_7_FN2, PS6_6_FN1, PS6_6_FN2, PS6_5_FN1, PS6_5_FN2, PS6_4_FN1, PS6_4_FN2, PS6_3_FN1, PS6_3_FN2, PS6_2_FN1, PS6_2_FN2, PS6_1_FN1, PS6_1_FN2, PS6_0_FN1, PS6_0_FN2, PS7_15_FN1, 
PS7_15_FN2, PS7_14_FN1, PS7_14_FN2, PS7_13_FN1, PS7_13_FN2, PS7_12_FN1, PS7_12_FN2, PS7_11_FN1, PS7_11_FN2, PS7_10_FN1, PS7_10_FN2, PS7_9_FN1, PS7_9_FN2, PS7_8_FN1, PS7_8_FN2, PS7_7_FN1, PS7_7_FN2, PS7_6_FN1, PS7_6_FN2, PS7_5_FN1, PS7_5_FN2, PS7_4_FN1, PS7_4_FN2, PS8_15_FN1, PS8_15_FN2, PS8_14_FN1, PS8_14_FN2, PS8_13_FN1, PS8_13_FN2, PS8_12_FN1, PS8_12_FN2, PS8_11_FN1, PS8_11_FN2, PS8_10_FN1, PS8_10_FN2, PS8_9_FN1, PS8_9_FN2, PS8_8_FN1, PS8_8_FN2, PINMUX_FUNCTION_END, PINMUX_MARK_BEGIN, /* PTA (mobule: LBSC, RGMII) */ BS_MARK, RDWR_MARK, WE1_MARK, RDY_MARK, ET0_MDC_MARK, ET0_MDIO_MARK, ET1_MDC_MARK, ET1_MDIO_MARK, /* PTB (mobule: INTC, ONFI, TMU) */ IRQ15_MARK, IRQ14_MARK, IRQ13_MARK, IRQ12_MARK, IRQ11_MARK, IRQ10_MARK, IRQ9_MARK, IRQ8_MARK, ON_NRE_MARK, ON_NWE_MARK, ON_NWP_MARK, ON_NCE0_MARK, ON_R_B0_MARK, ON_ALE_MARK, ON_CLE_MARK, TCLK_MARK, /* PTC (mobule: IRQ, PWMU) */ IRQ7_MARK, IRQ6_MARK, IRQ5_MARK, IRQ4_MARK, IRQ3_MARK, IRQ2_MARK, IRQ1_MARK, IRQ0_MARK, PWMU0_MARK, PWMU1_MARK, PWMU2_MARK, PWMU3_MARK, PWMU4_MARK, PWMU5_MARK, /* PTD (mobule: SPI0, DMAC) */ SP0_MOSI_MARK, SP0_MISO_MARK, SP0_SCK_MARK, SP0_SCK_FB_MARK, SP0_SS0_MARK, SP0_SS1_MARK, SP0_SS2_MARK, SP0_SS3_MARK, DREQ0_MARK, DACK0_MARK, TEND0_MARK, /* PTE (mobule: RMII) */ RMII0_CRS_DV_MARK, RMII0_TXD1_MARK, RMII0_TXD0_MARK, RMII0_TXEN_MARK, RMII0_REFCLK_MARK, RMII0_RXD1_MARK, RMII0_RXD0_MARK, RMII0_RX_ER_MARK, /* PTF (mobule: RMII, SerMux) */ RMII1_CRS_DV_MARK, RMII1_TXD1_MARK, RMII1_TXD0_MARK, RMII1_TXEN_MARK, RMII1_REFCLK_MARK, RMII1_RXD1_MARK, RMII1_RXD0_MARK, RMII1_RX_ER_MARK, RAC_RI_MARK, /* PTG (mobule: system, LBSC, LPC, WDT, LPC, eMMC) */ BOOTFMS_MARK, BOOTWP_MARK, A25_MARK, A24_MARK, SERIRQ_MARK, WDTOVF_MARK, LPCPD_MARK, LDRQ_MARK, MMCCLK_MARK, MMCCMD_MARK, /* PTH (mobule: SPI1, LPC, DMAC, ADC) */ SP1_MOSI_MARK, SP1_MISO_MARK, SP1_SCK_MARK, SP1_SCK_FB_MARK, SP1_SS0_MARK, SP1_SS1_MARK, WP_MARK, FMS0_MARK, TEND1_MARK, DREQ1_MARK, DACK1_MARK, ADTRG1_MARK, ADTRG0_MARK, /* PTI (mobule: LBSC, SDHI) 
*/ D15_MARK, D14_MARK, D13_MARK, D12_MARK, D11_MARK, D10_MARK, D9_MARK, D8_MARK, SD_WP_MARK, SD_CD_MARK, SD_CLK_MARK, SD_CMD_MARK, SD_D3_MARK, SD_D2_MARK, SD_D1_MARK, SD_D0_MARK, /* PTJ (mobule: SCIF234) */ RTS3_MARK, CTS3_MARK, TXD3_MARK, RXD3_MARK, RTS4_MARK, RXD4_MARK, TXD4_MARK, /* PTK (mobule: SERMUX, LBSC, SCIF) */ COM2_TXD_MARK, COM2_RXD_MARK, COM2_RTS_MARK, COM2_CTS_MARK, COM2_DTR_MARK, COM2_DSR_MARK, COM2_DCD_MARK, CLKOUT_MARK, SCK2_MARK, SCK4_MARK, SCK3_MARK, /* PTL (mobule: SERMUX, SCIF, LBSC, AUD) */ RAC_RXD_MARK, RAC_RTS_MARK, RAC_CTS_MARK, RAC_DTR_MARK, RAC_DSR_MARK, RAC_DCD_MARK, RAC_TXD_MARK, RXD2_MARK, CS5_MARK, CS6_MARK, AUDSYNC_MARK, AUDCK_MARK, TXD2_MARK, /* PTM (mobule: LBSC, IIC) */ CS4_MARK, RD_MARK, WE0_MARK, CS0_MARK, SDA6_MARK, SCL6_MARK, SDA7_MARK, SCL7_MARK, /* PTN (mobule: USB, JMC, SGPIO, WDT) */ VBUS_EN_MARK, VBUS_OC_MARK, JMCTCK_MARK, JMCTMS_MARK, JMCTDO_MARK, JMCTDI_MARK, JMCTRST_MARK, SGPIO1_CLK_MARK, SGPIO1_LOAD_MARK, SGPIO1_DI_MARK, SGPIO1_DO_MARK, SUB_CLKIN_MARK, /* PTO (mobule: SGPIO, SerMux) */ SGPIO0_CLK_MARK, SGPIO0_LOAD_MARK, SGPIO0_DI_MARK, SGPIO0_DO_MARK, SGPIO2_CLK_MARK, SGPIO2_LOAD_MARK, SGPIO2_DI_MARK, SGPIO2_DO_MARK, COM1_TXD_MARK, COM1_RXD_MARK, COM1_RTS_MARK, COM1_CTS_MARK, /* PTQ (mobule: LPC) */ LAD3_MARK, LAD2_MARK, LAD1_MARK, LAD0_MARK, LFRAME_MARK, LRESET_MARK, LCLK_MARK, /* PTR (mobule: GRA, IIC) */ DDC3_MARK, DDC2_MARK, SDA2_MARK, SCL2_MARK, SDA1_MARK, SCL1_MARK, SDA0_MARK, SCL0_MARK, SDA8_MARK, SCL8_MARK, /* PTS (mobule: GRA, IIC) */ DDC1_MARK, DDC0_MARK, SDA5_MARK, SCL5_MARK, SDA4_MARK, SCL4_MARK, SDA3_MARK, SCL3_MARK, SDA9_MARK, SCL9_MARK, /* PTT (mobule: PWMX, AUD) */ PWMX7_MARK, PWMX6_MARK, PWMX5_MARK, PWMX4_MARK, PWMX3_MARK, PWMX2_MARK, PWMX1_MARK, PWMX0_MARK, AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK, AUDATA0_MARK, STATUS1_MARK, STATUS0_MARK, /* PTU (mobule: LPC, APM) */ LGPIO7_MARK, LGPIO6_MARK, LGPIO5_MARK, LGPIO4_MARK, LGPIO3_MARK, LGPIO2_MARK, LGPIO1_MARK, LGPIO0_MARK, APMONCTL_O_MARK, 
APMPWBTOUT_O_MARK, APMSCI_O_MARK, APMVDDON_MARK, APMSLPBTN_MARK, APMPWRBTN_MARK, APMS5N_MARK, APMS3N_MARK, /* PTV (mobule: LBSC, SerMux, R-SPI, EVC, GRA) */ A23_MARK, A22_MARK, A21_MARK, A20_MARK, A19_MARK, A18_MARK, A17_MARK, A16_MARK, COM2_RI_MARK, R_SPI_MOSI_MARK, R_SPI_MISO_MARK, R_SPI_RSPCK_MARK, R_SPI_SSL0_MARK, R_SPI_SSL1_MARK, EVENT7_MARK, EVENT6_MARK, VBIOS_DI_MARK, VBIOS_DO_MARK, VBIOS_CLK_MARK, VBIOS_CS_MARK, /* PTW (mobule: LBSC, EVC, SCIF) */ A15_MARK, A14_MARK, A13_MARK, A12_MARK, A11_MARK, A10_MARK, A9_MARK, A8_MARK, EVENT5_MARK, EVENT4_MARK, EVENT3_MARK, EVENT2_MARK, EVENT1_MARK, EVENT0_MARK, CTS4_MARK, CTS2_MARK, /* PTX (mobule: LBSC, SCIF, SIM) */ A7_MARK, A6_MARK, A5_MARK, A4_MARK, A3_MARK, A2_MARK, A1_MARK, A0_MARK, RTS2_MARK, SIM_D_MARK, SIM_CLK_MARK, SIM_RST_MARK, /* PTY (mobule: LBSC) */ D7_MARK, D6_MARK, D5_MARK, D4_MARK, D3_MARK, D2_MARK, D1_MARK, D0_MARK, /* PTZ (mobule: eMMC, ONFI) */ MMCDAT7_MARK, MMCDAT6_MARK, MMCDAT5_MARK, MMCDAT4_MARK, MMCDAT3_MARK, MMCDAT2_MARK, MMCDAT1_MARK, MMCDAT0_MARK, ON_DQ7_MARK, ON_DQ6_MARK, ON_DQ5_MARK, ON_DQ4_MARK, ON_DQ3_MARK, ON_DQ2_MARK, ON_DQ1_MARK, ON_DQ0_MARK, PINMUX_MARK_END, }; static pinmux_enum_t pinmux_data[] = { /* PTA GPIO */ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT), PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT), PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT), PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT), PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT), PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT), PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT), PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT), /* PTB GPIO */ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT), PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT), PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT), PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT), PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT), PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT), PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT), PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT), /* PTC GPIO */ PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT), PINMUX_DATA(PTC6_DATA, 
PTC6_IN, PTC6_OUT),
	PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT),
	PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT),
	PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT),
	PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT),
	PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT),
	PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT),

	/* PTD GPIO */
	PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT),
	PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT),
	PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT),
	PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT),
	PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT),
	PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT),
	PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT),
	PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT),

	/* PTE GPIO */
	PINMUX_DATA(PTE7_DATA, PTE7_IN, PTE7_OUT),
	PINMUX_DATA(PTE6_DATA, PTE6_IN, PTE6_OUT),
	PINMUX_DATA(PTE5_DATA, PTE5_IN, PTE5_OUT),
	PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT),
	PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT),
	PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT),
	PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT),
	PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT),

	/* PTF GPIO */
	PINMUX_DATA(PTF7_DATA, PTF7_IN, PTF7_OUT),
	PINMUX_DATA(PTF6_DATA, PTF6_IN, PTF6_OUT),
	PINMUX_DATA(PTF5_DATA, PTF5_IN, PTF5_OUT),
	PINMUX_DATA(PTF4_DATA, PTF4_IN, PTF4_OUT),
	PINMUX_DATA(PTF3_DATA, PTF3_IN, PTF3_OUT),
	PINMUX_DATA(PTF2_DATA, PTF2_IN, PTF2_OUT),
	PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_OUT),
	PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT),

	/* PTG GPIO */
	PINMUX_DATA(PTG7_DATA, PTG7_IN, PTG7_OUT),
	PINMUX_DATA(PTG6_DATA, PTG6_IN, PTG6_OUT),
	PINMUX_DATA(PTG5_DATA, PTG5_IN, PTG5_OUT),
	PINMUX_DATA(PTG4_DATA, PTG4_IN, PTG4_OUT),
	PINMUX_DATA(PTG3_DATA, PTG3_IN, PTG3_OUT),
	PINMUX_DATA(PTG2_DATA, PTG2_IN, PTG2_OUT),
	PINMUX_DATA(PTG1_DATA, PTG1_IN, PTG1_OUT),
	PINMUX_DATA(PTG0_DATA, PTG0_IN, PTG0_OUT),

	/* PTH GPIO */
	PINMUX_DATA(PTH7_DATA, PTH7_IN, PTH7_OUT),
	PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT),
	PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT),
	PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT),
	PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT),
	PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT),
	PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT),
	PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT),

	/* PTI GPIO */
	PINMUX_DATA(PTI7_DATA, PTI7_IN, PTI7_OUT),
	PINMUX_DATA(PTI6_DATA, PTI6_IN, PTI6_OUT),
	PINMUX_DATA(PTI5_DATA, PTI5_IN, PTI5_OUT),
	PINMUX_DATA(PTI4_DATA, PTI4_IN, PTI4_OUT),
	PINMUX_DATA(PTI3_DATA, PTI3_IN, PTI3_OUT),
	PINMUX_DATA(PTI2_DATA, PTI2_IN, PTI2_OUT),
	PINMUX_DATA(PTI1_DATA, PTI1_IN, PTI1_OUT),
	PINMUX_DATA(PTI0_DATA, PTI0_IN, PTI0_OUT),

	/* PTJ GPIO (7 pins: PTJ6..PTJ0) */
	PINMUX_DATA(PTJ6_DATA, PTJ6_IN, PTJ6_OUT),
	PINMUX_DATA(PTJ5_DATA, PTJ5_IN, PTJ5_OUT),
	PINMUX_DATA(PTJ4_DATA, PTJ4_IN, PTJ4_OUT),
	PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT),
	PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT),
	PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT),
	PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT),

	/* PTK GPIO */
	PINMUX_DATA(PTK7_DATA, PTK7_IN, PTK7_OUT),
	PINMUX_DATA(PTK6_DATA, PTK6_IN, PTK6_OUT),
	PINMUX_DATA(PTK5_DATA, PTK5_IN, PTK5_OUT),
	PINMUX_DATA(PTK4_DATA, PTK4_IN, PTK4_OUT),
	PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT),
	PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT),
	PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT),
	PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT),

	/* PTL GPIO (7 pins: PTL6..PTL0) */
	PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT),
	PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT),
	PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT),
	PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT),
	PINMUX_DATA(PTL2_DATA, PTL2_IN, PTL2_OUT),
	PINMUX_DATA(PTL1_DATA, PTL1_IN, PTL1_OUT),
	PINMUX_DATA(PTL0_DATA, PTL0_IN, PTL0_OUT),

	/* PTM GPIO — NOTE(review): no PTM7 in/out entry here although
	 * PTM7_DATA/GPIO_PTM7 exist; confirm against the datasheet. */
	PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT),
	PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT),
	PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT),
	PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT),
	PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT),
	PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT),
	PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT),

	/* PTN GPIO */
	PINMUX_DATA(PTN6_DATA, PTN6_IN, PTN6_OUT),
	PINMUX_DATA(PTN5_DATA, PTN5_IN, PTN5_OUT),
	PINMUX_DATA(PTN4_DATA, PTN4_IN, PTN4_OUT),
	PINMUX_DATA(PTN3_DATA, PTN3_IN, PTN3_OUT),
	PINMUX_DATA(PTN2_DATA, PTN2_IN, PTN2_OUT),
	PINMUX_DATA(PTN1_DATA, PTN1_IN, PTN1_OUT),
	PINMUX_DATA(PTN0_DATA, PTN0_IN, PTN0_OUT),

	/* PTO GPIO */
	PINMUX_DATA(PTO7_DATA, PTO7_IN, PTO7_OUT),
	PINMUX_DATA(PTO6_DATA, PTO6_IN, PTO6_OUT),
	PINMUX_DATA(PTO5_DATA, PTO5_IN, PTO5_OUT),
	PINMUX_DATA(PTO4_DATA, PTO4_IN, PTO4_OUT),
	PINMUX_DATA(PTO3_DATA, PTO3_IN, PTO3_OUT),
	PINMUX_DATA(PTO2_DATA, PTO2_IN, PTO2_OUT),
	PINMUX_DATA(PTO1_DATA, PTO1_IN, PTO1_OUT),
	PINMUX_DATA(PTO0_DATA, PTO0_IN, PTO0_OUT),

	/* PTQ GPIO — NOTE(review): the table has no PTP in/out entries;
	 * presumably PTP is function-only — confirm against the datasheet. */
	PINMUX_DATA(PTQ6_DATA, PTQ6_IN, PTQ6_OUT),
	PINMUX_DATA(PTQ5_DATA, PTQ5_IN, PTQ5_OUT),
	PINMUX_DATA(PTQ4_DATA, PTQ4_IN, PTQ4_OUT),
	PINMUX_DATA(PTQ3_DATA, PTQ3_IN, PTQ3_OUT),
	PINMUX_DATA(PTQ2_DATA, PTQ2_IN, PTQ2_OUT),
	PINMUX_DATA(PTQ1_DATA, PTQ1_IN, PTQ1_OUT),
	PINMUX_DATA(PTQ0_DATA, PTQ0_IN, PTQ0_OUT),

	/* PTR GPIO */
	PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT),
	PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT),
	PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT),
	PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT),
	PINMUX_DATA(PTR3_DATA, PTR3_IN, PTR3_OUT),
	PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_OUT),
	PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT),
	PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT),

	/* PTS GPIO */
	PINMUX_DATA(PTS7_DATA, PTS7_IN, PTS7_OUT),
	PINMUX_DATA(PTS6_DATA, PTS6_IN, PTS6_OUT),
	PINMUX_DATA(PTS5_DATA, PTS5_IN, PTS5_OUT),
	PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT),
	PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT),
	PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT),
	PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT),
	PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT),

	/* PTT GPIO */
	PINMUX_DATA(PTT7_DATA, PTT7_IN, PTT7_OUT),
	PINMUX_DATA(PTT6_DATA, PTT6_IN, PTT6_OUT),
	PINMUX_DATA(PTT5_DATA, PTT5_IN, PTT5_OUT),
	PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT),
	PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT),
	PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT),
	PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT),
	PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT),

	/* PTU GPIO */
	PINMUX_DATA(PTU7_DATA, PTU7_IN, PTU7_OUT),
	PINMUX_DATA(PTU6_DATA, PTU6_IN, PTU6_OUT),
	PINMUX_DATA(PTU5_DATA, PTU5_IN, PTU5_OUT),
	PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT),
	PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT),
	PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT),
	PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT),
	PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT),

	/* PTV GPIO */
	PINMUX_DATA(PTV7_DATA, PTV7_IN, PTV7_OUT),
	PINMUX_DATA(PTV6_DATA, PTV6_IN, PTV6_OUT),
	PINMUX_DATA(PTV5_DATA, PTV5_IN, PTV5_OUT),
	PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT),
	PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT),
	PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT),
	PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT),
	PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT),

	/* PTW GPIO */
	PINMUX_DATA(PTW7_DATA, PTW7_IN, PTW7_OUT),
	PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_OUT),
	PINMUX_DATA(PTW5_DATA, PTW5_IN, PTW5_OUT),
	PINMUX_DATA(PTW4_DATA, PTW4_IN, PTW4_OUT),
	PINMUX_DATA(PTW3_DATA, PTW3_IN, PTW3_OUT),
	PINMUX_DATA(PTW2_DATA, PTW2_IN, PTW2_OUT),
	PINMUX_DATA(PTW1_DATA, PTW1_IN, PTW1_OUT),
	PINMUX_DATA(PTW0_DATA, PTW0_IN, PTW0_OUT),

	/* PTX GPIO */
	PINMUX_DATA(PTX7_DATA, PTX7_IN, PTX7_OUT),
	PINMUX_DATA(PTX6_DATA, PTX6_IN, PTX6_OUT),
	PINMUX_DATA(PTX5_DATA, PTX5_IN, PTX5_OUT),
	PINMUX_DATA(PTX4_DATA, PTX4_IN, PTX4_OUT),
	PINMUX_DATA(PTX3_DATA, PTX3_IN, PTX3_OUT),
	PINMUX_DATA(PTX2_DATA, PTX2_IN, PTX2_OUT),
	PINMUX_DATA(PTX1_DATA, PTX1_IN, PTX1_OUT),
	PINMUX_DATA(PTX0_DATA, PTX0_IN, PTX0_OUT),

	/* PTY GPIO */
	PINMUX_DATA(PTY7_DATA, PTY7_IN, PTY7_OUT),
	PINMUX_DATA(PTY6_DATA, PTY6_IN, PTY6_OUT),
	PINMUX_DATA(PTY5_DATA, PTY5_IN, PTY5_OUT),
	PINMUX_DATA(PTY4_DATA, PTY4_IN, PTY4_OUT),
	PINMUX_DATA(PTY3_DATA, PTY3_IN, PTY3_OUT),
	PINMUX_DATA(PTY2_DATA, PTY2_IN, PTY2_OUT),
	PINMUX_DATA(PTY1_DATA, PTY1_IN, PTY1_OUT),
	PINMUX_DATA(PTY0_DATA, PTY0_IN, PTY0_OUT),

	/* PTZ GPIO */
	PINMUX_DATA(PTZ7_DATA, PTZ7_IN, PTZ7_OUT),
	PINMUX_DATA(PTZ6_DATA, PTZ6_IN, PTZ6_OUT),
	PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_OUT),
	PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_OUT),
	PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_OUT),
	PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_OUT),
	PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_OUT),
	PINMUX_DATA(PTZ0_DATA, PTZ0_IN,
PTZ0_OUT),

	/*
	 * Pin-function (FN) mappings.  Where two functions share one pin
	 * they carry a module-select PSx_y_FNz argument: the two
	 * alternatives must use FN1/FN2 of the SAME selector and name the
	 * SAME PTxn_FN pin.  Entries below marked "was ..." fix places
	 * where the table violated that invariant (duplicate pins or
	 * duplicate FN1 selectors).
	 */
	/* PTA FN */
	PINMUX_DATA(BS_MARK, PTA7_FN),
	PINMUX_DATA(RDWR_MARK, PTA6_FN),
	PINMUX_DATA(WE1_MARK, PTA5_FN),
	PINMUX_DATA(RDY_MARK, PTA4_FN),
	PINMUX_DATA(ET0_MDC_MARK, PTA3_FN),
	PINMUX_DATA(ET0_MDIO_MARK, PTA2_FN),
	PINMUX_DATA(ET1_MDC_MARK, PTA1_FN),
	PINMUX_DATA(ET1_MDIO_MARK, PTA0_FN),
	/* PTB FN */
	PINMUX_DATA(IRQ15_MARK, PS0_15_FN1, PTB7_FN),
	PINMUX_DATA(ON_NRE_MARK, PS0_15_FN2, PTB7_FN),
	PINMUX_DATA(IRQ14_MARK, PS0_14_FN1, PTB6_FN),
	PINMUX_DATA(ON_NWE_MARK, PS0_14_FN2, PTB6_FN),
	PINMUX_DATA(IRQ13_MARK, PS0_13_FN1, PTB5_FN),
	PINMUX_DATA(ON_NWP_MARK, PS0_13_FN2, PTB5_FN),
	PINMUX_DATA(IRQ12_MARK, PS0_12_FN1, PTB4_FN),
	PINMUX_DATA(ON_NCE0_MARK, PS0_12_FN2, PTB4_FN),
	PINMUX_DATA(IRQ11_MARK, PS0_11_FN1, PTB3_FN),
	PINMUX_DATA(ON_R_B0_MARK, PS0_11_FN2, PTB3_FN),
	PINMUX_DATA(IRQ10_MARK, PS0_10_FN1, PTB2_FN),
	PINMUX_DATA(ON_ALE_MARK, PS0_10_FN2, PTB2_FN),
	PINMUX_DATA(IRQ9_MARK, PS0_9_FN1, PTB1_FN),
	PINMUX_DATA(ON_CLE_MARK, PS0_9_FN2, PTB1_FN),
	PINMUX_DATA(IRQ8_MARK, PS0_8_FN1, PTB0_FN),
	PINMUX_DATA(TCLK_MARK, PS0_8_FN2, PTB0_FN),
	/* PTC FN */
	PINMUX_DATA(IRQ7_MARK, PS0_7_FN1, PTC7_FN),
	PINMUX_DATA(PWMU0_MARK, PS0_7_FN2, PTC7_FN),
	PINMUX_DATA(IRQ6_MARK, PS0_6_FN1, PTC6_FN),
	PINMUX_DATA(PWMU1_MARK, PS0_6_FN2, PTC6_FN),
	PINMUX_DATA(IRQ5_MARK, PS0_5_FN1, PTC5_FN),
	PINMUX_DATA(PWMU2_MARK, PS0_5_FN2, PTC5_FN),
	/* was PTC5_FN: IRQ4 shares pin PTC4 with PWMU3 (selector PS0_4) */
	PINMUX_DATA(IRQ4_MARK, PS0_4_FN1, PTC4_FN),
	PINMUX_DATA(PWMU3_MARK, PS0_4_FN2, PTC4_FN),
	PINMUX_DATA(IRQ3_MARK, PS0_3_FN1, PTC3_FN),
	PINMUX_DATA(PWMU4_MARK, PS0_3_FN2, PTC3_FN),
	PINMUX_DATA(IRQ2_MARK, PS0_2_FN1, PTC2_FN),
	PINMUX_DATA(PWMU5_MARK, PS0_2_FN2, PTC2_FN),
	PINMUX_DATA(IRQ1_MARK, PTC1_FN),
	PINMUX_DATA(IRQ0_MARK, PTC0_FN),
	/* PTD FN */
	PINMUX_DATA(SP0_MOSI_MARK, PTD7_FN),
	PINMUX_DATA(SP0_MISO_MARK, PTD6_FN),
	PINMUX_DATA(SP0_SCK_MARK, PTD5_FN),
	PINMUX_DATA(SP0_SCK_FB_MARK, PTD4_FN),
	PINMUX_DATA(SP0_SS0_MARK, PTD3_FN),
	PINMUX_DATA(SP0_SS1_MARK, PS1_10_FN1, PTD2_FN),
	PINMUX_DATA(DREQ0_MARK, PS1_10_FN2, PTD2_FN),
	PINMUX_DATA(SP0_SS2_MARK, PS1_9_FN1, PTD1_FN),
	PINMUX_DATA(DACK0_MARK, PS1_9_FN2, PTD1_FN),
	PINMUX_DATA(SP0_SS3_MARK, PS1_8_FN1, PTD0_FN),
	PINMUX_DATA(TEND0_MARK, PS1_8_FN2, PTD0_FN),
	/* PTE FN */
	PINMUX_DATA(RMII0_CRS_DV_MARK, PTE7_FN),
	PINMUX_DATA(RMII0_TXD1_MARK, PTE6_FN),
	PINMUX_DATA(RMII0_TXD0_MARK, PTE5_FN),
	PINMUX_DATA(RMII0_TXEN_MARK, PTE4_FN),
	PINMUX_DATA(RMII0_REFCLK_MARK, PTE3_FN),
	PINMUX_DATA(RMII0_RXD1_MARK, PTE2_FN),
	PINMUX_DATA(RMII0_RXD0_MARK, PTE1_FN),
	PINMUX_DATA(RMII0_RX_ER_MARK, PTE0_FN),
	/* PTF FN */
	PINMUX_DATA(RMII1_CRS_DV_MARK, PTF7_FN),
	PINMUX_DATA(RMII1_TXD1_MARK, PTF6_FN),
	PINMUX_DATA(RMII1_TXD0_MARK, PTF5_FN),
	PINMUX_DATA(RMII1_TXEN_MARK, PTF4_FN),
	PINMUX_DATA(RMII1_REFCLK_MARK, PTF3_FN),
	PINMUX_DATA(RMII1_RXD1_MARK, PS1_2_FN1, PTF2_FN),
	PINMUX_DATA(RAC_RI_MARK, PS1_2_FN2, PTF2_FN),
	PINMUX_DATA(RMII1_RXD0_MARK, PTF1_FN),
	PINMUX_DATA(RMII1_RX_ER_MARK, PTF0_FN),
	/* PTG FN */
	PINMUX_DATA(BOOTFMS_MARK, PTG7_FN),
	PINMUX_DATA(BOOTWP_MARK, PTG6_FN),
	PINMUX_DATA(A25_MARK, PS2_13_FN1, PTG5_FN),
	PINMUX_DATA(MMCCLK_MARK, PS2_13_FN2, PTG5_FN),
	PINMUX_DATA(A24_MARK, PS2_12_FN1, PTG4_FN),
	PINMUX_DATA(MMCCMD_MARK, PS2_12_FN2, PTG4_FN),
	PINMUX_DATA(SERIRQ_MARK, PTG3_FN),
	PINMUX_DATA(WDTOVF_MARK, PTG2_FN),
	PINMUX_DATA(LPCPD_MARK, PTG1_FN),
	PINMUX_DATA(LDRQ_MARK, PTG0_FN),
	/* PTH FN */
	PINMUX_DATA(SP1_MOSI_MARK, PS2_7_FN1, PTH7_FN),
	PINMUX_DATA(TEND1_MARK, PS2_7_FN2, PTH7_FN),
	PINMUX_DATA(SP1_MISO_MARK, PS2_6_FN1, PTH6_FN),
	PINMUX_DATA(DREQ1_MARK, PS2_6_FN2, PTH6_FN),
	PINMUX_DATA(SP1_SCK_MARK, PS2_5_FN1, PTH5_FN),
	PINMUX_DATA(DACK1_MARK, PS2_5_FN2, PTH5_FN),
	PINMUX_DATA(SP1_SCK_FB_MARK, PS2_4_FN1, PTH4_FN),
	PINMUX_DATA(ADTRG1_MARK, PS2_4_FN2, PTH4_FN),
	PINMUX_DATA(SP1_SS0_MARK, PTH3_FN),
	PINMUX_DATA(SP1_SS1_MARK, PS2_2_FN1, PTH2_FN),
	PINMUX_DATA(ADTRG0_MARK, PS2_2_FN2, PTH2_FN),
	PINMUX_DATA(WP_MARK, PTH1_FN),
	PINMUX_DATA(FMS0_MARK, PTH0_FN),
	/* PTI FN */
	PINMUX_DATA(D15_MARK, PS3_15_FN1, PTI7_FN),
	PINMUX_DATA(SD_WP_MARK, PS3_15_FN2, PTI7_FN),
	PINMUX_DATA(D14_MARK, PS3_14_FN1, PTI6_FN),
	PINMUX_DATA(SD_CD_MARK, PS3_14_FN2, PTI6_FN),
	PINMUX_DATA(D13_MARK, PS3_13_FN1, PTI5_FN),
	PINMUX_DATA(SD_CLK_MARK, PS3_13_FN2, PTI5_FN),
	PINMUX_DATA(D12_MARK, PS3_12_FN1, PTI4_FN),
	PINMUX_DATA(SD_CMD_MARK, PS3_12_FN2, PTI4_FN),
	PINMUX_DATA(D11_MARK, PS3_11_FN1, PTI3_FN),
	PINMUX_DATA(SD_D3_MARK, PS3_11_FN2, PTI3_FN),
	PINMUX_DATA(D10_MARK, PS3_10_FN1, PTI2_FN),
	PINMUX_DATA(SD_D2_MARK, PS3_10_FN2, PTI2_FN),
	PINMUX_DATA(D9_MARK, PS3_9_FN1, PTI1_FN),
	PINMUX_DATA(SD_D1_MARK, PS3_9_FN2, PTI1_FN),
	PINMUX_DATA(D8_MARK, PS3_8_FN1, PTI0_FN),
	PINMUX_DATA(SD_D0_MARK, PS3_8_FN2, PTI0_FN),
	/* PTJ FN */
	PINMUX_DATA(RTS3_MARK, PTJ6_FN),
	PINMUX_DATA(CTS3_MARK, PTJ5_FN),
	PINMUX_DATA(TXD3_MARK, PTJ4_FN),
	PINMUX_DATA(RXD3_MARK, PTJ3_FN),
	PINMUX_DATA(RTS4_MARK, PTJ2_FN),
	PINMUX_DATA(RXD4_MARK, PTJ1_FN),
	PINMUX_DATA(TXD4_MARK, PTJ0_FN),
	/* PTK FN */
	PINMUX_DATA(COM2_TXD_MARK, PS3_7_FN1, PTK7_FN),
	PINMUX_DATA(SCK2_MARK, PS3_7_FN2, PTK7_FN),
	PINMUX_DATA(COM2_RXD_MARK, PTK6_FN),
	PINMUX_DATA(COM2_RTS_MARK, PTK5_FN),
	PINMUX_DATA(COM2_CTS_MARK, PTK4_FN),
	PINMUX_DATA(COM2_DTR_MARK, PTK3_FN),
	PINMUX_DATA(COM2_DSR_MARK, PS3_2_FN1, PTK2_FN),
	PINMUX_DATA(SCK4_MARK, PS3_2_FN2, PTK2_FN),
	PINMUX_DATA(COM2_DCD_MARK, PS3_1_FN1, PTK1_FN),
	PINMUX_DATA(SCK3_MARK, PS3_1_FN2, PTK1_FN),
	PINMUX_DATA(CLKOUT_MARK, PTK0_FN),
	/* PTL FN */
	PINMUX_DATA(RAC_RXD_MARK, PS4_14_FN1, PTL6_FN),
	PINMUX_DATA(RXD2_MARK, PS4_14_FN2, PTL6_FN),
	PINMUX_DATA(RAC_RTS_MARK, PS4_13_FN1, PTL5_FN),
	PINMUX_DATA(CS5_MARK, PS4_13_FN2, PTL5_FN),
	PINMUX_DATA(RAC_CTS_MARK, PS4_12_FN1, PTL4_FN),
	PINMUX_DATA(CS6_MARK, PS4_12_FN2, PTL4_FN),
	PINMUX_DATA(RAC_DTR_MARK, PTL3_FN),
	PINMUX_DATA(RAC_DSR_MARK, PS4_10_FN1, PTL2_FN),
	PINMUX_DATA(AUDSYNC_MARK, PS4_10_FN2, PTL2_FN),
	PINMUX_DATA(RAC_DCD_MARK, PS4_9_FN1, PTL1_FN),
	PINMUX_DATA(AUDCK_MARK, PS4_9_FN2, PTL1_FN),
	PINMUX_DATA(RAC_TXD_MARK, PS4_8_FN1, PTL0_FN),
	/* was PS4_8_FN1 (duplicate of RAC_TXD): TXD2 is the FN2 alternative */
	PINMUX_DATA(TXD2_MARK, PS4_8_FN2, PTL0_FN),
	/* PTM FN */
	PINMUX_DATA(CS4_MARK, PTM7_FN),
	PINMUX_DATA(RD_MARK, PTM6_FN),
	/* was PTM7_FN (already taken by CS4): WE0 sits on PTM5 */
	PINMUX_DATA(WE0_MARK, PTM5_FN),
	PINMUX_DATA(CS0_MARK, PTM4_FN),
	PINMUX_DATA(SDA6_MARK, PTM3_FN),
	PINMUX_DATA(SCL6_MARK, PTM2_FN),
	PINMUX_DATA(SDA7_MARK, PTM1_FN),
	PINMUX_DATA(SCL7_MARK, PTM0_FN),
	/* PTN FN */
	PINMUX_DATA(VBUS_EN_MARK, PTN6_FN),
	PINMUX_DATA(VBUS_OC_MARK, PTN5_FN),
	PINMUX_DATA(JMCTCK_MARK, PS4_4_FN1, PTN4_FN),
	PINMUX_DATA(SGPIO1_CLK_MARK, PS4_4_FN2, PTN4_FN),
	/* was PTN5_FN (already taken by VBUS_OC): TMS/LOAD share PTN3 */
	PINMUX_DATA(JMCTMS_MARK, PS4_3_FN1, PTN3_FN),
	PINMUX_DATA(SGPIO1_LOAD_MARK, PS4_3_FN2, PTN3_FN),
	PINMUX_DATA(JMCTDO_MARK, PS4_2_FN1, PTN2_FN),
	PINMUX_DATA(SGPIO1_DO_MARK, PS4_2_FN2, PTN2_FN),
	PINMUX_DATA(JMCTDI_MARK, PS4_1_FN1, PTN1_FN),
	PINMUX_DATA(SGPIO1_DI_MARK, PS4_1_FN2, PTN1_FN),
	PINMUX_DATA(JMCTRST_MARK, PS4_0_FN1, PTN0_FN),
	PINMUX_DATA(SUB_CLKIN_MARK, PS4_0_FN2, PTN0_FN),
	/* PTO FN */
	PINMUX_DATA(SGPIO0_CLK_MARK, PTO7_FN),
	PINMUX_DATA(SGPIO0_LOAD_MARK, PTO6_FN),
	PINMUX_DATA(SGPIO0_DI_MARK, PTO5_FN),
	PINMUX_DATA(SGPIO0_DO_MARK, PTO4_FN),
	PINMUX_DATA(SGPIO2_CLK_MARK, PS5_11_FN1, PTO3_FN),
	PINMUX_DATA(COM1_TXD_MARK, PS5_11_FN2, PTO3_FN),
	PINMUX_DATA(SGPIO2_LOAD_MARK, PS5_10_FN1, PTO2_FN),
	PINMUX_DATA(COM1_RXD_MARK, PS5_10_FN2, PTO2_FN),
	PINMUX_DATA(SGPIO2_DI_MARK, PS5_9_FN1, PTO1_FN),
	PINMUX_DATA(COM1_RTS_MARK, PS5_9_FN2, PTO1_FN),
	PINMUX_DATA(SGPIO2_DO_MARK, PS5_8_FN1, PTO0_FN),
	PINMUX_DATA(COM1_CTS_MARK, PS5_8_FN2, PTO0_FN),
	/* PTP FN */
	/* PTQ FN */
	PINMUX_DATA(LAD3_MARK, PTQ6_FN),
	PINMUX_DATA(LAD2_MARK, PTQ5_FN),
	PINMUX_DATA(LAD1_MARK, PTQ4_FN),
	PINMUX_DATA(LAD0_MARK, PTQ3_FN),
	PINMUX_DATA(LFRAME_MARK, PTQ2_FN),
	PINMUX_DATA(LRESET_MARK, PTQ1_FN),
	PINMUX_DATA(LCLK_MARK, PTQ0_FN),
	/* PTR FN */
	PINMUX_DATA(SDA8_MARK, PTR7_FN),	/* DDC3? */
	PINMUX_DATA(SCL8_MARK, PTR6_FN),	/* DDC2? */
	PINMUX_DATA(SDA2_MARK, PTR5_FN),
	PINMUX_DATA(SCL2_MARK, PTR4_FN),
	PINMUX_DATA(SDA1_MARK, PTR3_FN),
	PINMUX_DATA(SCL1_MARK, PTR2_FN),
	PINMUX_DATA(SDA0_MARK, PTR1_FN),
	PINMUX_DATA(SCL0_MARK, PTR0_FN),
	/* PTS FN */
	PINMUX_DATA(SDA9_MARK, PTS7_FN),	/* DDC1? */
	PINMUX_DATA(SCL9_MARK, PTS6_FN),	/* DDC0? */
	PINMUX_DATA(SDA5_MARK, PTS5_FN),
	PINMUX_DATA(SCL5_MARK, PTS4_FN),
	PINMUX_DATA(SDA4_MARK, PTS3_FN),
	PINMUX_DATA(SCL4_MARK, PTS2_FN),
	PINMUX_DATA(SDA3_MARK, PTS1_FN),
	PINMUX_DATA(SCL3_MARK, PTS0_FN),
	/* PTT FN */
	PINMUX_DATA(PWMX7_MARK, PS5_7_FN1, PTT7_FN),
	PINMUX_DATA(AUDATA3_MARK, PS5_7_FN2, PTT7_FN),
	PINMUX_DATA(PWMX6_MARK, PS5_6_FN1, PTT6_FN),
	PINMUX_DATA(AUDATA2_MARK, PS5_6_FN2, PTT6_FN),
	PINMUX_DATA(PWMX5_MARK, PS5_5_FN1, PTT5_FN),
	PINMUX_DATA(AUDATA1_MARK, PS5_5_FN2, PTT5_FN),
	PINMUX_DATA(PWMX4_MARK, PS5_4_FN1, PTT4_FN),
	PINMUX_DATA(AUDATA0_MARK, PS5_4_FN2, PTT4_FN),
	PINMUX_DATA(PWMX3_MARK, PS5_3_FN1, PTT3_FN),
	PINMUX_DATA(STATUS1_MARK, PS5_3_FN2, PTT3_FN),
	PINMUX_DATA(PWMX2_MARK, PS5_2_FN1, PTT2_FN),
	PINMUX_DATA(STATUS0_MARK, PS5_2_FN2, PTT2_FN),
	PINMUX_DATA(PWMX1_MARK, PTT1_FN),
	PINMUX_DATA(PWMX0_MARK, PTT0_FN),
	/* PTU FN */
	PINMUX_DATA(LGPIO7_MARK, PS6_15_FN1, PTU7_FN),
	PINMUX_DATA(APMONCTL_O_MARK, PS6_15_FN2, PTU7_FN),
	PINMUX_DATA(LGPIO6_MARK, PS6_14_FN1, PTU6_FN),
	PINMUX_DATA(APMPWBTOUT_O_MARK, PS6_14_FN2, PTU6_FN),
	PINMUX_DATA(LGPIO5_MARK, PS6_13_FN1, PTU5_FN),
	PINMUX_DATA(APMSCI_O_MARK, PS6_13_FN2, PTU5_FN),
	PINMUX_DATA(LGPIO4_MARK, PS6_12_FN1, PTU4_FN),
	PINMUX_DATA(APMVDDON_MARK, PS6_12_FN2, PTU4_FN),
	PINMUX_DATA(LGPIO3_MARK, PS6_11_FN1, PTU3_FN),
	PINMUX_DATA(APMSLPBTN_MARK, PS6_11_FN2, PTU3_FN),
	PINMUX_DATA(LGPIO2_MARK, PS6_10_FN1, PTU2_FN),
	PINMUX_DATA(APMPWRBTN_MARK, PS6_10_FN2, PTU2_FN),
	PINMUX_DATA(LGPIO1_MARK, PS6_9_FN1, PTU1_FN),
	PINMUX_DATA(APMS5N_MARK, PS6_9_FN2, PTU1_FN),
	PINMUX_DATA(LGPIO0_MARK, PS6_8_FN1, PTU0_FN),
	PINMUX_DATA(APMS3N_MARK, PS6_8_FN2, PTU0_FN),
	/* PTV FN */
	PINMUX_DATA(A23_MARK, PS6_7_FN1, PTV7_FN),
	PINMUX_DATA(COM2_RI_MARK, PS6_7_FN2, PTV7_FN),
	PINMUX_DATA(A22_MARK, PS6_6_FN1, PTV6_FN),
	PINMUX_DATA(R_SPI_MOSI_MARK, PS6_6_FN2, PTV6_FN),
	PINMUX_DATA(A21_MARK, PS6_5_FN1, PTV5_FN),
	PINMUX_DATA(R_SPI_MISO_MARK, PS6_5_FN2, PTV5_FN),
	PINMUX_DATA(A20_MARK, PS6_4_FN1, PTV4_FN),
	PINMUX_DATA(R_SPI_RSPCK_MARK, PS6_4_FN2, PTV4_FN),
	PINMUX_DATA(A19_MARK, PS6_3_FN1, PTV3_FN),
	PINMUX_DATA(R_SPI_SSL0_MARK, PS6_3_FN2, PTV3_FN),
	PINMUX_DATA(A18_MARK, PS6_2_FN1, PTV2_FN),
	PINMUX_DATA(R_SPI_SSL1_MARK, PS6_2_FN2, PTV2_FN),
	PINMUX_DATA(A17_MARK, PS6_1_FN1, PTV1_FN),
	PINMUX_DATA(EVENT7_MARK, PS6_1_FN2, PTV1_FN),
	PINMUX_DATA(A16_MARK, PS6_0_FN1, PTV0_FN),
	/* was PS6_0_FN1 (duplicate of A16): EVENT6 is the FN2 alternative */
	PINMUX_DATA(EVENT6_MARK, PS6_0_FN2, PTV0_FN),
	/* PTW FN */
	PINMUX_DATA(A15_MARK, PS7_15_FN1, PTW7_FN),
	PINMUX_DATA(EVENT5_MARK, PS7_15_FN2, PTW7_FN),
	PINMUX_DATA(A14_MARK, PS7_14_FN1, PTW6_FN),
	PINMUX_DATA(EVENT4_MARK, PS7_14_FN2, PTW6_FN),
	PINMUX_DATA(A13_MARK, PS7_13_FN1, PTW5_FN),
	PINMUX_DATA(EVENT3_MARK, PS7_13_FN2, PTW5_FN),
	PINMUX_DATA(A12_MARK, PS7_12_FN1, PTW4_FN),
	PINMUX_DATA(EVENT2_MARK, PS7_12_FN2, PTW4_FN),
	PINMUX_DATA(A11_MARK, PS7_11_FN1, PTW3_FN),
	PINMUX_DATA(EVENT1_MARK, PS7_11_FN2, PTW3_FN),
	PINMUX_DATA(A10_MARK, PS7_10_FN1, PTW2_FN),
	PINMUX_DATA(EVENT0_MARK, PS7_10_FN2, PTW2_FN),
	PINMUX_DATA(A9_MARK, PS7_9_FN1, PTW1_FN),
	PINMUX_DATA(CTS4_MARK, PS7_9_FN2, PTW1_FN),
	PINMUX_DATA(A8_MARK, PS7_8_FN1, PTW0_FN),
	PINMUX_DATA(CTS2_MARK, PS7_8_FN2, PTW0_FN),
	/* PTX FN */
	PINMUX_DATA(A7_MARK, PS7_7_FN1, PTX7_FN),
	PINMUX_DATA(RTS2_MARK, PS7_7_FN2, PTX7_FN),
	PINMUX_DATA(A6_MARK, PS7_6_FN1, PTX6_FN),
	PINMUX_DATA(SIM_D_MARK, PS7_6_FN2, PTX6_FN),
	PINMUX_DATA(A5_MARK, PS7_5_FN1, PTX5_FN),
	PINMUX_DATA(SIM_CLK_MARK, PS7_5_FN2, PTX5_FN),
	PINMUX_DATA(A4_MARK, PS7_4_FN1, PTX4_FN),
	PINMUX_DATA(SIM_RST_MARK, PS7_4_FN2, PTX4_FN),
	PINMUX_DATA(A3_MARK, PTX3_FN),
	PINMUX_DATA(A2_MARK, PTX2_FN),
	PINMUX_DATA(A1_MARK, PTX1_FN),
	PINMUX_DATA(A0_MARK, PTX0_FN),
	/* PTY FN */
	PINMUX_DATA(D7_MARK, PTY7_FN),
	PINMUX_DATA(D6_MARK, PTY6_FN),
	PINMUX_DATA(D5_MARK, PTY5_FN),
	PINMUX_DATA(D4_MARK, PTY4_FN),
	PINMUX_DATA(D3_MARK, PTY3_FN),
	PINMUX_DATA(D2_MARK, PTY2_FN),
	PINMUX_DATA(D1_MARK, PTY1_FN),
	PINMUX_DATA(D0_MARK, PTY0_FN),
	/* PTZ FN */
	PINMUX_DATA(MMCDAT7_MARK, PS8_15_FN1, PTZ7_FN),
	PINMUX_DATA(ON_DQ7_MARK, PS8_15_FN2, PTZ7_FN),
	PINMUX_DATA(MMCDAT6_MARK, PS8_14_FN1, PTZ6_FN),
/* Tail of pinmux_data[] (ONFI DQ / eMMC DAT mux selections on port Z), then the
 * GPIO lookup table: one PINMUX_GPIO entry per physical pin PTA0..PTZ7, mapping
 * the GPIO number to its PxDR data-register enum. Ports PTJ, PTL and PTN have
 * only pins 0..6 (bit 7 reserved), matching the 0 slots in pinmux_data_regs. */
PINMUX_DATA(ON_DQ6_MARK, PS8_14_FN2, PTZ6_FN), PINMUX_DATA(MMCDAT5_MARK, PS8_13_FN1, PTZ5_FN), PINMUX_DATA(ON_DQ5_MARK, PS8_13_FN2, PTZ5_FN), PINMUX_DATA(MMCDAT4_MARK, PS8_12_FN1, PTZ4_FN), PINMUX_DATA(ON_DQ4_MARK, PS8_12_FN2, PTZ4_FN), PINMUX_DATA(MMCDAT3_MARK, PS8_11_FN1, PTZ3_FN), PINMUX_DATA(ON_DQ3_MARK, PS8_11_FN2, PTZ3_FN), PINMUX_DATA(MMCDAT2_MARK, PS8_10_FN1, PTZ2_FN), PINMUX_DATA(ON_DQ2_MARK, PS8_10_FN2, PTZ2_FN), PINMUX_DATA(MMCDAT1_MARK, PS8_9_FN1, PTZ1_FN), PINMUX_DATA(ON_DQ1_MARK, PS8_9_FN2, PTZ1_FN), PINMUX_DATA(MMCDAT0_MARK, PS8_8_FN1, PTZ0_FN), PINMUX_DATA(ON_DQ0_MARK, PS8_8_FN2, PTZ0_FN), }; static struct pinmux_gpio pinmux_gpios[] = { /* PTA */ PINMUX_GPIO(GPIO_PTA7, PTA7_DATA), PINMUX_GPIO(GPIO_PTA6, PTA6_DATA), PINMUX_GPIO(GPIO_PTA5, PTA5_DATA), PINMUX_GPIO(GPIO_PTA4, PTA4_DATA), PINMUX_GPIO(GPIO_PTA3, PTA3_DATA), PINMUX_GPIO(GPIO_PTA2, PTA2_DATA), PINMUX_GPIO(GPIO_PTA1, PTA1_DATA), PINMUX_GPIO(GPIO_PTA0, PTA0_DATA), /* PTB */ PINMUX_GPIO(GPIO_PTB7, PTB7_DATA), PINMUX_GPIO(GPIO_PTB6, PTB6_DATA), PINMUX_GPIO(GPIO_PTB5, PTB5_DATA), PINMUX_GPIO(GPIO_PTB4, PTB4_DATA), PINMUX_GPIO(GPIO_PTB3, PTB3_DATA), PINMUX_GPIO(GPIO_PTB2, PTB2_DATA), PINMUX_GPIO(GPIO_PTB1, PTB1_DATA), PINMUX_GPIO(GPIO_PTB0, PTB0_DATA), /* PTC */ PINMUX_GPIO(GPIO_PTC7, PTC7_DATA), PINMUX_GPIO(GPIO_PTC6, PTC6_DATA), PINMUX_GPIO(GPIO_PTC5, PTC5_DATA), PINMUX_GPIO(GPIO_PTC4, PTC4_DATA), PINMUX_GPIO(GPIO_PTC3, PTC3_DATA), PINMUX_GPIO(GPIO_PTC2, PTC2_DATA), PINMUX_GPIO(GPIO_PTC1, PTC1_DATA), PINMUX_GPIO(GPIO_PTC0, PTC0_DATA), /* PTD */ PINMUX_GPIO(GPIO_PTD7, PTD7_DATA), PINMUX_GPIO(GPIO_PTD6, PTD6_DATA), PINMUX_GPIO(GPIO_PTD5, PTD5_DATA), PINMUX_GPIO(GPIO_PTD4, PTD4_DATA), PINMUX_GPIO(GPIO_PTD3, PTD3_DATA), PINMUX_GPIO(GPIO_PTD2, PTD2_DATA), PINMUX_GPIO(GPIO_PTD1, PTD1_DATA), PINMUX_GPIO(GPIO_PTD0, PTD0_DATA), /* PTE */ PINMUX_GPIO(GPIO_PTE7, PTE7_DATA), PINMUX_GPIO(GPIO_PTE6, PTE6_DATA), PINMUX_GPIO(GPIO_PTE5, PTE5_DATA), PINMUX_GPIO(GPIO_PTE4, PTE4_DATA), PINMUX_GPIO(GPIO_PTE3, 
PTE3_DATA), PINMUX_GPIO(GPIO_PTE2, PTE2_DATA), PINMUX_GPIO(GPIO_PTE1, PTE1_DATA), PINMUX_GPIO(GPIO_PTE0, PTE0_DATA), /* PTF */ PINMUX_GPIO(GPIO_PTF7, PTF7_DATA), PINMUX_GPIO(GPIO_PTF6, PTF6_DATA), PINMUX_GPIO(GPIO_PTF5, PTF5_DATA), PINMUX_GPIO(GPIO_PTF4, PTF4_DATA), PINMUX_GPIO(GPIO_PTF3, PTF3_DATA), PINMUX_GPIO(GPIO_PTF2, PTF2_DATA), PINMUX_GPIO(GPIO_PTF1, PTF1_DATA), PINMUX_GPIO(GPIO_PTF0, PTF0_DATA), /* PTG */ PINMUX_GPIO(GPIO_PTG7, PTG7_DATA), PINMUX_GPIO(GPIO_PTG6, PTG6_DATA), PINMUX_GPIO(GPIO_PTG5, PTG5_DATA), PINMUX_GPIO(GPIO_PTG4, PTG4_DATA), PINMUX_GPIO(GPIO_PTG3, PTG3_DATA), PINMUX_GPIO(GPIO_PTG2, PTG2_DATA), PINMUX_GPIO(GPIO_PTG1, PTG1_DATA), PINMUX_GPIO(GPIO_PTG0, PTG0_DATA), /* PTH */ PINMUX_GPIO(GPIO_PTH7, PTH7_DATA), PINMUX_GPIO(GPIO_PTH6, PTH6_DATA), PINMUX_GPIO(GPIO_PTH5, PTH5_DATA), PINMUX_GPIO(GPIO_PTH4, PTH4_DATA), PINMUX_GPIO(GPIO_PTH3, PTH3_DATA), PINMUX_GPIO(GPIO_PTH2, PTH2_DATA), PINMUX_GPIO(GPIO_PTH1, PTH1_DATA), PINMUX_GPIO(GPIO_PTH0, PTH0_DATA), /* PTI */ PINMUX_GPIO(GPIO_PTI7, PTI7_DATA), PINMUX_GPIO(GPIO_PTI6, PTI6_DATA), PINMUX_GPIO(GPIO_PTI5, PTI5_DATA), PINMUX_GPIO(GPIO_PTI4, PTI4_DATA), PINMUX_GPIO(GPIO_PTI3, PTI3_DATA), PINMUX_GPIO(GPIO_PTI2, PTI2_DATA), PINMUX_GPIO(GPIO_PTI1, PTI1_DATA), PINMUX_GPIO(GPIO_PTI0, PTI0_DATA), /* PTJ */ PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA), PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA), PINMUX_GPIO(GPIO_PTJ4, PTJ4_DATA), PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA), PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA), PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA), PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA), /* PTK */ PINMUX_GPIO(GPIO_PTK7, PTK7_DATA), PINMUX_GPIO(GPIO_PTK6, PTK6_DATA), PINMUX_GPIO(GPIO_PTK5, PTK5_DATA), PINMUX_GPIO(GPIO_PTK4, PTK4_DATA), PINMUX_GPIO(GPIO_PTK3, PTK3_DATA), PINMUX_GPIO(GPIO_PTK2, PTK2_DATA), PINMUX_GPIO(GPIO_PTK1, PTK1_DATA), PINMUX_GPIO(GPIO_PTK0, PTK0_DATA), /* PTL */ PINMUX_GPIO(GPIO_PTL6, PTL6_DATA), PINMUX_GPIO(GPIO_PTL5, PTL5_DATA), PINMUX_GPIO(GPIO_PTL4, PTL4_DATA), PINMUX_GPIO(GPIO_PTL3, PTL3_DATA), PINMUX_GPIO(GPIO_PTL2, 
PTL2_DATA), PINMUX_GPIO(GPIO_PTL1, PTL1_DATA), PINMUX_GPIO(GPIO_PTL0, PTL0_DATA), /* PTM */ PINMUX_GPIO(GPIO_PTM7, PTM7_DATA), PINMUX_GPIO(GPIO_PTM6, PTM6_DATA), PINMUX_GPIO(GPIO_PTM5, PTM5_DATA), PINMUX_GPIO(GPIO_PTM4, PTM4_DATA), PINMUX_GPIO(GPIO_PTM3, PTM3_DATA), PINMUX_GPIO(GPIO_PTM2, PTM2_DATA), PINMUX_GPIO(GPIO_PTM1, PTM1_DATA), PINMUX_GPIO(GPIO_PTM0, PTM0_DATA), /* PTN */ PINMUX_GPIO(GPIO_PTN6, PTN6_DATA), PINMUX_GPIO(GPIO_PTN5, PTN5_DATA), PINMUX_GPIO(GPIO_PTN4, PTN4_DATA), PINMUX_GPIO(GPIO_PTN3, PTN3_DATA), PINMUX_GPIO(GPIO_PTN2, PTN2_DATA), PINMUX_GPIO(GPIO_PTN1, PTN1_DATA), PINMUX_GPIO(GPIO_PTN0, PTN0_DATA), /* PTO */ PINMUX_GPIO(GPIO_PTO7, PTO7_DATA), PINMUX_GPIO(GPIO_PTO6, PTO6_DATA), PINMUX_GPIO(GPIO_PTO5, PTO5_DATA), PINMUX_GPIO(GPIO_PTO4, PTO4_DATA), PINMUX_GPIO(GPIO_PTO3, PTO3_DATA), PINMUX_GPIO(GPIO_PTO2, PTO2_DATA), PINMUX_GPIO(GPIO_PTO1, PTO1_DATA), PINMUX_GPIO(GPIO_PTO0, PTO0_DATA), /* PTP */ PINMUX_GPIO(GPIO_PTP7, PTP7_DATA), PINMUX_GPIO(GPIO_PTP6, PTP6_DATA), PINMUX_GPIO(GPIO_PTP5, PTP5_DATA), PINMUX_GPIO(GPIO_PTP4, PTP4_DATA), PINMUX_GPIO(GPIO_PTP3, PTP3_DATA), PINMUX_GPIO(GPIO_PTP2, PTP2_DATA), PINMUX_GPIO(GPIO_PTP1, PTP1_DATA), PINMUX_GPIO(GPIO_PTP0, PTP0_DATA), /* PTQ */ PINMUX_GPIO(GPIO_PTQ6, PTQ6_DATA), PINMUX_GPIO(GPIO_PTQ5, PTQ5_DATA), PINMUX_GPIO(GPIO_PTQ4, PTQ4_DATA), PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA), PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA), PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA), PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA), /* PTR */ PINMUX_GPIO(GPIO_PTR7, PTR7_DATA), PINMUX_GPIO(GPIO_PTR6, PTR6_DATA), PINMUX_GPIO(GPIO_PTR5, PTR5_DATA), PINMUX_GPIO(GPIO_PTR4, PTR4_DATA), PINMUX_GPIO(GPIO_PTR3, PTR3_DATA), PINMUX_GPIO(GPIO_PTR2, PTR2_DATA), PINMUX_GPIO(GPIO_PTR1, PTR1_DATA), PINMUX_GPIO(GPIO_PTR0, PTR0_DATA), /* PTS */ PINMUX_GPIO(GPIO_PTS7, PTS7_DATA), PINMUX_GPIO(GPIO_PTS6, PTS6_DATA), PINMUX_GPIO(GPIO_PTS5, PTS5_DATA), PINMUX_GPIO(GPIO_PTS4, PTS4_DATA), PINMUX_GPIO(GPIO_PTS3, PTS3_DATA), PINMUX_GPIO(GPIO_PTS2, PTS2_DATA), PINMUX_GPIO(GPIO_PTS1, 
PTS1_DATA), PINMUX_GPIO(GPIO_PTS0, PTS0_DATA), /* PTT */ PINMUX_GPIO(GPIO_PTT7, PTT7_DATA), PINMUX_GPIO(GPIO_PTT6, PTT6_DATA), PINMUX_GPIO(GPIO_PTT5, PTT5_DATA), PINMUX_GPIO(GPIO_PTT4, PTT4_DATA), PINMUX_GPIO(GPIO_PTT3, PTT3_DATA), PINMUX_GPIO(GPIO_PTT2, PTT2_DATA), PINMUX_GPIO(GPIO_PTT1, PTT1_DATA), PINMUX_GPIO(GPIO_PTT0, PTT0_DATA), /* PTU */ PINMUX_GPIO(GPIO_PTU7, PTU7_DATA), PINMUX_GPIO(GPIO_PTU6, PTU6_DATA), PINMUX_GPIO(GPIO_PTU5, PTU5_DATA), PINMUX_GPIO(GPIO_PTU4, PTU4_DATA), PINMUX_GPIO(GPIO_PTU3, PTU3_DATA), PINMUX_GPIO(GPIO_PTU2, PTU2_DATA), PINMUX_GPIO(GPIO_PTU1, PTU1_DATA), PINMUX_GPIO(GPIO_PTU0, PTU0_DATA), /* PTV */ PINMUX_GPIO(GPIO_PTV7, PTV7_DATA), PINMUX_GPIO(GPIO_PTV6, PTV6_DATA), PINMUX_GPIO(GPIO_PTV5, PTV5_DATA), PINMUX_GPIO(GPIO_PTV4, PTV4_DATA), PINMUX_GPIO(GPIO_PTV3, PTV3_DATA), PINMUX_GPIO(GPIO_PTV2, PTV2_DATA), PINMUX_GPIO(GPIO_PTV1, PTV1_DATA), PINMUX_GPIO(GPIO_PTV0, PTV0_DATA), /* PTW */ PINMUX_GPIO(GPIO_PTW7, PTW7_DATA), PINMUX_GPIO(GPIO_PTW6, PTW6_DATA), PINMUX_GPIO(GPIO_PTW5, PTW5_DATA), PINMUX_GPIO(GPIO_PTW4, PTW4_DATA), PINMUX_GPIO(GPIO_PTW3, PTW3_DATA), PINMUX_GPIO(GPIO_PTW2, PTW2_DATA), PINMUX_GPIO(GPIO_PTW1, PTW1_DATA), PINMUX_GPIO(GPIO_PTW0, PTW0_DATA), /* PTX */ PINMUX_GPIO(GPIO_PTX7, PTX7_DATA), PINMUX_GPIO(GPIO_PTX6, PTX6_DATA), PINMUX_GPIO(GPIO_PTX5, PTX5_DATA), PINMUX_GPIO(GPIO_PTX4, PTX4_DATA), PINMUX_GPIO(GPIO_PTX3, PTX3_DATA), PINMUX_GPIO(GPIO_PTX2, PTX2_DATA), PINMUX_GPIO(GPIO_PTX1, PTX1_DATA), PINMUX_GPIO(GPIO_PTX0, PTX0_DATA), /* PTY */ PINMUX_GPIO(GPIO_PTY7, PTY7_DATA), PINMUX_GPIO(GPIO_PTY6, PTY6_DATA), PINMUX_GPIO(GPIO_PTY5, PTY5_DATA), PINMUX_GPIO(GPIO_PTY4, PTY4_DATA), PINMUX_GPIO(GPIO_PTY3, PTY3_DATA), PINMUX_GPIO(GPIO_PTY2, PTY2_DATA), PINMUX_GPIO(GPIO_PTY1, PTY1_DATA), PINMUX_GPIO(GPIO_PTY0, PTY0_DATA), /* PTZ */ PINMUX_GPIO(GPIO_PTZ7, PTZ7_DATA), PINMUX_GPIO(GPIO_PTZ6, PTZ6_DATA), PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA), PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA), PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA), PINMUX_GPIO(GPIO_PTZ2, 
PTZ2_DATA), PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA), PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA), /* PTA (module: LBSC, RGMII) */ PINMUX_GPIO(GPIO_FN_BS, BS_MARK), PINMUX_GPIO(GPIO_FN_RDWR, RDWR_MARK), PINMUX_GPIO(GPIO_FN_WE1, WE1_MARK), PINMUX_GPIO(GPIO_FN_RDY, RDY_MARK), PINMUX_GPIO(GPIO_FN_ET0_MDC, ET0_MDC_MARK), /* Fix: MDIO pins must map to the *_MDIO_MARK functions, not *_MDC_MARK (copy-paste) */ PINMUX_GPIO(GPIO_FN_ET0_MDIO, ET0_MDIO_MARK), PINMUX_GPIO(GPIO_FN_ET1_MDC, ET1_MDC_MARK), PINMUX_GPIO(GPIO_FN_ET1_MDIO, ET1_MDIO_MARK), /* PTB (module: INTC, ONFI, TMU) */ PINMUX_GPIO(GPIO_FN_IRQ15, IRQ15_MARK), PINMUX_GPIO(GPIO_FN_IRQ14, IRQ14_MARK), PINMUX_GPIO(GPIO_FN_IRQ13, IRQ13_MARK), PINMUX_GPIO(GPIO_FN_IRQ12, IRQ12_MARK), PINMUX_GPIO(GPIO_FN_IRQ11, IRQ11_MARK), PINMUX_GPIO(GPIO_FN_IRQ10, IRQ10_MARK), PINMUX_GPIO(GPIO_FN_IRQ9, IRQ9_MARK), PINMUX_GPIO(GPIO_FN_IRQ8, IRQ8_MARK), PINMUX_GPIO(GPIO_FN_ON_NRE, ON_NRE_MARK), PINMUX_GPIO(GPIO_FN_ON_NWE, ON_NWE_MARK), PINMUX_GPIO(GPIO_FN_ON_NWP, ON_NWP_MARK), PINMUX_GPIO(GPIO_FN_ON_NCE0, ON_NCE0_MARK), PINMUX_GPIO(GPIO_FN_ON_R_B0, ON_R_B0_MARK), PINMUX_GPIO(GPIO_FN_ON_ALE, ON_ALE_MARK), PINMUX_GPIO(GPIO_FN_ON_CLE, ON_CLE_MARK), PINMUX_GPIO(GPIO_FN_TCLK, TCLK_MARK), /* PTC (module: IRQ, PWMU) */ PINMUX_GPIO(GPIO_FN_IRQ7, IRQ7_MARK), PINMUX_GPIO(GPIO_FN_IRQ6, IRQ6_MARK), PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK), PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK), PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK), PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK), PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK), PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK), PINMUX_GPIO(GPIO_FN_PWMU0, PWMU0_MARK), PINMUX_GPIO(GPIO_FN_PWMU1, PWMU1_MARK), PINMUX_GPIO(GPIO_FN_PWMU2, PWMU2_MARK), PINMUX_GPIO(GPIO_FN_PWMU3, PWMU3_MARK), PINMUX_GPIO(GPIO_FN_PWMU4, PWMU4_MARK), PINMUX_GPIO(GPIO_FN_PWMU5, PWMU5_MARK), /* PTD (module: SPI0, DMAC) */ PINMUX_GPIO(GPIO_FN_SP0_MOSI, SP0_MOSI_MARK), PINMUX_GPIO(GPIO_FN_SP0_MISO, SP0_MISO_MARK), PINMUX_GPIO(GPIO_FN_SP0_SCK, SP0_SCK_MARK), PINMUX_GPIO(GPIO_FN_SP0_SCK_FB, SP0_SCK_FB_MARK), PINMUX_GPIO(GPIO_FN_SP0_SS0, SP0_SS0_MARK), PINMUX_GPIO(GPIO_FN_SP0_SS1, 
/* Function-GPIO entries (GPIO_FN_*): map each peripheral function number to its
 * pinmux mark, grouped by the port whose pins carry that function. */
SP0_SS1_MARK), PINMUX_GPIO(GPIO_FN_SP0_SS2, SP0_SS2_MARK), PINMUX_GPIO(GPIO_FN_SP0_SS3, SP0_SS3_MARK), PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK), PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK), PINMUX_GPIO(GPIO_FN_TEND0, TEND0_MARK), /* PTE (module: RMII) */ PINMUX_GPIO(GPIO_FN_RMII0_CRS_DV, RMII0_CRS_DV_MARK), PINMUX_GPIO(GPIO_FN_RMII0_TXD1, RMII0_TXD1_MARK), PINMUX_GPIO(GPIO_FN_RMII0_TXD0, RMII0_TXD0_MARK), PINMUX_GPIO(GPIO_FN_RMII0_TXEN, RMII0_TXEN_MARK), PINMUX_GPIO(GPIO_FN_RMII0_REFCLK, RMII0_REFCLK_MARK), PINMUX_GPIO(GPIO_FN_RMII0_RXD1, RMII0_RXD1_MARK), PINMUX_GPIO(GPIO_FN_RMII0_RXD0, RMII0_RXD0_MARK), PINMUX_GPIO(GPIO_FN_RMII0_RX_ER, RMII0_RX_ER_MARK), /* PTF (module: RMII, SerMux) */ PINMUX_GPIO(GPIO_FN_RMII1_CRS_DV, RMII1_CRS_DV_MARK), PINMUX_GPIO(GPIO_FN_RMII1_TXD1, RMII1_TXD1_MARK), PINMUX_GPIO(GPIO_FN_RMII1_TXD0, RMII1_TXD0_MARK), PINMUX_GPIO(GPIO_FN_RMII1_TXEN, RMII1_TXEN_MARK), PINMUX_GPIO(GPIO_FN_RMII1_REFCLK, RMII1_REFCLK_MARK), PINMUX_GPIO(GPIO_FN_RMII1_RXD1, RMII1_RXD1_MARK), PINMUX_GPIO(GPIO_FN_RMII1_RXD0, RMII1_RXD0_MARK), PINMUX_GPIO(GPIO_FN_RMII1_RX_ER, RMII1_RX_ER_MARK), PINMUX_GPIO(GPIO_FN_RAC_RI, RAC_RI_MARK), /* PTG (module: system, LBSC, LPC, WDT, LPC, eMMC) */ PINMUX_GPIO(GPIO_FN_BOOTFMS, BOOTFMS_MARK), PINMUX_GPIO(GPIO_FN_BOOTWP, BOOTWP_MARK), PINMUX_GPIO(GPIO_FN_A25, A25_MARK), PINMUX_GPIO(GPIO_FN_A24, A24_MARK), PINMUX_GPIO(GPIO_FN_SERIRQ, SERIRQ_MARK), PINMUX_GPIO(GPIO_FN_WDTOVF, WDTOVF_MARK), PINMUX_GPIO(GPIO_FN_LPCPD, LPCPD_MARK), PINMUX_GPIO(GPIO_FN_LDRQ, LDRQ_MARK), PINMUX_GPIO(GPIO_FN_MMCCLK, MMCCLK_MARK), PINMUX_GPIO(GPIO_FN_MMCCMD, MMCCMD_MARK), /* PTH (module: SPI1, LPC, DMAC, ADC) */ PINMUX_GPIO(GPIO_FN_SP1_MOSI, SP1_MOSI_MARK), PINMUX_GPIO(GPIO_FN_SP1_MISO, SP1_MISO_MARK), PINMUX_GPIO(GPIO_FN_SP1_SCK, SP1_SCK_MARK), PINMUX_GPIO(GPIO_FN_SP1_SCK_FB, SP1_SCK_FB_MARK), PINMUX_GPIO(GPIO_FN_SP1_SS0, SP1_SS0_MARK), PINMUX_GPIO(GPIO_FN_SP1_SS1, SP1_SS1_MARK), PINMUX_GPIO(GPIO_FN_WP, WP_MARK), PINMUX_GPIO(GPIO_FN_FMS0, FMS0_MARK), 
PINMUX_GPIO(GPIO_FN_TEND1, TEND1_MARK), PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK), PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK), PINMUX_GPIO(GPIO_FN_ADTRG1, ADTRG1_MARK), PINMUX_GPIO(GPIO_FN_ADTRG0, ADTRG0_MARK), /* PTI (module: LBSC, SDHI) */ PINMUX_GPIO(GPIO_FN_D15, D15_MARK), PINMUX_GPIO(GPIO_FN_D14, D14_MARK), PINMUX_GPIO(GPIO_FN_D13, D13_MARK), PINMUX_GPIO(GPIO_FN_D12, D12_MARK), PINMUX_GPIO(GPIO_FN_D11, D11_MARK), PINMUX_GPIO(GPIO_FN_D10, D10_MARK), PINMUX_GPIO(GPIO_FN_D9, D9_MARK), PINMUX_GPIO(GPIO_FN_D8, D8_MARK), PINMUX_GPIO(GPIO_FN_SD_WP, SD_WP_MARK), PINMUX_GPIO(GPIO_FN_SD_CD, SD_CD_MARK), PINMUX_GPIO(GPIO_FN_SD_CLK, SD_CLK_MARK), PINMUX_GPIO(GPIO_FN_SD_CMD, SD_CMD_MARK), PINMUX_GPIO(GPIO_FN_SD_D3, SD_D3_MARK), PINMUX_GPIO(GPIO_FN_SD_D2, SD_D2_MARK), PINMUX_GPIO(GPIO_FN_SD_D1, SD_D1_MARK), PINMUX_GPIO(GPIO_FN_SD_D0, SD_D0_MARK), /* PTJ (module: SCIF234, SERMUX) */ PINMUX_GPIO(GPIO_FN_RTS3, RTS3_MARK), PINMUX_GPIO(GPIO_FN_CTS3, CTS3_MARK), PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK), PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK), PINMUX_GPIO(GPIO_FN_RTS4, RTS4_MARK), PINMUX_GPIO(GPIO_FN_RXD4, RXD4_MARK), PINMUX_GPIO(GPIO_FN_TXD4, TXD4_MARK), /* PTK (module: SERMUX, LBSC, SCIF) */ PINMUX_GPIO(GPIO_FN_COM2_TXD, COM2_TXD_MARK), PINMUX_GPIO(GPIO_FN_COM2_RXD, COM2_RXD_MARK), PINMUX_GPIO(GPIO_FN_COM2_RTS, COM2_RTS_MARK), PINMUX_GPIO(GPIO_FN_COM2_CTS, COM2_CTS_MARK), PINMUX_GPIO(GPIO_FN_COM2_DTR, COM2_DTR_MARK), PINMUX_GPIO(GPIO_FN_COM2_DSR, COM2_DSR_MARK), PINMUX_GPIO(GPIO_FN_COM2_DCD, COM2_DCD_MARK), PINMUX_GPIO(GPIO_FN_CLKOUT, CLKOUT_MARK), PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK), PINMUX_GPIO(GPIO_FN_SCK4, SCK4_MARK), PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK), /* PTL (module: SERMUX, SCIF, LBSC, AUD) */ PINMUX_GPIO(GPIO_FN_RAC_RXD, RAC_RXD_MARK), PINMUX_GPIO(GPIO_FN_RAC_RTS, RAC_RTS_MARK), PINMUX_GPIO(GPIO_FN_RAC_CTS, RAC_CTS_MARK), PINMUX_GPIO(GPIO_FN_RAC_DTR, RAC_DTR_MARK), PINMUX_GPIO(GPIO_FN_RAC_DSR, RAC_DSR_MARK), PINMUX_GPIO(GPIO_FN_RAC_DCD, RAC_DCD_MARK), PINMUX_GPIO(GPIO_FN_RAC_TXD, 
RAC_TXD_MARK), PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK), PINMUX_GPIO(GPIO_FN_CS5, CS5_MARK), PINMUX_GPIO(GPIO_FN_CS6, CS6_MARK), PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK), PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK), PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK), /* PTM (module: LBSC, IIC) */ PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK), PINMUX_GPIO(GPIO_FN_RD, RD_MARK), PINMUX_GPIO(GPIO_FN_WE0, WE0_MARK), PINMUX_GPIO(GPIO_FN_CS0, CS0_MARK), PINMUX_GPIO(GPIO_FN_SDA6, SDA6_MARK), PINMUX_GPIO(GPIO_FN_SCL6, SCL6_MARK), PINMUX_GPIO(GPIO_FN_SDA7, SDA7_MARK), PINMUX_GPIO(GPIO_FN_SCL7, SCL7_MARK), /* PTN (module: USB, JMC, SGPIO, WDT) */ PINMUX_GPIO(GPIO_FN_VBUS_EN, VBUS_EN_MARK), PINMUX_GPIO(GPIO_FN_VBUS_OC, VBUS_OC_MARK), PINMUX_GPIO(GPIO_FN_JMCTCK, JMCTCK_MARK), PINMUX_GPIO(GPIO_FN_JMCTMS, JMCTMS_MARK), PINMUX_GPIO(GPIO_FN_JMCTDO, JMCTDO_MARK), PINMUX_GPIO(GPIO_FN_JMCTDI, JMCTDI_MARK), PINMUX_GPIO(GPIO_FN_JMCTRST, JMCTRST_MARK), PINMUX_GPIO(GPIO_FN_SGPIO1_CLK, SGPIO1_CLK_MARK), PINMUX_GPIO(GPIO_FN_SGPIO1_LOAD, SGPIO1_LOAD_MARK), PINMUX_GPIO(GPIO_FN_SGPIO1_DI, SGPIO1_DI_MARK), PINMUX_GPIO(GPIO_FN_SGPIO1_DO, SGPIO1_DO_MARK), PINMUX_GPIO(GPIO_FN_SUB_CLKIN, SUB_CLKIN_MARK), /* PTO (module: SGPIO, SerMux) */ PINMUX_GPIO(GPIO_FN_SGPIO0_CLK, SGPIO0_CLK_MARK), PINMUX_GPIO(GPIO_FN_SGPIO0_LOAD, SGPIO0_LOAD_MARK), PINMUX_GPIO(GPIO_FN_SGPIO0_DI, SGPIO0_DI_MARK), PINMUX_GPIO(GPIO_FN_SGPIO0_DO, SGPIO0_DO_MARK), PINMUX_GPIO(GPIO_FN_SGPIO2_CLK, SGPIO2_CLK_MARK), PINMUX_GPIO(GPIO_FN_SGPIO2_LOAD, SGPIO2_LOAD_MARK), PINMUX_GPIO(GPIO_FN_SGPIO2_DI, SGPIO2_DI_MARK), PINMUX_GPIO(GPIO_FN_SGPIO2_DO, SGPIO2_DO_MARK), PINMUX_GPIO(GPIO_FN_COM1_TXD, COM1_TXD_MARK), PINMUX_GPIO(GPIO_FN_COM1_RXD, COM1_RXD_MARK), PINMUX_GPIO(GPIO_FN_COM1_RTS, COM1_RTS_MARK), PINMUX_GPIO(GPIO_FN_COM1_CTS, COM1_CTS_MARK), /* PTP (module: EVC, ADC) */ /* PTQ (module: LPC) */ PINMUX_GPIO(GPIO_FN_LAD3, LAD3_MARK), PINMUX_GPIO(GPIO_FN_LAD2, LAD2_MARK), PINMUX_GPIO(GPIO_FN_LAD1, LAD1_MARK), PINMUX_GPIO(GPIO_FN_LAD0, LAD0_MARK), 
PINMUX_GPIO(GPIO_FN_LFRAME, LFRAME_MARK), PINMUX_GPIO(GPIO_FN_LRESET, LRESET_MARK), PINMUX_GPIO(GPIO_FN_LCLK, LCLK_MARK), /* PTR (module: GRA, IIC) */ PINMUX_GPIO(GPIO_FN_DDC3, DDC3_MARK), PINMUX_GPIO(GPIO_FN_DDC2, DDC2_MARK), PINMUX_GPIO(GPIO_FN_SDA8, SDA8_MARK), PINMUX_GPIO(GPIO_FN_SCL8, SCL8_MARK), PINMUX_GPIO(GPIO_FN_SDA2, SDA2_MARK), PINMUX_GPIO(GPIO_FN_SCL2, SCL2_MARK), PINMUX_GPIO(GPIO_FN_SDA1, SDA1_MARK), PINMUX_GPIO(GPIO_FN_SCL1, SCL1_MARK), PINMUX_GPIO(GPIO_FN_SDA0, SDA0_MARK), PINMUX_GPIO(GPIO_FN_SCL0, SCL0_MARK), /* PTS (module: GRA, IIC) */ PINMUX_GPIO(GPIO_FN_DDC1, DDC1_MARK), PINMUX_GPIO(GPIO_FN_DDC0, DDC0_MARK), PINMUX_GPIO(GPIO_FN_SDA9, SDA9_MARK), PINMUX_GPIO(GPIO_FN_SCL9, SCL9_MARK), PINMUX_GPIO(GPIO_FN_SDA5, SDA5_MARK), PINMUX_GPIO(GPIO_FN_SCL5, SCL5_MARK), PINMUX_GPIO(GPIO_FN_SDA4, SDA4_MARK), PINMUX_GPIO(GPIO_FN_SCL4, SCL4_MARK), PINMUX_GPIO(GPIO_FN_SDA3, SDA3_MARK), PINMUX_GPIO(GPIO_FN_SCL3, SCL3_MARK), /* PTT (module: PWMX, AUD) */ PINMUX_GPIO(GPIO_FN_PWMX7, PWMX7_MARK), PINMUX_GPIO(GPIO_FN_PWMX6, PWMX6_MARK), PINMUX_GPIO(GPIO_FN_PWMX5, PWMX5_MARK), PINMUX_GPIO(GPIO_FN_PWMX4, PWMX4_MARK), PINMUX_GPIO(GPIO_FN_PWMX3, PWMX3_MARK), PINMUX_GPIO(GPIO_FN_PWMX2, PWMX2_MARK), PINMUX_GPIO(GPIO_FN_PWMX1, PWMX1_MARK), PINMUX_GPIO(GPIO_FN_PWMX0, PWMX0_MARK), PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK), PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK), PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK), PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK), PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK), PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK), /* PTU (module: LPC, APM) */ PINMUX_GPIO(GPIO_FN_LGPIO7, LGPIO7_MARK), PINMUX_GPIO(GPIO_FN_LGPIO6, LGPIO6_MARK), PINMUX_GPIO(GPIO_FN_LGPIO5, LGPIO5_MARK), PINMUX_GPIO(GPIO_FN_LGPIO4, LGPIO4_MARK), PINMUX_GPIO(GPIO_FN_LGPIO3, LGPIO3_MARK), PINMUX_GPIO(GPIO_FN_LGPIO2, LGPIO2_MARK), PINMUX_GPIO(GPIO_FN_LGPIO1, LGPIO1_MARK), PINMUX_GPIO(GPIO_FN_LGPIO0, LGPIO0_MARK), PINMUX_GPIO(GPIO_FN_APMONCTL_O, APMONCTL_O_MARK), 
PINMUX_GPIO(GPIO_FN_APMPWBTOUT_O, APMPWBTOUT_O_MARK), PINMUX_GPIO(GPIO_FN_APMSCI_O, APMSCI_O_MARK), PINMUX_GPIO(GPIO_FN_APMVDDON, APMVDDON_MARK), PINMUX_GPIO(GPIO_FN_APMSLPBTN, APMSLPBTN_MARK), PINMUX_GPIO(GPIO_FN_APMPWRBTN, APMPWRBTN_MARK), PINMUX_GPIO(GPIO_FN_APMS5N, APMS5N_MARK), PINMUX_GPIO(GPIO_FN_APMS3N, APMS3N_MARK), /* PTV (module: LBSC, SerMux, R-SPI, EVC, GRA) */ PINMUX_GPIO(GPIO_FN_A23, A23_MARK), PINMUX_GPIO(GPIO_FN_A22, A22_MARK), PINMUX_GPIO(GPIO_FN_A21, A21_MARK), PINMUX_GPIO(GPIO_FN_A20, A20_MARK), PINMUX_GPIO(GPIO_FN_A19, A19_MARK), PINMUX_GPIO(GPIO_FN_A18, A18_MARK), PINMUX_GPIO(GPIO_FN_A17, A17_MARK), PINMUX_GPIO(GPIO_FN_A16, A16_MARK), PINMUX_GPIO(GPIO_FN_COM2_RI, COM2_RI_MARK), PINMUX_GPIO(GPIO_FN_R_SPI_MOSI, R_SPI_MOSI_MARK), PINMUX_GPIO(GPIO_FN_R_SPI_MISO, R_SPI_MISO_MARK), PINMUX_GPIO(GPIO_FN_R_SPI_RSPCK, R_SPI_RSPCK_MARK), PINMUX_GPIO(GPIO_FN_R_SPI_SSL0, R_SPI_SSL0_MARK), PINMUX_GPIO(GPIO_FN_R_SPI_SSL1, R_SPI_SSL1_MARK), PINMUX_GPIO(GPIO_FN_EVENT7, EVENT7_MARK), PINMUX_GPIO(GPIO_FN_EVENT6, EVENT6_MARK), PINMUX_GPIO(GPIO_FN_VBIOS_DI, VBIOS_DI_MARK), PINMUX_GPIO(GPIO_FN_VBIOS_DO, VBIOS_DO_MARK), PINMUX_GPIO(GPIO_FN_VBIOS_CLK, VBIOS_CLK_MARK), PINMUX_GPIO(GPIO_FN_VBIOS_CS, VBIOS_CS_MARK), /* PTW (module: LBSC, EVC, SCIF) */ PINMUX_GPIO(GPIO_FN_A16, A16_MARK), /* NOTE(review): GPIO_FN_A16 is also listed under PTV above — duplicate lookup entry; confirm against the SH7757 manual which port actually carries A16 */ PINMUX_GPIO(GPIO_FN_A15, A15_MARK), PINMUX_GPIO(GPIO_FN_A14, A14_MARK), PINMUX_GPIO(GPIO_FN_A13, A13_MARK), PINMUX_GPIO(GPIO_FN_A12, A12_MARK), PINMUX_GPIO(GPIO_FN_A11, A11_MARK), PINMUX_GPIO(GPIO_FN_A10, A10_MARK), PINMUX_GPIO(GPIO_FN_A9, A9_MARK), PINMUX_GPIO(GPIO_FN_A8, A8_MARK), PINMUX_GPIO(GPIO_FN_EVENT5, EVENT5_MARK), PINMUX_GPIO(GPIO_FN_EVENT4, EVENT4_MARK), PINMUX_GPIO(GPIO_FN_EVENT3, EVENT3_MARK), PINMUX_GPIO(GPIO_FN_EVENT2, EVENT2_MARK), PINMUX_GPIO(GPIO_FN_EVENT1, EVENT1_MARK), PINMUX_GPIO(GPIO_FN_EVENT0, EVENT0_MARK), PINMUX_GPIO(GPIO_FN_CTS4, CTS4_MARK), PINMUX_GPIO(GPIO_FN_CTS2, CTS2_MARK), /* PTX (module: LBSC) */ PINMUX_GPIO(GPIO_FN_A7, A7_MARK), PINMUX_GPIO(GPIO_FN_A6, 
A6_MARK), PINMUX_GPIO(GPIO_FN_A5, A5_MARK), PINMUX_GPIO(GPIO_FN_A4, A4_MARK), PINMUX_GPIO(GPIO_FN_A3, A3_MARK), PINMUX_GPIO(GPIO_FN_A2, A2_MARK), PINMUX_GPIO(GPIO_FN_A1, A1_MARK), PINMUX_GPIO(GPIO_FN_A0, A0_MARK), PINMUX_GPIO(GPIO_FN_RTS2, RTS2_MARK), PINMUX_GPIO(GPIO_FN_SIM_D, SIM_D_MARK), PINMUX_GPIO(GPIO_FN_SIM_CLK, SIM_CLK_MARK), PINMUX_GPIO(GPIO_FN_SIM_RST, SIM_RST_MARK), /* PTY (module: LBSC) */ PINMUX_GPIO(GPIO_FN_D7, D7_MARK), PINMUX_GPIO(GPIO_FN_D6, D6_MARK), PINMUX_GPIO(GPIO_FN_D5, D5_MARK), PINMUX_GPIO(GPIO_FN_D4, D4_MARK), PINMUX_GPIO(GPIO_FN_D3, D3_MARK), PINMUX_GPIO(GPIO_FN_D2, D2_MARK), PINMUX_GPIO(GPIO_FN_D1, D1_MARK), PINMUX_GPIO(GPIO_FN_D0, D0_MARK), /* PTZ (module: eMMC, ONFI) */ PINMUX_GPIO(GPIO_FN_MMCDAT7, MMCDAT7_MARK), PINMUX_GPIO(GPIO_FN_MMCDAT6, MMCDAT6_MARK), PINMUX_GPIO(GPIO_FN_MMCDAT5, MMCDAT5_MARK), PINMUX_GPIO(GPIO_FN_MMCDAT4, MMCDAT4_MARK), PINMUX_GPIO(GPIO_FN_MMCDAT3, MMCDAT3_MARK), PINMUX_GPIO(GPIO_FN_MMCDAT2, MMCDAT2_MARK), PINMUX_GPIO(GPIO_FN_MMCDAT1, MMCDAT1_MARK), PINMUX_GPIO(GPIO_FN_MMCDAT0, MMCDAT0_MARK), PINMUX_GPIO(GPIO_FN_ON_DQ7, ON_DQ7_MARK), PINMUX_GPIO(GPIO_FN_ON_DQ6, ON_DQ6_MARK), PINMUX_GPIO(GPIO_FN_ON_DQ5, ON_DQ5_MARK), PINMUX_GPIO(GPIO_FN_ON_DQ4, ON_DQ4_MARK), PINMUX_GPIO(GPIO_FN_ON_DQ3, ON_DQ3_MARK), PINMUX_GPIO(GPIO_FN_ON_DQ2, ON_DQ2_MARK), PINMUX_GPIO(GPIO_FN_ON_DQ1, ON_DQ1_MARK), PINMUX_GPIO(GPIO_FN_ON_DQ0, ON_DQ0_MARK), }; static struct pinmux_cfg_reg pinmux_config_regs[] = { { PINMUX_CFG_REG("PACR", 0xffec0000, 16, 2) { PTA7_FN, PTA7_OUT, PTA7_IN, PTA7_IN_PU, PTA6_FN, PTA6_OUT, PTA6_IN, PTA6_IN_PU, PTA5_FN, PTA5_OUT, PTA5_IN, PTA5_IN_PU, PTA4_FN, PTA4_OUT, PTA4_IN, PTA4_IN_PU, PTA3_FN, PTA3_OUT, PTA3_IN, PTA3_IN_PU, PTA2_FN, PTA2_OUT, PTA2_IN, PTA2_IN_PU, PTA1_FN, PTA1_OUT, PTA1_IN, PTA1_IN_PU, PTA0_FN, PTA0_OUT, PTA0_IN, PTA0_IN_PU } }, { PINMUX_CFG_REG("PBCR", 0xffec0002, 16, 2) { PTB7_FN, PTB7_OUT, PTB7_IN, 0, PTB6_FN, PTB6_OUT, PTB6_IN, 0, PTB5_FN, PTB5_OUT, PTB5_IN, 0, PTB4_FN, PTB4_OUT, PTB4_IN, 0, 
/* Pin-function config registers: PxCR registers hold 2 bits per pin (function /
 * output / input / input-with-pullup; 0 = pull-up not available), followed by
 * the PSELn function-select registers with 1 bit per field (FN1 / FN2). */
PTB3_FN, PTB3_OUT, PTB3_IN, 0, PTB2_FN, PTB2_OUT, PTB2_IN, 0, PTB1_FN, PTB1_OUT, PTB1_IN, 0, PTB0_FN, PTB0_OUT, PTB0_IN, 0 } }, { PINMUX_CFG_REG("PCCR", 0xffec0004, 16, 2) { PTC7_FN, PTC7_OUT, PTC7_IN, 0, PTC6_FN, PTC6_OUT, PTC6_IN, 0, PTC5_FN, PTC5_OUT, PTC5_IN, 0, PTC4_FN, PTC4_OUT, PTC4_IN, 0, PTC3_FN, PTC3_OUT, PTC3_IN, 0, PTC2_FN, PTC2_OUT, PTC2_IN, 0, PTC1_FN, PTC1_OUT, PTC1_IN, 0, PTC0_FN, PTC0_OUT, PTC0_IN, 0 } }, { PINMUX_CFG_REG("PDCR", 0xffec0006, 16, 2) { PTD7_FN, PTD7_OUT, PTD7_IN, PTD7_IN_PU, PTD6_FN, PTD6_OUT, PTD6_IN, PTD6_IN_PU, PTD5_FN, PTD5_OUT, PTD5_IN, PTD5_IN_PU, PTD4_FN, PTD4_OUT, PTD4_IN, PTD4_IN_PU, PTD3_FN, PTD3_OUT, PTD3_IN, PTD3_IN_PU, PTD2_FN, PTD2_OUT, PTD2_IN, PTD2_IN_PU, PTD1_FN, PTD1_OUT, PTD1_IN, PTD1_IN_PU, PTD0_FN, PTD0_OUT, PTD0_IN, PTD0_IN_PU } }, { PINMUX_CFG_REG("PECR", 0xffec0008, 16, 2) { PTE7_FN, PTE7_OUT, PTE7_IN, PTE7_IN_PU, PTE6_FN, PTE6_OUT, PTE6_IN, PTE6_IN_PU, PTE5_FN, PTE5_OUT, PTE5_IN, PTE5_IN_PU, PTE4_FN, PTE4_OUT, PTE4_IN, PTE4_IN_PU, PTE3_FN, PTE3_OUT, PTE3_IN, PTE3_IN_PU, PTE2_FN, PTE2_OUT, PTE2_IN, PTE2_IN_PU, PTE1_FN, PTE1_OUT, PTE1_IN, PTE1_IN_PU, PTE0_FN, PTE0_OUT, PTE0_IN, PTE0_IN_PU } }, { PINMUX_CFG_REG("PFCR", 0xffec000a, 16, 2) { PTF7_FN, PTF7_OUT, PTF7_IN, PTF7_IN_PU, PTF6_FN, PTF6_OUT, PTF6_IN, PTF6_IN_PU, PTF5_FN, PTF5_OUT, PTF5_IN, PTF5_IN_PU, PTF4_FN, PTF4_OUT, PTF4_IN, PTF4_IN_PU, PTF3_FN, PTF3_OUT, PTF3_IN, PTF3_IN_PU, PTF2_FN, PTF2_OUT, PTF2_IN, PTF2_IN_PU, PTF1_FN, PTF1_OUT, PTF1_IN, PTF1_IN_PU, PTF0_FN, PTF0_OUT, PTF0_IN, PTF0_IN_PU } }, { PINMUX_CFG_REG("PGCR", 0xffec000c, 16, 2) { PTG7_FN, PTG7_OUT, PTG7_IN, PTG7_IN_PU , PTG6_FN, PTG6_OUT, PTG6_IN, PTG6_IN_PU , PTG5_FN, PTG5_OUT, PTG5_IN, 0, PTG4_FN, PTG4_OUT, PTG4_IN, PTG4_IN_PU , PTG3_FN, PTG3_OUT, PTG3_IN, 0, PTG2_FN, PTG2_OUT, PTG2_IN, 0, PTG1_FN, PTG1_OUT, PTG1_IN, 0, PTG0_FN, PTG0_OUT, PTG0_IN, 0 } }, { PINMUX_CFG_REG("PHCR", 0xffec000e, 16, 2) { PTH7_FN, PTH7_OUT, PTH7_IN, PTH7_IN_PU, PTH6_FN, PTH6_OUT, PTH6_IN, PTH6_IN_PU, PTH5_FN, 
PTH5_OUT, PTH5_IN, PTH5_IN_PU, PTH4_FN, PTH4_OUT, PTH4_IN, PTH4_IN_PU, PTH3_FN, PTH3_OUT, PTH3_IN, PTH3_IN_PU, PTH2_FN, PTH2_OUT, PTH2_IN, PTH2_IN_PU, PTH1_FN, PTH1_OUT, PTH1_IN, PTH1_IN_PU, PTH0_FN, PTH0_OUT, PTH0_IN, PTH0_IN_PU } }, { PINMUX_CFG_REG("PICR", 0xffec0010, 16, 2) { PTI7_FN, PTI7_OUT, PTI7_IN, PTI7_IN_PU, PTI6_FN, PTI6_OUT, PTI6_IN, PTI6_IN_PU, PTI5_FN, PTI5_OUT, PTI5_IN, 0, PTI4_FN, PTI4_OUT, PTI4_IN, PTI4_IN_PU, PTI3_FN, PTI3_OUT, PTI3_IN, PTI3_IN_PU, PTI2_FN, PTI2_OUT, PTI2_IN, PTI2_IN_PU, PTI1_FN, PTI1_OUT, PTI1_IN, PTI1_IN_PU, PTI0_FN, PTI0_OUT, PTI0_IN, PTI0_IN_PU } }, { PINMUX_CFG_REG("PJCR", 0xffec0012, 16, 2) { 0, 0, 0, 0, /* reserved: always set 1 */ PTJ6_FN, PTJ6_OUT, PTJ6_IN, PTJ6_IN_PU, PTJ5_FN, PTJ5_OUT, PTJ5_IN, PTJ5_IN_PU, PTJ4_FN, PTJ4_OUT, PTJ4_IN, PTJ4_IN_PU, PTJ3_FN, PTJ3_OUT, PTJ3_IN, PTJ3_IN_PU, PTJ2_FN, PTJ2_OUT, PTJ2_IN, PTJ2_IN_PU, PTJ1_FN, PTJ1_OUT, PTJ1_IN, PTJ1_IN_PU, PTJ0_FN, PTJ0_OUT, PTJ0_IN, PTJ0_IN_PU } }, { PINMUX_CFG_REG("PKCR", 0xffec0014, 16, 2) { PTK7_FN, PTK7_OUT, PTK7_IN, PTK7_IN_PU, PTK6_FN, PTK6_OUT, PTK6_IN, PTK6_IN_PU, PTK5_FN, PTK5_OUT, PTK5_IN, PTK5_IN_PU, PTK4_FN, PTK4_OUT, PTK4_IN, PTK4_IN_PU, PTK3_FN, PTK3_OUT, PTK3_IN, PTK3_IN_PU, PTK2_FN, PTK2_OUT, PTK2_IN, PTK2_IN_PU, PTK1_FN, PTK1_OUT, PTK1_IN, PTK1_IN_PU, PTK0_FN, PTK0_OUT, PTK0_IN, PTK0_IN_PU } }, { PINMUX_CFG_REG("PLCR", 0xffec0016, 16, 2) { 0, 0, 0, 0, /* reserved: always set 1 */ PTL6_FN, PTL6_OUT, PTL6_IN, PTL6_IN_PU, PTL5_FN, PTL5_OUT, PTL5_IN, PTL5_IN_PU, PTL4_FN, PTL4_OUT, PTL4_IN, PTL4_IN_PU, PTL3_FN, PTL3_OUT, PTL3_IN, PTL3_IN_PU, PTL2_FN, PTL2_OUT, PTL2_IN, PTL2_IN_PU, PTL1_FN, PTL1_OUT, PTL1_IN, PTL1_IN_PU, PTL0_FN, PTL0_OUT, PTL0_IN, PTL0_IN_PU } }, { PINMUX_CFG_REG("PMCR", 0xffec0018, 16, 2) { PTM7_FN, PTM7_OUT, PTM7_IN, PTM7_IN_PU, PTM6_FN, PTM6_OUT, PTM6_IN, PTM6_IN_PU, PTM5_FN, PTM5_OUT, PTM5_IN, PTM5_IN_PU, PTM4_FN, PTM4_OUT, PTM4_IN, PTM4_IN_PU, PTM3_FN, PTM3_OUT, PTM3_IN, 0, PTM2_FN, PTM2_OUT, PTM2_IN, 0, PTM1_FN, PTM1_OUT, 
PTM1_IN, 0, PTM0_FN, PTM0_OUT, PTM0_IN, 0 } }, { PINMUX_CFG_REG("PNCR", 0xffec001a, 16, 2) { 0, 0, 0, 0, /* reserved: always set 1 */ PTN6_FN, PTN6_OUT, PTN6_IN, 0, PTN5_FN, PTN5_OUT, PTN5_IN, 0, PTN4_FN, PTN4_OUT, PTN4_IN, PTN4_IN_PU, PTN3_FN, PTN3_OUT, PTN3_IN, PTN3_IN_PU, PTN2_FN, PTN2_OUT, PTN2_IN, PTN2_IN_PU, PTN1_FN, PTN1_OUT, PTN1_IN, PTN1_IN_PU, PTN0_FN, PTN0_OUT, PTN0_IN, PTN0_IN_PU } }, { PINMUX_CFG_REG("POCR", 0xffec001c, 16, 2) { PTO7_FN, PTO7_OUT, PTO7_IN, PTO7_IN_PU, PTO6_FN, PTO6_OUT, PTO6_IN, PTO6_IN_PU, PTO5_FN, PTO5_OUT, PTO5_IN, PTO5_IN_PU, PTO4_FN, PTO4_OUT, PTO4_IN, PTO4_IN_PU, PTO3_FN, PTO3_OUT, PTO3_IN, PTO3_IN_PU, PTO2_FN, PTO2_OUT, PTO2_IN, PTO2_IN_PU, PTO1_FN, PTO1_OUT, PTO1_IN, PTO1_IN_PU, PTO0_FN, PTO0_OUT, PTO0_IN, PTO0_IN_PU } }, #if 0 /* FIXME: Remove it? */ { PINMUX_CFG_REG("PPCR", 0xffec001e, 16, 2) { 0, 0, 0, 0, /* reserved: always set 1 */ PTP6_FN, PTP6_OUT, PTP6_IN, 0, PTP5_FN, PTP5_OUT, PTP5_IN, 0, PTP4_FN, PTP4_OUT, PTP4_IN, 0, PTP3_FN, PTP3_OUT, PTP3_IN, 0, PTP2_FN, PTP2_OUT, PTP2_IN, 0, PTP1_FN, PTP1_OUT, PTP1_IN, 0, PTP0_FN, PTP0_OUT, PTP0_IN, 0 } }, #endif { PINMUX_CFG_REG("PQCR", 0xffec0020, 16, 2) { 0, 0, 0, 0, /* reserved: always set 1 */ PTQ6_FN, PTQ6_OUT, PTQ6_IN, 0, PTQ5_FN, PTQ5_OUT, PTQ5_IN, 0, PTQ4_FN, PTQ4_OUT, PTQ4_IN, 0, PTQ3_FN, PTQ3_OUT, PTQ3_IN, 0, PTQ2_FN, PTQ2_OUT, PTQ2_IN, 0, PTQ1_FN, PTQ1_OUT, PTQ1_IN, 0, PTQ0_FN, PTQ0_OUT, PTQ0_IN, 0 } }, { PINMUX_CFG_REG("PRCR", 0xffec0022, 16, 2) { PTR7_FN, PTR7_OUT, PTR7_IN, 0, PTR6_FN, PTR6_OUT, PTR6_IN, 0, PTR5_FN, PTR5_OUT, PTR5_IN, 0, PTR4_FN, PTR4_OUT, PTR4_IN, 0, PTR3_FN, PTR3_OUT, PTR3_IN, 0, PTR2_FN, PTR2_OUT, PTR2_IN, 0, PTR1_FN, PTR1_OUT, PTR1_IN, 0, PTR0_FN, PTR0_OUT, PTR0_IN, 0 } }, { PINMUX_CFG_REG("PSCR", 0xffec0024, 16, 2) { PTS7_FN, PTS7_OUT, PTS7_IN, 0, PTS6_FN, PTS6_OUT, PTS6_IN, 0, PTS5_FN, PTS5_OUT, PTS5_IN, 0, PTS4_FN, PTS4_OUT, PTS4_IN, 0, PTS3_FN, PTS3_OUT, PTS3_IN, 0, PTS2_FN, PTS2_OUT, PTS2_IN, 0, PTS1_FN, PTS1_OUT, PTS1_IN, 0, PTS0_FN, 
PTS0_OUT, PTS0_IN, 0 } }, /* NOTE(review): the PTCR rows below pair PTTn_* fields with PTOn_IN_PU pull-up enums — looks like a copy-paste from POCR; confirm against the SH7757 manual / pin enums (PTTn_IN_PU) before changing. */ { PINMUX_CFG_REG("PTCR", 0xffec0026, 16, 2) { PTT7_FN, PTT7_OUT, PTT7_IN, PTO7_IN_PU, PTT6_FN, PTT6_OUT, PTT6_IN, PTO6_IN_PU, PTT5_FN, PTT5_OUT, PTT5_IN, PTO5_IN_PU, PTT4_FN, PTT4_OUT, PTT4_IN, PTO4_IN_PU, PTT3_FN, PTT3_OUT, PTT3_IN, PTO3_IN_PU, PTT2_FN, PTT2_OUT, PTT2_IN, PTO2_IN_PU, PTT1_FN, PTT1_OUT, PTT1_IN, PTO1_IN_PU, PTT0_FN, PTT0_OUT, PTT0_IN, PTO0_IN_PU } }, { PINMUX_CFG_REG("PUCR", 0xffec0028, 16, 2) { PTU7_FN, PTU7_OUT, PTU7_IN, PTU7_IN_PU, PTU6_FN, PTU6_OUT, PTU6_IN, PTU6_IN_PU, PTU5_FN, PTU5_OUT, PTU5_IN, PTU5_IN_PU, PTU4_FN, PTU4_OUT, PTU4_IN, PTU4_IN_PU, PTU3_FN, PTU3_OUT, PTU3_IN, PTU3_IN_PU, PTU2_FN, PTU2_OUT, PTU2_IN, PTU2_IN_PU, PTU1_FN, PTU1_OUT, PTU1_IN, PTU1_IN_PU, PTU0_FN, PTU0_OUT, PTU0_IN, PTU0_IN_PU } }, { PINMUX_CFG_REG("PVCR", 0xffec002a, 16, 2) { PTV7_FN, PTV7_OUT, PTV7_IN, PTV7_IN_PU, PTV6_FN, PTV6_OUT, PTV6_IN, PTV6_IN_PU, PTV5_FN, PTV5_OUT, PTV5_IN, PTV5_IN_PU, PTV4_FN, PTV4_OUT, PTV4_IN, PTV4_IN_PU, PTV3_FN, PTV3_OUT, PTV3_IN, PTV3_IN_PU, PTV2_FN, PTV2_OUT, PTV2_IN, PTV2_IN_PU, PTV1_FN, PTV1_OUT, PTV1_IN, 0, PTV0_FN, PTV0_OUT, PTV0_IN, 0 } }, { PINMUX_CFG_REG("PWCR", 0xffec002c, 16, 2) { PTW7_FN, PTW7_OUT, PTW7_IN, 0, PTW6_FN, PTW6_OUT, PTW6_IN, 0, PTW5_FN, PTW5_OUT, PTW5_IN, 0, PTW4_FN, PTW4_OUT, PTW4_IN, 0, PTW3_FN, PTW3_OUT, PTW3_IN, 0, PTW2_FN, PTW2_OUT, PTW2_IN, 0, PTW1_FN, PTW1_OUT, PTW1_IN, PTW1_IN_PU, PTW0_FN, PTW0_OUT, PTW0_IN, PTW0_IN_PU } }, { PINMUX_CFG_REG("PXCR", 0xffec002e, 16, 2) { PTX7_FN, PTX7_OUT, PTX7_IN, PTX7_IN_PU, PTX6_FN, PTX6_OUT, PTX6_IN, PTX6_IN_PU, PTX5_FN, PTX5_OUT, PTX5_IN, PTX5_IN_PU, PTX4_FN, PTX4_OUT, PTX4_IN, PTX4_IN_PU, PTX3_FN, PTX3_OUT, PTX3_IN, PTX3_IN_PU, PTX2_FN, PTX2_OUT, PTX2_IN, PTX2_IN_PU, PTX1_FN, PTX1_OUT, PTX1_IN, PTX1_IN_PU, PTX0_FN, PTX0_OUT, PTX0_IN, PTX0_IN_PU } }, { PINMUX_CFG_REG("PYCR", 0xffec0030, 16, 2) { PTY7_FN, PTY7_OUT, PTY7_IN, PTY7_IN_PU, PTY6_FN, PTY6_OUT, PTY6_IN, PTY6_IN_PU, PTY5_FN, PTY5_OUT, PTY5_IN, PTY5_IN_PU, PTY4_FN, PTY4_OUT, PTY4_IN, 
/* Remaining PSELn function-select registers, then the byte-wide PxDR data
 * registers (one bit per pin; 0 slots mark reserved bits on PTJ/PTL/PTN/PTQ),
 * the pinmux_info descriptor tying all tables together, and registration. */
} }, { PINMUX_CFG_REG("PSEL5", 0xffec007a, 16, 1) { 0, 0, 0, 0, 0, 0, 0, 0, PS5_11_FN1, PS5_11_FN2, PS5_10_FN1, PS5_10_FN2, PS5_9_FN1, PS5_9_FN2, PS5_8_FN1, PS5_8_FN2, PS5_7_FN1, PS5_7_FN2, PS5_6_FN1, PS5_6_FN2, PS5_5_FN1, PS5_5_FN2, PS5_4_FN1, PS5_4_FN2, PS5_3_FN1, PS5_3_FN2, PS5_2_FN1, PS5_2_FN2, 0, 0, 0, 0, } }, { PINMUX_CFG_REG("PSEL6", 0xffec007c, 16, 1) { PS6_15_FN1, PS6_15_FN2, PS6_14_FN1, PS6_14_FN2, PS6_13_FN1, PS6_13_FN2, PS6_12_FN1, PS6_12_FN2, PS6_11_FN1, PS6_11_FN2, PS6_10_FN1, PS6_10_FN2, PS6_9_FN1, PS6_9_FN2, PS6_8_FN1, PS6_8_FN2, PS6_7_FN1, PS6_7_FN2, PS6_6_FN1, PS6_6_FN2, PS6_5_FN1, PS6_5_FN2, PS6_4_FN1, PS6_4_FN2, PS6_3_FN1, PS6_3_FN2, PS6_2_FN1, PS6_2_FN2, PS6_1_FN1, PS6_1_FN2, PS6_0_FN1, PS6_0_FN2, } }, { PINMUX_CFG_REG("PSEL7", 0xffec0082, 16, 1) { PS7_15_FN1, PS7_15_FN2, PS7_14_FN1, PS7_14_FN2, PS7_13_FN1, PS7_13_FN2, PS7_12_FN1, PS7_12_FN2, PS7_11_FN1, PS7_11_FN2, PS7_10_FN1, PS7_10_FN2, PS7_9_FN1, PS7_9_FN2, PS7_8_FN1, PS7_8_FN2, PS7_7_FN1, PS7_7_FN2, PS7_6_FN1, PS7_6_FN2, PS7_5_FN1, PS7_5_FN2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, } }, { PINMUX_CFG_REG("PSEL8", 0xffec0084, 16, 1) { PS8_15_FN1, PS8_15_FN2, PS8_14_FN1, PS8_14_FN2, PS8_13_FN1, PS8_13_FN2, PS8_12_FN1, PS8_12_FN2, PS8_11_FN1, PS8_11_FN2, PS8_10_FN1, PS8_10_FN2, PS8_9_FN1, PS8_9_FN2, PS8_8_FN1, PS8_8_FN2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, } }, {} }; static struct pinmux_data_reg pinmux_data_regs[] = { { PINMUX_DATA_REG("PADR", 0xffec0034, 8) { PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA, PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA } }, { PINMUX_DATA_REG("PBDR", 0xffec0036, 8) { PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA, PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA } }, { PINMUX_DATA_REG("PCDR", 0xffec0038, 8) { PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA, PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA } }, { PINMUX_DATA_REG("PDDR", 0xffec003a, 8) { PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA, PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA } }, { PINMUX_DATA_REG("PEDR", 0xffec003c, 8) { 
PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA, PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA } }, { PINMUX_DATA_REG("PFDR", 0xffec003e, 8) { PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA, PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA } }, { PINMUX_DATA_REG("PGDR", 0xffec0040, 8) { PTG7_DATA, PTG6_DATA, PTG5_DATA, PTG4_DATA, PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA } }, { PINMUX_DATA_REG("PHDR", 0xffec0042, 8) { PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA, PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA } }, { PINMUX_DATA_REG("PIDR", 0xffec0044, 8) { PTI7_DATA, PTI6_DATA, PTI5_DATA, PTI4_DATA, PTI3_DATA, PTI2_DATA, PTI1_DATA, PTI0_DATA } }, { PINMUX_DATA_REG("PJDR", 0xffec0046, 8) { 0, PTJ6_DATA, PTJ5_DATA, PTJ4_DATA, PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA } }, { PINMUX_DATA_REG("PKDR", 0xffec0048, 8) { PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA, PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA } }, { PINMUX_DATA_REG("PLDR", 0xffec004a, 8) { 0, PTL6_DATA, PTL5_DATA, PTL4_DATA, PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA } }, { PINMUX_DATA_REG("PMDR", 0xffec004c, 8) { PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA, PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA } }, { PINMUX_DATA_REG("PNDR", 0xffec004e, 8) { 0, PTN6_DATA, PTN5_DATA, PTN4_DATA, PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA } }, { PINMUX_DATA_REG("PODR", 0xffec0050, 8) { PTO7_DATA, PTO6_DATA, PTO5_DATA, PTO4_DATA, PTO3_DATA, PTO2_DATA, PTO1_DATA, PTO0_DATA } }, { PINMUX_DATA_REG("PPDR", 0xffec0052, 8) { PTP7_DATA, PTP6_DATA, PTP5_DATA, PTP4_DATA, PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA } }, { PINMUX_DATA_REG("PQDR", 0xffec0054, 8) { 0, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA, PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA } }, { PINMUX_DATA_REG("PRDR", 0xffec0056, 8) { PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA, PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA } }, { PINMUX_DATA_REG("PSDR", 0xffec0058, 8) { PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA, PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA } }, { PINMUX_DATA_REG("PTDR", 0xffec005a, 8) { 
PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA } }, { PINMUX_DATA_REG("PUDR", 0xffec005c, 8) { PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA } }, { PINMUX_DATA_REG("PVDR", 0xffec005e, 8) { PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA, PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA } }, { PINMUX_DATA_REG("PWDR", 0xffec0060, 8) { PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA, PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA } }, { PINMUX_DATA_REG("PXDR", 0xffec0062, 8) { PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA, PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA } }, { PINMUX_DATA_REG("PYDR", 0xffec0064, 8) { PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA, PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA } }, { PINMUX_DATA_REG("PZDR", 0xffec0066, 8) { PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA, PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA } }, { }, }; static struct pinmux_info sh7757_pinmux_info = { .name = "sh7757_pfc", .reserved_id = PINMUX_RESERVED, .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END }, .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END }, .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END }, .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END }, .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END }, .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END }, .first_gpio = GPIO_PTA0, .last_gpio = GPIO_FN_ON_DQ0, .gpios = pinmux_gpios, .cfg_regs = pinmux_config_regs, .data_regs = pinmux_data_regs, .gpio_data = pinmux_data, .gpio_data_size = ARRAY_SIZE(pinmux_data), }; /* Register this SoC's pinmux tables with the SH pinmux core at arch init time. */ static int __init plat_pinmux_setup(void) { return register_pinmux(&sh7757_pinmux_info); } arch_initcall(plat_pinmux_setup);
gpl-2.0
tipnispranav/android-goldfish-3.4-rt
drivers/video/stifb.c
9887
36847
/* * linux/drivers/video/stifb.c - * Low level Frame buffer driver for HP workstations with * STI (standard text interface) video firmware. * * Copyright (C) 2001-2006 Helge Deller <deller@gmx.de> * Portions Copyright (C) 2001 Thomas Bogendoerfer <tsbogend@alpha.franken.de> * * Based on: * - linux/drivers/video/artistfb.c -- Artist frame buffer driver * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> * - based on skeletonfb, which was * Created 28 Dec 1997 by Geert Uytterhoeven * - HP Xhp cfb-based X11 window driver for XFree86 * (c)Copyright 1992 Hewlett-Packard Co. * * * The following graphics display devices (NGLE family) are supported by this driver: * * HPA4070A known as "HCRX", a 1280x1024 color device with 8 planes * HPA4071A known as "HCRX24", a 1280x1024 color device with 24 planes, * optionally available with a hardware accelerator as HPA4071A_Z * HPA1659A known as "CRX", a 1280x1024 color device with 8 planes * HPA1439A known as "CRX24", a 1280x1024 color device with 24 planes, * optionally available with a hardware accelerator. * HPA1924A known as "GRX", a 1280x1024 grayscale device with 8 planes * HPA2269A known as "Dual CRX", a 1280x1024 color device with 8 planes, * implements support for two displays on a single graphics card. * HP710C internal graphics support optionally available on the HP9000s710 SPU, * supports 1280x1024 color displays with 8 planes. * HP710G same as HP710C, 1280x1024 grayscale only * HP710L same as HP710C, 1024x768 color only * HP712 internal graphics support on HP9000s712 SPU, supports 640x480, * 1024x768 or 1280x1024 color displays on 8 planes (Artist) * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ /* TODO: * - 1bpp mode is completely untested * - add support for h/w acceleration * - add hardware cursor * - automatically disable double buffering (e.g. 
on RDI precisionbook laptop) */ /* on supported graphic devices you may: * #define FALLBACK_TO_1BPP to fall back to 1 bpp, or * #undef FALLBACK_TO_1BPP to reject support for unsupported cards */ #undef FALLBACK_TO_1BPP #undef DEBUG_STIFB_REGS /* debug sti register accesses */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/ioport.h> #include <asm/grfioctl.h> /* for HP-UX compatibility */ #include <asm/uaccess.h> #include "sticore.h" /* REGION_BASE(fb_info, index) returns the virtual address for region <index> */ #define REGION_BASE(fb_info, index) \ F_EXTEND(fb_info->sti->glob_cfg->region_ptrs[index]) #define NGLEDEVDEPROM_CRT_REGION 1 #define NR_PALETTE 256 typedef struct { __s32 video_config_reg; __s32 misc_video_start; __s32 horiz_timing_fmt; __s32 serr_timing_fmt; __s32 vert_timing_fmt; __s32 horiz_state; __s32 vert_state; __s32 vtg_state_elements; __s32 pipeline_delay; __s32 misc_video_end; } video_setup_t; typedef struct { __s16 sizeof_ngle_data; __s16 x_size_visible; /* visible screen dim in pixels */ __s16 y_size_visible; __s16 pad2[15]; __s16 cursor_pipeline_delay; __s16 video_interleaves; __s32 pad3[11]; } ngle_rom_t; struct stifb_info { struct fb_info info; unsigned int id; ngle_rom_t ngle_rom; struct sti_struct *sti; int deviceSpecificConfig; u32 pseudo_palette[16]; }; static int __initdata stifb_bpp_pref[MAX_STI_ROMS]; /* ------------------- chipset specific functions -------------------------- */ /* offsets to graphic-chip internal registers */ #define REG_1 0x000118 #define REG_2 0x000480 #define REG_3 0x0004a0 #define REG_4 0x000600 #define REG_6 0x000800 #define REG_8 0x000820 #define REG_9 0x000a04 #define REG_10 0x018000 #define REG_11 0x018004 #define REG_12 0x01800c #define REG_13 0x018018 #define REG_14 0x01801c #define REG_15 0x200000 #define REG_15b0 
0x200000 #define REG_16b1 0x200005 #define REG_16b3 0x200007 #define REG_21 0x200218 #define REG_22 0x0005a0 #define REG_23 0x0005c0 #define REG_26 0x200118 #define REG_27 0x200308 #define REG_32 0x21003c #define REG_33 0x210040 #define REG_34 0x200008 #define REG_35 0x018010 #define REG_38 0x210020 #define REG_39 0x210120 #define REG_40 0x210130 #define REG_42 0x210028 #define REG_43 0x21002c #define REG_44 0x210030 #define REG_45 0x210034 #define READ_BYTE(fb,reg) gsc_readb((fb)->info.fix.mmio_start + (reg)) #define READ_WORD(fb,reg) gsc_readl((fb)->info.fix.mmio_start + (reg)) #ifndef DEBUG_STIFB_REGS # define DEBUG_OFF() # define DEBUG_ON() # define WRITE_BYTE(value,fb,reg) gsc_writeb((value),(fb)->info.fix.mmio_start + (reg)) # define WRITE_WORD(value,fb,reg) gsc_writel((value),(fb)->info.fix.mmio_start + (reg)) #else static int debug_on = 1; # define DEBUG_OFF() debug_on=0 # define DEBUG_ON() debug_on=1 # define WRITE_BYTE(value,fb,reg) do { if (debug_on) \ printk(KERN_DEBUG "%30s: WRITE_BYTE(0x%06x) = 0x%02x (old=0x%02x)\n", \ __func__, reg, value, READ_BYTE(fb,reg)); \ gsc_writeb((value),(fb)->info.fix.mmio_start + (reg)); } while (0) # define WRITE_WORD(value,fb,reg) do { if (debug_on) \ printk(KERN_DEBUG "%30s: WRITE_WORD(0x%06x) = 0x%08x (old=0x%08x)\n", \ __func__, reg, value, READ_WORD(fb,reg)); \ gsc_writel((value),(fb)->info.fix.mmio_start + (reg)); } while (0) #endif /* DEBUG_STIFB_REGS */ #define ENABLE 1 /* for enabling/disabling screen */ #define DISABLE 0 #define NGLE_LOCK(fb_info) do { } while (0) #define NGLE_UNLOCK(fb_info) do { } while (0) static void SETUP_HW(struct stifb_info *fb) { char stat; do { stat = READ_BYTE(fb, REG_15b0); if (!stat) stat = READ_BYTE(fb, REG_15b0); } while (stat); } static void SETUP_FB(struct stifb_info *fb) { unsigned int reg10_value = 0; SETUP_HW(fb); switch (fb->id) { case CRT_ID_VISUALIZE_EG: case S9000_ID_ARTIST: case S9000_ID_A1659A: reg10_value = 0x13601000; break; case S9000_ID_A1439A: if 
(fb->info.var.bits_per_pixel == 32) reg10_value = 0xBBA0A000; else reg10_value = 0x13601000; break; case S9000_ID_HCRX: if (fb->info.var.bits_per_pixel == 32) reg10_value = 0xBBA0A000; else reg10_value = 0x13602000; break; case S9000_ID_TIMBER: case CRX24_OVERLAY_PLANES: reg10_value = 0x13602000; break; } if (reg10_value) WRITE_WORD(reg10_value, fb, REG_10); WRITE_WORD(0x83000300, fb, REG_14); SETUP_HW(fb); WRITE_BYTE(1, fb, REG_16b1); } static void START_IMAGE_COLORMAP_ACCESS(struct stifb_info *fb) { SETUP_HW(fb); WRITE_WORD(0xBBE0F000, fb, REG_10); WRITE_WORD(0x03000300, fb, REG_14); WRITE_WORD(~0, fb, REG_13); } static void WRITE_IMAGE_COLOR(struct stifb_info *fb, int index, int color) { SETUP_HW(fb); WRITE_WORD(((0x100+index)<<2), fb, REG_3); WRITE_WORD(color, fb, REG_4); } static void FINISH_IMAGE_COLORMAP_ACCESS(struct stifb_info *fb) { WRITE_WORD(0x400, fb, REG_2); if (fb->info.var.bits_per_pixel == 32) { WRITE_WORD(0x83000100, fb, REG_1); } else { if (fb->id == S9000_ID_ARTIST || fb->id == CRT_ID_VISUALIZE_EG) WRITE_WORD(0x80000100, fb, REG_26); else WRITE_WORD(0x80000100, fb, REG_1); } SETUP_FB(fb); } static void SETUP_RAMDAC(struct stifb_info *fb) { SETUP_HW(fb); WRITE_WORD(0x04000000, fb, 0x1020); WRITE_WORD(0xff000000, fb, 0x1028); } static void CRX24_SETUP_RAMDAC(struct stifb_info *fb) { SETUP_HW(fb); WRITE_WORD(0x04000000, fb, 0x1000); WRITE_WORD(0x02000000, fb, 0x1004); WRITE_WORD(0xff000000, fb, 0x1008); WRITE_WORD(0x05000000, fb, 0x1000); WRITE_WORD(0x02000000, fb, 0x1004); WRITE_WORD(0x03000000, fb, 0x1008); } #if 0 static void HCRX_SETUP_RAMDAC(struct stifb_info *fb) { WRITE_WORD(0xffffffff, fb, REG_32); } #endif static void CRX24_SET_OVLY_MASK(struct stifb_info *fb) { SETUP_HW(fb); WRITE_WORD(0x13a02000, fb, REG_11); WRITE_WORD(0x03000300, fb, REG_14); WRITE_WORD(0x000017f0, fb, REG_3); WRITE_WORD(0xffffffff, fb, REG_13); WRITE_WORD(0xffffffff, fb, REG_22); WRITE_WORD(0x00000000, fb, REG_23); } static void ENABLE_DISABLE_DISPLAY(struct 
stifb_info *fb, int enable) { unsigned int value = enable ? 0x43000000 : 0x03000000; SETUP_HW(fb); WRITE_WORD(0x06000000, fb, 0x1030); WRITE_WORD(value, fb, 0x1038); } static void CRX24_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable) { unsigned int value = enable ? 0x10000000 : 0x30000000; SETUP_HW(fb); WRITE_WORD(0x01000000, fb, 0x1000); WRITE_WORD(0x02000000, fb, 0x1004); WRITE_WORD(value, fb, 0x1008); } static void ARTIST_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable) { u32 DregsMiscVideo = REG_21; u32 DregsMiscCtl = REG_27; SETUP_HW(fb); if (enable) { WRITE_WORD(READ_WORD(fb, DregsMiscVideo) | 0x0A000000, fb, DregsMiscVideo); WRITE_WORD(READ_WORD(fb, DregsMiscCtl) | 0x00800000, fb, DregsMiscCtl); } else { WRITE_WORD(READ_WORD(fb, DregsMiscVideo) & ~0x0A000000, fb, DregsMiscVideo); WRITE_WORD(READ_WORD(fb, DregsMiscCtl) & ~0x00800000, fb, DregsMiscCtl); } } #define GET_ROMTABLE_INDEX(fb) \ (READ_BYTE(fb, REG_16b3) - 1) #define HYPER_CONFIG_PLANES_24 0x00000100 #define IS_24_DEVICE(fb) \ (fb->deviceSpecificConfig & HYPER_CONFIG_PLANES_24) #define IS_888_DEVICE(fb) \ (!(IS_24_DEVICE(fb))) #define GET_FIFO_SLOTS(fb, cnt, numslots) \ { while (cnt < numslots) \ cnt = READ_WORD(fb, REG_34); \ cnt -= numslots; \ } #define IndexedDcd 0 /* Pixel data is indexed (pseudo) color */ #define Otc04 2 /* Pixels in each longword transfer (4) */ #define Otc32 5 /* Pixels in each longword transfer (32) */ #define Ots08 3 /* Each pixel is size (8)d transfer (1) */ #define OtsIndirect 6 /* Each bit goes through FG/BG color(8) */ #define AddrLong 5 /* FB address is Long aligned (pixel) */ #define BINovly 0x2 /* 8 bit overlay */ #define BINapp0I 0x0 /* Application Buffer 0, Indexed */ #define BINapp1I 0x1 /* Application Buffer 1, Indexed */ #define BINapp0F8 0xa /* Application Buffer 0, Fractional 8-8-8 */ #define BINattr 0xd /* Attribute Bitmap */ #define RopSrc 0x3 #define BitmapExtent08 3 /* Each write hits ( 8) bits in depth */ #define BitmapExtent32 5 /* Each 
write hits (32) bits in depth */ #define DataDynamic 0 /* Data register reloaded by direct access */ #define MaskDynamic 1 /* Mask register reloaded by direct access */ #define MaskOtc 0 /* Mask contains Object Count valid bits */ #define MaskAddrOffset(offset) (offset) #define StaticReg(en) (en) #define BGx(en) (en) #define FGx(en) (en) #define BAJustPoint(offset) (offset) #define BAIndexBase(base) (base) #define BA(F,C,S,A,J,B,I) \ (((F)<<31)|((C)<<27)|((S)<<24)|((A)<<21)|((J)<<16)|((B)<<12)|(I)) #define IBOvals(R,M,X,S,D,L,B,F) \ (((R)<<8)|((M)<<16)|((X)<<24)|((S)<<29)|((D)<<28)|((L)<<31)|((B)<<1)|(F)) #define NGLE_QUICK_SET_IMAGE_BITMAP_OP(fb, val) \ WRITE_WORD(val, fb, REG_14) #define NGLE_QUICK_SET_DST_BM_ACCESS(fb, val) \ WRITE_WORD(val, fb, REG_11) #define NGLE_QUICK_SET_CTL_PLN_REG(fb, val) \ WRITE_WORD(val, fb, REG_12) #define NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, plnmsk32) \ WRITE_WORD(plnmsk32, fb, REG_13) #define NGLE_REALLY_SET_IMAGE_FG_COLOR(fb, fg32) \ WRITE_WORD(fg32, fb, REG_35) #define NGLE_SET_TRANSFERDATA(fb, val) \ WRITE_WORD(val, fb, REG_8) #define NGLE_SET_DSTXY(fb, val) \ WRITE_WORD(val, fb, REG_6) #define NGLE_LONG_FB_ADDRESS(fbaddrbase, x, y) ( \ (u32) (fbaddrbase) + \ ( (unsigned int) ( (y) << 13 ) | \ (unsigned int) ( (x) << 2 ) ) \ ) #define NGLE_BINC_SET_DSTADDR(fb, addr) \ WRITE_WORD(addr, fb, REG_3) #define NGLE_BINC_SET_SRCADDR(fb, addr) \ WRITE_WORD(addr, fb, REG_2) #define NGLE_BINC_SET_DSTMASK(fb, mask) \ WRITE_WORD(mask, fb, REG_22) #define NGLE_BINC_WRITE32(fb, data32) \ WRITE_WORD(data32, fb, REG_23) #define START_COLORMAPLOAD(fb, cmapBltCtlData32) \ WRITE_WORD((cmapBltCtlData32), fb, REG_38) #define SET_LENXY_START_RECFILL(fb, lenxy) \ WRITE_WORD(lenxy, fb, REG_9) static void HYPER_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable) { u32 DregsHypMiscVideo = REG_33; unsigned int value; SETUP_HW(fb); value = READ_WORD(fb, DregsHypMiscVideo); if (enable) value |= 0x0A000000; else value &= ~0x0A000000; WRITE_WORD(value, fb, 
DregsHypMiscVideo); } /* BufferNumbers used by SETUP_ATTR_ACCESS() */ #define BUFF0_CMAP0 0x00001e02 #define BUFF1_CMAP0 0x02001e02 #define BUFF1_CMAP3 0x0c001e02 #define ARTIST_CMAP0 0x00000102 #define HYPER_CMAP8 0x00000100 #define HYPER_CMAP24 0x00000800 static void SETUP_ATTR_ACCESS(struct stifb_info *fb, unsigned BufferNumber) { SETUP_HW(fb); WRITE_WORD(0x2EA0D000, fb, REG_11); WRITE_WORD(0x23000302, fb, REG_14); WRITE_WORD(BufferNumber, fb, REG_12); WRITE_WORD(0xffffffff, fb, REG_8); } static void SET_ATTR_SIZE(struct stifb_info *fb, int width, int height) { /* REG_6 seems to have special values when run on a RDI precisionbook parisc laptop (INTERNAL_EG_DX1024 or INTERNAL_EG_X1024). The values are: 0x2f0: internal (LCD) & external display enabled 0x2a0: external display only 0x000: zero on standard artist graphic cards */ WRITE_WORD(0x00000000, fb, REG_6); WRITE_WORD((width<<16) | height, fb, REG_9); WRITE_WORD(0x05000000, fb, REG_6); WRITE_WORD(0x00040001, fb, REG_9); } static void FINISH_ATTR_ACCESS(struct stifb_info *fb) { SETUP_HW(fb); WRITE_WORD(0x00000000, fb, REG_12); } static void elkSetupPlanes(struct stifb_info *fb) { SETUP_RAMDAC(fb); SETUP_FB(fb); } static void ngleSetupAttrPlanes(struct stifb_info *fb, int BufferNumber) { SETUP_ATTR_ACCESS(fb, BufferNumber); SET_ATTR_SIZE(fb, fb->info.var.xres, fb->info.var.yres); FINISH_ATTR_ACCESS(fb); SETUP_FB(fb); } static void rattlerSetupPlanes(struct stifb_info *fb) { int saved_id, y; /* Write RAMDAC pixel read mask register so all overlay * planes are display-enabled. (CRX24 uses Bt462 pixel * read mask register for overlay planes, not image planes). 
*/ CRX24_SETUP_RAMDAC(fb); /* change fb->id temporarily to fool SETUP_FB() */ saved_id = fb->id; fb->id = CRX24_OVERLAY_PLANES; SETUP_FB(fb); fb->id = saved_id; for (y = 0; y < fb->info.var.yres; ++y) memset(fb->info.screen_base + y * fb->info.fix.line_length, 0xff, fb->info.var.xres * fb->info.var.bits_per_pixel/8); CRX24_SET_OVLY_MASK(fb); SETUP_FB(fb); } #define HYPER_CMAP_TYPE 0 #define NGLE_CMAP_INDEXED0_TYPE 0 #define NGLE_CMAP_OVERLAY_TYPE 3 /* typedef of LUT (Colormap) BLT Control Register */ typedef union /* Note assumption that fields are packed left-to-right */ { u32 all; struct { unsigned enable : 1; unsigned waitBlank : 1; unsigned reserved1 : 4; unsigned lutOffset : 10; /* Within destination LUT */ unsigned lutType : 2; /* Cursor, image, overlay */ unsigned reserved2 : 4; unsigned length : 10; } fields; } NgleLutBltCtl; #if 0 static NgleLutBltCtl setNgleLutBltCtl(struct stifb_info *fb, int offsetWithinLut, int length) { NgleLutBltCtl lutBltCtl; /* set enable, zero reserved fields */ lutBltCtl.all = 0x80000000; lutBltCtl.fields.length = length; switch (fb->id) { case S9000_ID_A1439A: /* CRX24 */ if (fb->var.bits_per_pixel == 8) { lutBltCtl.fields.lutType = NGLE_CMAP_OVERLAY_TYPE; lutBltCtl.fields.lutOffset = 0; } else { lutBltCtl.fields.lutType = NGLE_CMAP_INDEXED0_TYPE; lutBltCtl.fields.lutOffset = 0 * 256; } break; case S9000_ID_ARTIST: lutBltCtl.fields.lutType = NGLE_CMAP_INDEXED0_TYPE; lutBltCtl.fields.lutOffset = 0 * 256; break; default: lutBltCtl.fields.lutType = NGLE_CMAP_INDEXED0_TYPE; lutBltCtl.fields.lutOffset = 0; break; } /* Offset points to start of LUT. 
Adjust for within LUT */ lutBltCtl.fields.lutOffset += offsetWithinLut; return lutBltCtl; } #endif static NgleLutBltCtl setHyperLutBltCtl(struct stifb_info *fb, int offsetWithinLut, int length) { NgleLutBltCtl lutBltCtl; /* set enable, zero reserved fields */ lutBltCtl.all = 0x80000000; lutBltCtl.fields.length = length; lutBltCtl.fields.lutType = HYPER_CMAP_TYPE; /* Expect lutIndex to be 0 or 1 for image cmaps, 2 or 3 for overlay cmaps */ if (fb->info.var.bits_per_pixel == 8) lutBltCtl.fields.lutOffset = 2 * 256; else lutBltCtl.fields.lutOffset = 0 * 256; /* Offset points to start of LUT. Adjust for within LUT */ lutBltCtl.fields.lutOffset += offsetWithinLut; return lutBltCtl; } static void hyperUndoITE(struct stifb_info *fb) { int nFreeFifoSlots = 0; u32 fbAddr; NGLE_LOCK(fb); GET_FIFO_SLOTS(fb, nFreeFifoSlots, 1); WRITE_WORD(0xffffffff, fb, REG_32); /* Write overlay transparency mask so only entry 255 is transparent */ /* Hardware setup for full-depth write to "magic" location */ GET_FIFO_SLOTS(fb, nFreeFifoSlots, 7); NGLE_QUICK_SET_DST_BM_ACCESS(fb, BA(IndexedDcd, Otc04, Ots08, AddrLong, BAJustPoint(0), BINovly, BAIndexBase(0))); NGLE_QUICK_SET_IMAGE_BITMAP_OP(fb, IBOvals(RopSrc, MaskAddrOffset(0), BitmapExtent08, StaticReg(0), DataDynamic, MaskOtc, BGx(0), FGx(0))); /* Now prepare to write to the "magic" location */ fbAddr = NGLE_LONG_FB_ADDRESS(0, 1532, 0); NGLE_BINC_SET_DSTADDR(fb, fbAddr); NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xffffff); NGLE_BINC_SET_DSTMASK(fb, 0xffffffff); /* Finally, write a zero to clear the mask */ NGLE_BINC_WRITE32(fb, 0); NGLE_UNLOCK(fb); } static void ngleDepth8_ClearImagePlanes(struct stifb_info *fb) { /* FIXME! */ } static void ngleDepth24_ClearImagePlanes(struct stifb_info *fb) { /* FIXME! 
*/ } static void ngleResetAttrPlanes(struct stifb_info *fb, unsigned int ctlPlaneReg) { int nFreeFifoSlots = 0; u32 packed_dst; u32 packed_len; NGLE_LOCK(fb); GET_FIFO_SLOTS(fb, nFreeFifoSlots, 4); NGLE_QUICK_SET_DST_BM_ACCESS(fb, BA(IndexedDcd, Otc32, OtsIndirect, AddrLong, BAJustPoint(0), BINattr, BAIndexBase(0))); NGLE_QUICK_SET_CTL_PLN_REG(fb, ctlPlaneReg); NGLE_SET_TRANSFERDATA(fb, 0xffffffff); NGLE_QUICK_SET_IMAGE_BITMAP_OP(fb, IBOvals(RopSrc, MaskAddrOffset(0), BitmapExtent08, StaticReg(1), DataDynamic, MaskOtc, BGx(0), FGx(0))); packed_dst = 0; packed_len = (fb->info.var.xres << 16) | fb->info.var.yres; GET_FIFO_SLOTS(fb, nFreeFifoSlots, 2); NGLE_SET_DSTXY(fb, packed_dst); SET_LENXY_START_RECFILL(fb, packed_len); /* * In order to work around an ELK hardware problem (Buffy doesn't * always flush it's buffers when writing to the attribute * planes), at least 4 pixels must be written to the attribute * planes starting at (X == 1280) and (Y != to the last Y written * by BIF): */ if (fb->id == S9000_ID_A1659A) { /* ELK_DEVICE_ID */ /* It's safe to use scanline zero: */ packed_dst = (1280 << 16); GET_FIFO_SLOTS(fb, nFreeFifoSlots, 2); NGLE_SET_DSTXY(fb, packed_dst); packed_len = (4 << 16) | 1; SET_LENXY_START_RECFILL(fb, packed_len); } /* ELK Hardware Kludge */ /**** Finally, set the Control Plane Register back to zero: ****/ GET_FIFO_SLOTS(fb, nFreeFifoSlots, 1); NGLE_QUICK_SET_CTL_PLN_REG(fb, 0); NGLE_UNLOCK(fb); } static void ngleClearOverlayPlanes(struct stifb_info *fb, int mask, int data) { int nFreeFifoSlots = 0; u32 packed_dst; u32 packed_len; NGLE_LOCK(fb); /* Hardware setup */ GET_FIFO_SLOTS(fb, nFreeFifoSlots, 8); NGLE_QUICK_SET_DST_BM_ACCESS(fb, BA(IndexedDcd, Otc04, Ots08, AddrLong, BAJustPoint(0), BINovly, BAIndexBase(0))); NGLE_SET_TRANSFERDATA(fb, 0xffffffff); /* Write foreground color */ NGLE_REALLY_SET_IMAGE_FG_COLOR(fb, data); NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, mask); packed_dst = 0; packed_len = (fb->info.var.xres << 16) | fb->info.var.yres; 
NGLE_SET_DSTXY(fb, packed_dst); /* Write zeroes to overlay planes */ NGLE_QUICK_SET_IMAGE_BITMAP_OP(fb, IBOvals(RopSrc, MaskAddrOffset(0), BitmapExtent08, StaticReg(0), DataDynamic, MaskOtc, BGx(0), FGx(0))); SET_LENXY_START_RECFILL(fb, packed_len); NGLE_UNLOCK(fb); } static void hyperResetPlanes(struct stifb_info *fb, int enable) { unsigned int controlPlaneReg; NGLE_LOCK(fb); if (IS_24_DEVICE(fb)) if (fb->info.var.bits_per_pixel == 32) controlPlaneReg = 0x04000F00; else controlPlaneReg = 0x00000F00; /* 0x00000800 should be enough, but lets clear all 4 bits */ else controlPlaneReg = 0x00000F00; /* 0x00000100 should be enough, but lets clear all 4 bits */ switch (enable) { case ENABLE: /* clear screen */ if (IS_24_DEVICE(fb)) ngleDepth24_ClearImagePlanes(fb); else ngleDepth8_ClearImagePlanes(fb); /* Paint attribute planes for default case. * On Hyperdrive, this means all windows using overlay cmap 0. */ ngleResetAttrPlanes(fb, controlPlaneReg); /* clear overlay planes */ ngleClearOverlayPlanes(fb, 0xff, 255); /************************************************** ** Also need to counteract ITE settings **************************************************/ hyperUndoITE(fb); break; case DISABLE: /* clear screen */ if (IS_24_DEVICE(fb)) ngleDepth24_ClearImagePlanes(fb); else ngleDepth8_ClearImagePlanes(fb); ngleResetAttrPlanes(fb, controlPlaneReg); ngleClearOverlayPlanes(fb, 0xff, 0); break; case -1: /* RESET */ hyperUndoITE(fb); ngleResetAttrPlanes(fb, controlPlaneReg); break; } NGLE_UNLOCK(fb); } /* Return pointer to in-memory structure holding ELK device-dependent ROM values. */ static void ngleGetDeviceRomData(struct stifb_info *fb) { #if 0 XXX: FIXME: !!! 
int *pBytePerLongDevDepData;/* data byte == LSB */ int *pRomTable; NgleDevRomData *pPackedDevRomData; int sizePackedDevRomData = sizeof(*pPackedDevRomData); char *pCard8; int i; char *mapOrigin = NULL; int romTableIdx; pPackedDevRomData = fb->ngle_rom; SETUP_HW(fb); if (fb->id == S9000_ID_ARTIST) { pPackedDevRomData->cursor_pipeline_delay = 4; pPackedDevRomData->video_interleaves = 4; } else { /* Get pointer to unpacked byte/long data in ROM */ pBytePerLongDevDepData = fb->sti->regions[NGLEDEVDEPROM_CRT_REGION]; /* Tomcat supports several resolutions: 1280x1024, 1024x768, 640x480 */ if (fb->id == S9000_ID_TOMCAT) { /* jump to the correct ROM table */ GET_ROMTABLE_INDEX(romTableIdx); while (romTableIdx > 0) { pCard8 = (Card8 *) pPackedDevRomData; pRomTable = pBytePerLongDevDepData; /* Pack every fourth byte from ROM into structure */ for (i = 0; i < sizePackedDevRomData; i++) { *pCard8++ = (Card8) (*pRomTable++); } pBytePerLongDevDepData = (Card32 *) ((Card8 *) pBytePerLongDevDepData + pPackedDevRomData->sizeof_ngle_data); romTableIdx--; } } pCard8 = (Card8 *) pPackedDevRomData; /* Pack every fourth byte from ROM into structure */ for (i = 0; i < sizePackedDevRomData; i++) { *pCard8++ = (Card8) (*pBytePerLongDevDepData++); } } SETUP_FB(fb); #endif } #define HYPERBOWL_MODE_FOR_8_OVER_88_LUT0_NO_TRANSPARENCIES 4 #define HYPERBOWL_MODE01_8_24_LUT0_TRANSPARENT_LUT1_OPAQUE 8 #define HYPERBOWL_MODE01_8_24_LUT0_OPAQUE_LUT1_OPAQUE 10 #define HYPERBOWL_MODE2_8_24 15 /* HCRX specific boot-time initialization */ static void __init SETUP_HCRX(struct stifb_info *fb) { int hyperbowl; int nFreeFifoSlots = 0; if (fb->id != S9000_ID_HCRX) return; /* Initialize Hyperbowl registers */ GET_FIFO_SLOTS(fb, nFreeFifoSlots, 7); if (IS_24_DEVICE(fb)) { hyperbowl = (fb->info.var.bits_per_pixel == 32) ? 
HYPERBOWL_MODE01_8_24_LUT0_TRANSPARENT_LUT1_OPAQUE : HYPERBOWL_MODE01_8_24_LUT0_OPAQUE_LUT1_OPAQUE; /* First write to Hyperbowl must happen twice (bug) */ WRITE_WORD(hyperbowl, fb, REG_40); WRITE_WORD(hyperbowl, fb, REG_40); WRITE_WORD(HYPERBOWL_MODE2_8_24, fb, REG_39); WRITE_WORD(0x014c0148, fb, REG_42); /* Set lut 0 to be the direct color */ WRITE_WORD(0x404c4048, fb, REG_43); WRITE_WORD(0x034c0348, fb, REG_44); WRITE_WORD(0x444c4448, fb, REG_45); } else { hyperbowl = HYPERBOWL_MODE_FOR_8_OVER_88_LUT0_NO_TRANSPARENCIES; /* First write to Hyperbowl must happen twice (bug) */ WRITE_WORD(hyperbowl, fb, REG_40); WRITE_WORD(hyperbowl, fb, REG_40); WRITE_WORD(0x00000000, fb, REG_42); WRITE_WORD(0x00000000, fb, REG_43); WRITE_WORD(0x00000000, fb, REG_44); WRITE_WORD(0x444c4048, fb, REG_45); } } /* ------------------- driver specific functions --------------------------- */ static int stifb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { struct stifb_info *fb = (struct stifb_info *) info; u32 color; if (regno >= NR_PALETTE) return 1; red >>= 8; green >>= 8; blue >>= 8; DEBUG_OFF(); START_IMAGE_COLORMAP_ACCESS(fb); if (unlikely(fb->info.var.grayscale)) { /* gray = 0.30*R + 0.59*G + 0.11*B */ color = ((red * 77) + (green * 151) + (blue * 28)) >> 8; } else { color = ((red << 16) | (green << 8) | (blue)); } if (fb->info.fix.visual == FB_VISUAL_DIRECTCOLOR) { struct fb_var_screeninfo *var = &fb->info.var; if (regno < 16) ((u32 *)fb->info.pseudo_palette)[regno] = regno << var->red.offset | regno << var->green.offset | regno << var->blue.offset; } WRITE_IMAGE_COLOR(fb, regno, color); if (fb->id == S9000_ID_HCRX) { NgleLutBltCtl lutBltCtl; lutBltCtl = setHyperLutBltCtl(fb, 0, /* Offset w/i LUT */ 256); /* Load entire LUT */ NGLE_BINC_SET_SRCADDR(fb, NGLE_LONG_FB_ADDRESS(0, 0x100, 0)); /* 0x100 is same as used in WRITE_IMAGE_COLOR() */ START_COLORMAPLOAD(fb, lutBltCtl.all); SETUP_FB(fb); } else { /* cleanup colormap hardware */ 
FINISH_IMAGE_COLORMAP_ACCESS(fb); } DEBUG_ON(); return 0; } static int stifb_blank(int blank_mode, struct fb_info *info) { struct stifb_info *fb = (struct stifb_info *) info; int enable = (blank_mode == 0) ? ENABLE : DISABLE; switch (fb->id) { case S9000_ID_A1439A: CRX24_ENABLE_DISABLE_DISPLAY(fb, enable); break; case CRT_ID_VISUALIZE_EG: case S9000_ID_ARTIST: ARTIST_ENABLE_DISABLE_DISPLAY(fb, enable); break; case S9000_ID_HCRX: HYPER_ENABLE_DISABLE_DISPLAY(fb, enable); break; case S9000_ID_A1659A: /* fall through */ case S9000_ID_TIMBER: case CRX24_OVERLAY_PLANES: default: ENABLE_DISABLE_DISPLAY(fb, enable); break; } SETUP_FB(fb); return 0; } static void __init stifb_init_display(struct stifb_info *fb) { int id = fb->id; SETUP_FB(fb); /* HCRX specific initialization */ SETUP_HCRX(fb); /* if (id == S9000_ID_HCRX) hyperInitSprite(fb); else ngleInitSprite(fb); */ /* Initialize the image planes. */ switch (id) { case S9000_ID_HCRX: hyperResetPlanes(fb, ENABLE); break; case S9000_ID_A1439A: rattlerSetupPlanes(fb); break; case S9000_ID_A1659A: case S9000_ID_ARTIST: case CRT_ID_VISUALIZE_EG: elkSetupPlanes(fb); break; } /* Clear attribute planes on non HCRX devices. 
*/ switch (id) { case S9000_ID_A1659A: case S9000_ID_A1439A: if (fb->info.var.bits_per_pixel == 32) ngleSetupAttrPlanes(fb, BUFF1_CMAP3); else { ngleSetupAttrPlanes(fb, BUFF1_CMAP0); } if (id == S9000_ID_A1439A) ngleClearOverlayPlanes(fb, 0xff, 0); break; case S9000_ID_ARTIST: case CRT_ID_VISUALIZE_EG: if (fb->info.var.bits_per_pixel == 32) ngleSetupAttrPlanes(fb, BUFF1_CMAP3); else { ngleSetupAttrPlanes(fb, ARTIST_CMAP0); } break; } stifb_blank(0, (struct fb_info *)fb); /* 0=enable screen */ SETUP_FB(fb); } /* ------------ Interfaces to hardware functions ------------ */ static struct fb_ops stifb_ops = { .owner = THIS_MODULE, .fb_setcolreg = stifb_setcolreg, .fb_blank = stifb_blank, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; /* * Initialization */ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) { struct fb_fix_screeninfo *fix; struct fb_var_screeninfo *var; struct stifb_info *fb; struct fb_info *info; unsigned long sti_rom_address; char *dev_name; int bpp, xres, yres; fb = kzalloc(sizeof(*fb), GFP_ATOMIC); if (!fb) { printk(KERN_ERR "stifb: Could not allocate stifb structure\n"); return -ENODEV; } info = &fb->info; /* set struct to a known state */ fix = &info->fix; var = &info->var; fb->sti = sti; /* store upper 32bits of the graphics id */ fb->id = fb->sti->graphics_id[0]; /* only supported cards are allowed */ switch (fb->id) { case CRT_ID_VISUALIZE_EG: /* Visualize cards can run either in "double buffer" or "standard" mode. Depending on the mode, the card reports a different device name, e.g. "INTERNAL_EG_DX1024" in double buffer mode and "INTERNAL_EG_X1024" in standard mode. Since this driver only supports standard mode, we check if the device name contains the string "DX" and tell the user how to reconfigure the card. 
*/ if (strstr(sti->outptr.dev_name, "DX")) { printk(KERN_WARNING "WARNING: stifb framebuffer driver does not support '%s' in double-buffer mode.\n" "WARNING: Please disable the double-buffer mode in IPL menu (the PARISC-BIOS).\n", sti->outptr.dev_name); goto out_err0; } /* fall though */ case S9000_ID_ARTIST: case S9000_ID_HCRX: case S9000_ID_TIMBER: case S9000_ID_A1659A: case S9000_ID_A1439A: break; default: printk(KERN_WARNING "stifb: '%s' (id: 0x%08x) not supported.\n", sti->outptr.dev_name, fb->id); goto out_err0; } /* default to 8 bpp on most graphic chips */ bpp = 8; xres = sti_onscreen_x(fb->sti); yres = sti_onscreen_y(fb->sti); ngleGetDeviceRomData(fb); /* get (virtual) io region base addr */ fix->mmio_start = REGION_BASE(fb,2); fix->mmio_len = 0x400000; /* Reject any device not in the NGLE family */ switch (fb->id) { case S9000_ID_A1659A: /* CRX/A1659A */ break; case S9000_ID_ELM: /* GRX, grayscale but else same as A1659A */ var->grayscale = 1; fb->id = S9000_ID_A1659A; break; case S9000_ID_TIMBER: /* HP9000/710 Any (may be a grayscale device) */ dev_name = fb->sti->outptr.dev_name; if (strstr(dev_name, "GRAYSCALE") || strstr(dev_name, "Grayscale") || strstr(dev_name, "grayscale")) var->grayscale = 1; break; case S9000_ID_TOMCAT: /* Dual CRX, behaves else like a CRX */ /* FIXME: TomCat supports two heads: * fb.iobase = REGION_BASE(fb_info,3); * fb.screen_base = ioremap_nocache(REGION_BASE(fb_info,2),xxx); * for now we only support the left one ! 
*/ xres = fb->ngle_rom.x_size_visible; yres = fb->ngle_rom.y_size_visible; fb->id = S9000_ID_A1659A; break; case S9000_ID_A1439A: /* CRX24/A1439A */ bpp = 32; break; case S9000_ID_HCRX: /* Hyperdrive/HCRX */ memset(&fb->ngle_rom, 0, sizeof(fb->ngle_rom)); if ((fb->sti->regions_phys[0] & 0xfc000000) == (fb->sti->regions_phys[2] & 0xfc000000)) sti_rom_address = F_EXTEND(fb->sti->regions_phys[0]); else sti_rom_address = F_EXTEND(fb->sti->regions_phys[1]); fb->deviceSpecificConfig = gsc_readl(sti_rom_address); if (IS_24_DEVICE(fb)) { if (bpp_pref == 8 || bpp_pref == 32) bpp = bpp_pref; else bpp = 32; } else bpp = 8; READ_WORD(fb, REG_15); SETUP_HW(fb); break; case CRT_ID_VISUALIZE_EG: case S9000_ID_ARTIST: /* Artist */ break; default: #ifdef FALLBACK_TO_1BPP printk(KERN_WARNING "stifb: Unsupported graphics card (id=0x%08x) " "- now trying 1bpp mode instead\n", fb->id); bpp = 1; /* default to 1 bpp */ break; #else printk(KERN_WARNING "stifb: Unsupported graphics card (id=0x%08x) " "- skipping.\n", fb->id); goto out_err0; #endif } /* get framebuffer physical and virtual base addr & len (64bit ready) */ fix->smem_start = F_EXTEND(fb->sti->regions_phys[1]); fix->smem_len = fb->sti->regions[1].region_desc.length * 4096; fix->line_length = (fb->sti->glob_cfg->total_x * bpp) / 8; if (!fix->line_length) fix->line_length = 2048; /* default */ /* limit fbsize to max visible screen size */ if (fix->smem_len > yres*fix->line_length) fix->smem_len = yres*fix->line_length; fix->accel = FB_ACCEL_NONE; switch (bpp) { case 1: fix->type = FB_TYPE_PLANES; /* well, sort of */ fix->visual = FB_VISUAL_MONO10; var->red.length = var->green.length = var->blue.length = 1; break; case 8: fix->type = FB_TYPE_PACKED_PIXELS; fix->visual = FB_VISUAL_PSEUDOCOLOR; var->red.length = var->green.length = var->blue.length = 8; break; case 32: fix->type = FB_TYPE_PACKED_PIXELS; fix->visual = FB_VISUAL_DIRECTCOLOR; var->red.length = var->green.length = var->blue.length = var->transp.length = 8; 
var->blue.offset = 0; var->green.offset = 8; var->red.offset = 16; var->transp.offset = 24; break; default: break; } var->xres = var->xres_virtual = xres; var->yres = var->yres_virtual = yres; var->bits_per_pixel = bpp; strcpy(fix->id, "stifb"); info->fbops = &stifb_ops; info->screen_base = ioremap_nocache(REGION_BASE(fb,1), fix->smem_len); info->screen_size = fix->smem_len; info->flags = FBINFO_DEFAULT; info->pseudo_palette = &fb->pseudo_palette; /* This has to be done !!! */ if (fb_alloc_cmap(&info->cmap, NR_PALETTE, 0)) goto out_err1; stifb_init_display(fb); if (!request_mem_region(fix->smem_start, fix->smem_len, "stifb fb")) { printk(KERN_ERR "stifb: cannot reserve fb region 0x%04lx-0x%04lx\n", fix->smem_start, fix->smem_start+fix->smem_len); goto out_err2; } if (!request_mem_region(fix->mmio_start, fix->mmio_len, "stifb mmio")) { printk(KERN_ERR "stifb: cannot reserve sti mmio region 0x%04lx-0x%04lx\n", fix->mmio_start, fix->mmio_start+fix->mmio_len); goto out_err3; } if (register_framebuffer(&fb->info) < 0) goto out_err4; sti->info = info; /* save for unregister_framebuffer() */ printk(KERN_INFO "fb%d: %s %dx%d-%d frame buffer device, %s, id: %04x, mmio: 0x%04lx\n", fb->info.node, fix->id, var->xres, var->yres, var->bits_per_pixel, sti->outptr.dev_name, fb->id, fix->mmio_start); return 0; out_err4: release_mem_region(fix->mmio_start, fix->mmio_len); out_err3: release_mem_region(fix->smem_start, fix->smem_len); out_err2: fb_dealloc_cmap(&info->cmap); out_err1: iounmap(info->screen_base); out_err0: kfree(fb); return -ENXIO; } static int stifb_disabled __initdata; int __init stifb_setup(char *options); static int __init stifb_init(void) { struct sti_struct *sti; struct sti_struct *def_sti; int i; #ifndef MODULE char *option = NULL; if (fb_get_options("stifb", &option)) return -ENODEV; stifb_setup(option); #endif if (stifb_disabled) { printk(KERN_INFO "stifb: disabled by \"stifb=off\" kernel parameter\n"); return -ENXIO; } def_sti = sti_get_rom(0); if (def_sti) { 
for (i = 1; i <= MAX_STI_ROMS; i++) { sti = sti_get_rom(i); if (!sti) break; if (sti == def_sti) { stifb_init_fb(sti, stifb_bpp_pref[i - 1]); break; } } } for (i = 1; i <= MAX_STI_ROMS; i++) { sti = sti_get_rom(i); if (!sti) break; if (sti == def_sti) continue; stifb_init_fb(sti, stifb_bpp_pref[i - 1]); } return 0; } /* * Cleanup */ static void __exit stifb_cleanup(void) { struct sti_struct *sti; int i; for (i = 1; i <= MAX_STI_ROMS; i++) { sti = sti_get_rom(i); if (!sti) break; if (sti->info) { struct fb_info *info = sti->info; unregister_framebuffer(sti->info); release_mem_region(info->fix.mmio_start, info->fix.mmio_len); release_mem_region(info->fix.smem_start, info->fix.smem_len); if (info->screen_base) iounmap(info->screen_base); fb_dealloc_cmap(&info->cmap); framebuffer_release(info); } sti->info = NULL; } } int __init stifb_setup(char *options) { int i; if (!options || !*options) return 1; if (strncmp(options, "off", 3) == 0) { stifb_disabled = 1; options += 3; } if (strncmp(options, "bpp", 3) == 0) { options += 3; for (i = 0; i < MAX_STI_ROMS; i++) { if (*options++ != ':') break; stifb_bpp_pref[i] = simple_strtoul(options, &options, 10); } } return 1; } __setup("stifb=", stifb_setup); module_init(stifb_init); module_exit(stifb_cleanup); MODULE_AUTHOR("Helge Deller <deller@gmx.de>, Thomas Bogendoerfer <tsbogend@alpha.franken.de>"); MODULE_DESCRIPTION("Framebuffer driver for HP's NGLE series graphics cards in HP PARISC machines"); MODULE_LICENSE("GPL v2");
gpl-2.0
yank555-lu/private_msm8660_ics
arch/avr32/boards/atngw100/evklcd10x.c
10911
4643
/*
 * Board-specific setup code for the ATEVKLCD10X addon board to the ATNGW100
 * Network Gateway
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/gpio.h>
#include <linux/fb.h>
#include <linux/platform_device.h>

#include <video/atmel_lcdc.h>

#include <asm/setup.h>

#include <mach/at32ap700x.h>
#include <mach/portmux.h>
#include <mach/board.h>

#include <sound/atmel-ac97c.h>

static struct ac97c_platform_data __initdata ac97c0_data = {
	.reset_pin	= GPIO_PIN_PB(19),
};

/*
 * Exactly one of the three EVKLCD10X panel options is selected in Kconfig.
 * Use #elif defined(...) rather than #elif CONFIG_...: the bare form only
 * works because an undefined macro evaluates to 0 in #elif, and it triggers
 * -Wundef warnings.
 */
#ifdef CONFIG_BOARD_ATNGW100_EVKLCD10X_VGA
/* Kyocera TCG057VGLAD, 5.7" 640x480 VGA TFT panel */
static struct fb_videomode __initdata tcg057vglad_modes[] = {
	{
		.name		= "640x480 @ 50",
		.refresh	= 50,
		.xres		= 640,		.yres		= 480,
		.pixclock	= KHZ2PICOS(25180),

		.left_margin	= 64,		.right_margin	= 96,
		.upper_margin	= 34,		.lower_margin	= 11,
		.hsync_len	= 64,		.vsync_len	= 15,

		.sync		= 0,
		.vmode		= FB_VMODE_NONINTERLACED,
	},
};

static struct fb_monspecs __initdata atevklcd10x_default_monspecs = {
	.manufacturer	= "KYO",
	.monitor	= "TCG057VGLAD",
	.modedb		= tcg057vglad_modes,
	.modedb_len	= ARRAY_SIZE(tcg057vglad_modes),
	.hfmin		= 19948,
	.hfmax		= 31478,
	.vfmin		= 50,
	.vfmax		= 67,
	.dclkmax	= 28330000,
};

static struct atmel_lcdfb_info __initdata atevklcd10x_lcdc_data = {
	.default_bpp		= 16,
	.default_dmacon		= ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
	.default_lcdcon2	= (ATMEL_LCDC_DISTYPE_TFT
				   | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE
				   | ATMEL_LCDC_MEMOR_BIG),
	.default_monspecs	= &atevklcd10x_default_monspecs,
	.guard_time		= 2,
};
#elif defined(CONFIG_BOARD_ATNGW100_EVKLCD10X_QVGA)
/* Kyocera TCG057QVLAD, 5.7" 320x240 QVGA TFT panel */
static struct fb_videomode __initdata tcg057qvlad_modes[] = {
	{
		.name		= "320x240 @ 50",
		.refresh	= 50,
		.xres		= 320,		.yres		= 240,
		.pixclock	= KHZ2PICOS(6300),

		.left_margin	= 34,		.right_margin	= 46,
		.upper_margin	= 7,		.lower_margin	= 15,
		.hsync_len	= 64,		.vsync_len	= 12,

		.sync		= 0,
		.vmode		= FB_VMODE_NONINTERLACED,
	},
};

static struct fb_monspecs __initdata atevklcd10x_default_monspecs = {
	.manufacturer	= "KYO",
	.monitor	= "TCG057QVLAD",
	.modedb		= tcg057qvlad_modes,
	.modedb_len	= ARRAY_SIZE(tcg057qvlad_modes),
	.hfmin		= 19948,
	.hfmax		= 31478,
	.vfmin		= 50,
	.vfmax		= 67,
	.dclkmax	= 7000000,
};

static struct atmel_lcdfb_info __initdata atevklcd10x_lcdc_data = {
	.default_bpp		= 16,
	.default_dmacon		= ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
	.default_lcdcon2	= (ATMEL_LCDC_DISTYPE_TFT
				   | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE
				   | ATMEL_LCDC_MEMOR_BIG),
	.default_monspecs	= &atevklcd10x_default_monspecs,
	.guard_time		= 2,
};
#elif defined(CONFIG_BOARD_ATNGW100_EVKLCD10X_POW_QVGA)
/* Powertip PH320240T, 3.5" 320x240 QVGA TFT panel */
static struct fb_videomode __initdata ph320240t_modes[] = {
	{
		.name		= "320x240 @ 60",
		.refresh	= 60,
		.xres		= 320,		.yres		= 240,
		.pixclock	= KHZ2PICOS(6300),

		.left_margin	= 38,		.right_margin	= 20,
		.upper_margin	= 15,		.lower_margin	= 5,
		.hsync_len	= 30,		.vsync_len	= 3,

		.sync		= 0,
		.vmode		= FB_VMODE_NONINTERLACED,
	},
};

static struct fb_monspecs __initdata atevklcd10x_default_monspecs = {
	.manufacturer	= "POW",
	.monitor	= "PH320240T",
	.modedb		= ph320240t_modes,
	.modedb_len	= ARRAY_SIZE(ph320240t_modes),
	.hfmin		= 14400,
	.hfmax		= 21600,
	.vfmin		= 50,
	.vfmax		= 90,
	.dclkmax	= 6400000,
};

static struct atmel_lcdfb_info __initdata atevklcd10x_lcdc_data = {
	.default_bpp		= 16,
	.default_dmacon		= ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
	.default_lcdcon2	= (ATMEL_LCDC_DISTYPE_TFT
				   | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE
				   | ATMEL_LCDC_MEMOR_BIG),
	.default_monspecs	= &atevklcd10x_default_monspecs,
	.guard_time		= 2,
};
#endif

/*
 * Switch the LCD backlight boost regulator on (on != 0) or off via PB15.
 * Called by the atmel_lcdfb driver through atmel_lcdfb_power_control.
 */
static void atevklcd10x_lcdc_power_control(int on)
{
	gpio_set_value(GPIO_PIN_PB(15), on);
}

/*
 * Register the LCD controller and AC97 codec devices for the EVKLCD10X
 * addon board. Runs at postcore_initcall time; always returns 0.
 */
static int __init atevklcd10x_init(void)
{
	/* PB15 is connected to the enable line on the boost regulator
	 * controlling the backlight for the LCD panel.
	 */
	at32_select_gpio(GPIO_PIN_PB(15), AT32_GPIOF_OUTPUT);
	/* NOTE(review): gpio_request() result is ignored here, as in the
	 * original board code — a failure would only mean the line is
	 * already claimed. */
	gpio_request(GPIO_PIN_PB(15), "backlight");
	gpio_direction_output(GPIO_PIN_PB(15), 0);

	atevklcd10x_lcdc_data.atmel_lcdfb_power_control =
		atevklcd10x_lcdc_power_control;

	at32_add_device_lcdc(0, &atevklcd10x_lcdc_data,
			fbmem_start, fbmem_size,
#ifdef CONFIG_BOARD_ATNGW100_MKII
			ATMEL_LCDC_PRI_18BIT | ATMEL_LCDC_PC_DVAL
#else
			ATMEL_LCDC_ALT_18BIT | ATMEL_LCDC_PE_DVAL
#endif
			);

	at32_add_device_ac97c(0, &ac97c0_data, AC97C_BOTH);

	return 0;
}
postcore_initcall(atevklcd10x_init);
gpl-2.0
wzhy90/android_kernel_sony_msm8974ab
drivers/net/irda/ma600-sir.c
12447
6887
/********************************************************************* * * Filename: ma600.c * Version: 0.1 * Description: Implementation of the MA600 dongle * Status: Experimental. * Author: Leung <95Etwl@alumni.ee.ust.hk> http://www.engsvr.ust/~eetwl95 * Created at: Sat Jun 10 20:02:35 2000 * Modified at: Sat Aug 16 09:34:13 2003 * Modified by: Martin Diehl <mad@mdiehl.de> (modified for new sir_dev) * * Note: very thanks to Mr. Maru Wang <maru@mobileaction.com.tw> for providing * information on the MA600 dongle * * Copyright (c) 2000 Leung, All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA * ********************************************************************/ #include <linux/module.h> #include <linux/delay.h> #include <linux/init.h> #include <net/irda/irda.h> #include "sir-dev.h" static int ma600_open(struct sir_dev *); static int ma600_close(struct sir_dev *); static int ma600_change_speed(struct sir_dev *, unsigned); static int ma600_reset(struct sir_dev *); /* control byte for MA600 */ #define MA600_9600 0x00 #define MA600_19200 0x01 #define MA600_38400 0x02 #define MA600_57600 0x03 #define MA600_115200 0x04 #define MA600_DEV_ID1 0x05 #define MA600_DEV_ID2 0x06 #define MA600_2400 0x08 static struct dongle_driver ma600 = { .owner = THIS_MODULE, .driver_name = "MA600", .type = IRDA_MA600_DONGLE, .open = ma600_open, .close = ma600_close, .reset = ma600_reset, .set_speed = ma600_change_speed, }; static int __init ma600_sir_init(void) { IRDA_DEBUG(2, "%s()\n", __func__); return irda_register_dongle(&ma600); } static void __exit ma600_sir_cleanup(void) { IRDA_DEBUG(2, "%s()\n", __func__); irda_unregister_dongle(&ma600); } /* Power on: (0) Clear RTS and DTR for 1 second (1) Set RTS and DTR for 1 second (2) 9600 bps now Note: assume RTS, DTR are clear before */ static int ma600_open(struct sir_dev *dev) { struct qos_info *qos = &dev->qos; IRDA_DEBUG(2, "%s()\n", __func__); sirdev_set_dtr_rts(dev, TRUE, TRUE); /* Explicitly set the speeds we can accept */ qos->baud_rate.bits &= IR_2400|IR_9600|IR_19200|IR_38400 |IR_57600|IR_115200; /* Hm, 0x01 means 10ms - for >= 1ms we would need 0x07 */ qos->min_turn_time.bits = 0x01; /* Needs at least 1 ms */ irda_qos_bits_to_value(qos); /* irda thread waits 50 msec for power settling */ return 0; } static int ma600_close(struct sir_dev *dev) { IRDA_DEBUG(2, "%s()\n", __func__); /* Power off dongle */ 
sirdev_set_dtr_rts(dev, FALSE, FALSE); return 0; } static __u8 get_control_byte(__u32 speed) { __u8 byte; switch (speed) { default: case 115200: byte = MA600_115200; break; case 57600: byte = MA600_57600; break; case 38400: byte = MA600_38400; break; case 19200: byte = MA600_19200; break; case 9600: byte = MA600_9600; break; case 2400: byte = MA600_2400; break; } return byte; } /* * Function ma600_change_speed (dev, speed) * * Set the speed for the MA600 type dongle. * * The dongle has already been reset to a known state (dongle default) * We cycle through speeds by pulsing RTS low and then high. */ /* * Function ma600_change_speed (dev, speed) * * Set the speed for the MA600 type dongle. * * Algorithm * 1. Reset (already done by irda thread state machine) * 2. clear RTS, set DTR and wait for 1ms * 3. send Control Byte to the MA600 through TXD to set new baud rate * wait until the stop bit of Control Byte is sent (for 9600 baud rate, * it takes about 10 msec) * 4. set RTS, set DTR (return to NORMAL Operation) * 5. wait at least 10 ms, new setting (baud rate, etc) takes effect here * after */ /* total delays are only about 20ms - let's just sleep for now to * avoid the state machine complexity before we get things working */ static int ma600_change_speed(struct sir_dev *dev, unsigned speed) { u8 byte; IRDA_DEBUG(2, "%s(), speed=%d (was %d)\n", __func__, speed, dev->speed); /* dongle already reset, dongle and port at default speed (9600) */ /* Set RTS low for 1 ms */ sirdev_set_dtr_rts(dev, TRUE, FALSE); mdelay(1); /* Write control byte */ byte = get_control_byte(speed); sirdev_raw_write(dev, &byte, sizeof(byte)); /* Wait at least 10ms: fake wait_until_sent - 10 bits at 9600 baud*/ msleep(15); /* old ma600 uses 15ms */ #if 1 /* read-back of the control byte. ma600 is the first dongle driver * which uses this so there might be some unidentified issues. * Disable this in case of problems with readback. 
*/ sirdev_raw_read(dev, &byte, sizeof(byte)); if (byte != get_control_byte(speed)) { IRDA_WARNING("%s(): bad control byte read-back %02x != %02x\n", __func__, (unsigned) byte, (unsigned) get_control_byte(speed)); return -1; } else IRDA_DEBUG(2, "%s() control byte write read OK\n", __func__); #endif /* Set DTR, Set RTS */ sirdev_set_dtr_rts(dev, TRUE, TRUE); /* Wait at least 10ms */ msleep(10); /* dongle is now switched to the new speed */ dev->speed = speed; return 0; } /* * Function ma600_reset (dev) * * This function resets the ma600 dongle. * * Algorithm: * 0. DTR=0, RTS=1 and wait 10 ms * 1. DTR=1, RTS=1 and wait 10 ms * 2. 9600 bps now */ /* total delays are only about 20ms - let's just sleep for now to * avoid the state machine complexity before we get things working */ static int ma600_reset(struct sir_dev *dev) { IRDA_DEBUG(2, "%s()\n", __func__); /* Reset the dongle : set DTR low for 10 ms */ sirdev_set_dtr_rts(dev, FALSE, TRUE); msleep(10); /* Go back to normal mode */ sirdev_set_dtr_rts(dev, TRUE, TRUE); msleep(10); dev->speed = 9600; /* That's the dongle-default */ return 0; } MODULE_AUTHOR("Leung <95Etwl@alumni.ee.ust.hk> http://www.engsvr.ust/~eetwl95"); MODULE_DESCRIPTION("MA600 dongle driver version 0.1"); MODULE_LICENSE("GPL"); MODULE_ALIAS("irda-dongle-11"); /* IRDA_MA600_DONGLE */ module_init(ma600_sir_init); module_exit(ma600_sir_cleanup);
gpl-2.0
TeamExodus/kernel_yu_msm8916
fs/ntfs/mst.c
15007
7109
/*
 * mst.c - NTFS multi sector transfer protection handling code. Part of the
 * Linux-NTFS project.
 *
 * Copyright (c) 2001-2004 Anton Altaparmakov
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the Linux-NTFS
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "ntfs.h"

/**
 * post_read_mst_fixup - deprotect multi sector transfer protected data
 * @b:		pointer to the data to deprotect
 * @size:	size in bytes of @b
 *
 * Perform the necessary post read multi sector transfer fixup and detect the
 * presence of incomplete multi sector transfers. - In that case, overwrite the
 * magic of the ntfs record header being processed with "BAAD" (in memory
 * only!) and abort processing.
 *
 * Return 0 on success and -EINVAL on error ("BAAD" magic will be present).
 *
 * NOTE: We consider the absence / invalidity of an update sequence array to
 * mean that the structure is not protected at all and hence doesn't need to
 * be fixed up. Thus, we return success and not failure in this case. This is
 * in contrast to pre_write_mst_fixup(), see below.
 */
int post_read_mst_fixup(NTFS_RECORD *b, const u32 size)
{
	u16 usa_ofs, usa_count, usn;
	u16 *usa_pos, *data_pos;

	/* Setup the variables. */
	usa_ofs = le16_to_cpu(b->usa_ofs);
	/* Decrement usa_count to get number of fixups. */
	usa_count = le16_to_cpu(b->usa_count) - 1;
	/* Size and alignment checks. */
	if ( size & (NTFS_BLOCK_SIZE - 1)	||
	     usa_ofs & 1			||
	     usa_ofs + (usa_count * 2) > size	||
	     (size >> NTFS_BLOCK_SIZE_BITS) != usa_count)
		return 0;
	/* Position of usn in update sequence array. */
	usa_pos = (u16*)b + usa_ofs/sizeof(u16);
	/*
	 * The update sequence number which has to be equal to each of the
	 * u16 values before they are fixed up. Note no need to care for
	 * endianness since we are comparing and moving data for on disk
	 * structures which means the data is consistent. - If it is
	 * consistenty the wrong endianness it doesn't make any difference.
	 */
	usn = *usa_pos;
	/*
	 * Position in protected data of first u16 that needs fixing up.
	 */
	data_pos = (u16*)b + NTFS_BLOCK_SIZE/sizeof(u16) - 1;
	/*
	 * Check for incomplete multi sector transfer(s).
	 */
	while (usa_count--) {
		if (*data_pos != usn) {
			/*
			 * Incomplete multi sector transfer detected! )-:
			 * Set the magic to "BAAD" and return failure.
			 * Note that magic_BAAD is already converted to le32.
			 */
			b->magic = magic_BAAD;
			return -EINVAL;
		}
		data_pos += NTFS_BLOCK_SIZE/sizeof(u16);
	}
	/* Re-setup the variables. */
	usa_count = le16_to_cpu(b->usa_count) - 1;
	data_pos = (u16*)b + NTFS_BLOCK_SIZE/sizeof(u16) - 1;
	/* Fixup all sectors. */
	while (usa_count--) {
		/*
		 * Increment position in usa and restore original data from
		 * the usa into the data buffer.
		 */
		*data_pos = *(++usa_pos);
		/* Increment position in data as well. */
		data_pos += NTFS_BLOCK_SIZE/sizeof(u16);
	}
	return 0;
}

/**
 * pre_write_mst_fixup - apply multi sector transfer protection
 * @b:		pointer to the data to protect
 * @size:	size in bytes of @b
 *
 * Perform the necessary pre write multi sector transfer fixup on the data
 * pointer to by @b of @size.
 *
 * Return 0 if fixup applied (success) or -EINVAL if no fixup was performed
 * (assumed not needed). This is in contrast to post_read_mst_fixup() above.
 *
 * NOTE: We consider the absence / invalidity of an update sequence array to
 * mean that the structure is not subject to protection and hence doesn't need
 * to be fixed up. This means that you have to create a valid update sequence
 * array header in the ntfs record before calling this function, otherwise it
 * will fail (the header needs to contain the position of the update sequence
 * array together with the number of elements in the array). You also need to
 * initialise the update sequence number before calling this function
 * otherwise a random word will be used (whatever was in the record at that
 * position at that time).
 */
int pre_write_mst_fixup(NTFS_RECORD *b, const u32 size)
{
	le16 *usa_pos, *data_pos;
	u16 usa_ofs, usa_count, usn;
	le16 le_usn;

	/* Sanity check + only fixup if it makes sense. */
	if (!b || ntfs_is_baad_record(b->magic) ||
			ntfs_is_hole_record(b->magic))
		return -EINVAL;
	/* Setup the variables. */
	usa_ofs = le16_to_cpu(b->usa_ofs);
	/* Decrement usa_count to get number of fixups. */
	usa_count = le16_to_cpu(b->usa_count) - 1;
	/* Size and alignment checks. */
	if ( size & (NTFS_BLOCK_SIZE - 1)	||
	     usa_ofs & 1			||
	     usa_ofs + (usa_count * 2) > size	||
	     (size >> NTFS_BLOCK_SIZE_BITS) != usa_count)
		return -EINVAL;
	/* Position of usn in update sequence array. */
	usa_pos = (le16*)((u8*)b + usa_ofs);
	/*
	 * Cyclically increment the update sequence number
	 * (skipping 0 and -1, i.e. 0xffff).
	 */
	usn = le16_to_cpup(usa_pos) + 1;
	if (usn == 0xffff || !usn)
		usn = 1;
	le_usn = cpu_to_le16(usn);
	*usa_pos = le_usn;
	/* Position in data of first u16 that needs fixing up. */
	data_pos = (le16*)b + NTFS_BLOCK_SIZE/sizeof(le16) - 1;
	/* Fixup all sectors. */
	while (usa_count--) {
		/*
		 * Increment the position in the usa and save the
		 * original data from the data buffer into the usa.
		 */
		*(++usa_pos) = *data_pos;
		/* Apply fixup to data. */
		*data_pos = le_usn;
		/* Increment position in data as well. */
		data_pos += NTFS_BLOCK_SIZE/sizeof(le16);
	}
	return 0;
}

/**
 * post_write_mst_fixup - fast deprotect multi sector transfer protected data
 * @b:		pointer to the data to deprotect
 *
 * Perform the necessary post write multi sector transfer fixup, not checking
 * for any errors, because we assume we have just used pre_write_mst_fixup(),
 * thus the data will be fine or we would never have gotten here.
 */
void post_write_mst_fixup(NTFS_RECORD *b)
{
	le16 *usa_pos, *data_pos;

	u16 usa_ofs = le16_to_cpu(b->usa_ofs);
	u16 usa_count = le16_to_cpu(b->usa_count) - 1;

	/* Position of usn in update sequence array. */
	usa_pos = (le16*)b + usa_ofs/sizeof(le16);

	/* Position in protected data of first u16 that needs fixing up. */
	data_pos = (le16*)b + NTFS_BLOCK_SIZE/sizeof(le16) - 1;

	/* Fixup all sectors. */
	while (usa_count--) {
		/*
		 * Increment position in usa and restore original data from
		 * the usa into the data buffer.
		 */
		*data_pos = *(++usa_pos);

		/* Increment position in data as well. */
		data_pos += NTFS_BLOCK_SIZE/sizeof(le16);
	}
}
gpl-2.0
sleshepic/Note_2_l900
fs/ntfs/mst.c
15007
7109
/*
 * mst.c - NTFS multi sector transfer protection handling code. Part of the
 * Linux-NTFS project.
 *
 * Copyright (c) 2001-2004 Anton Altaparmakov
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the Linux-NTFS
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "ntfs.h"

/**
 * post_read_mst_fixup - deprotect multi sector transfer protected data
 * @b:		pointer to the data to deprotect
 * @size:	size in bytes of @b
 *
 * Perform the necessary post read multi sector transfer fixup and detect the
 * presence of incomplete multi sector transfers. - In that case, overwrite the
 * magic of the ntfs record header being processed with "BAAD" (in memory
 * only!) and abort processing.
 *
 * Return 0 on success and -EINVAL on error ("BAAD" magic will be present).
 *
 * NOTE: We consider the absence / invalidity of an update sequence array to
 * mean that the structure is not protected at all and hence doesn't need to
 * be fixed up. Thus, we return success and not failure in this case. This is
 * in contrast to pre_write_mst_fixup(), see below.
 */
int post_read_mst_fixup(NTFS_RECORD *b, const u32 size)
{
	u16 usa_ofs, usa_count, usn;
	u16 *usa_pos, *data_pos;

	/* Setup the variables. */
	usa_ofs = le16_to_cpu(b->usa_ofs);
	/* Decrement usa_count to get number of fixups. */
	usa_count = le16_to_cpu(b->usa_count) - 1;
	/* Size and alignment checks. */
	if ( size & (NTFS_BLOCK_SIZE - 1)	||
	     usa_ofs & 1			||
	     usa_ofs + (usa_count * 2) > size	||
	     (size >> NTFS_BLOCK_SIZE_BITS) != usa_count)
		return 0;
	/* Position of usn in update sequence array. */
	usa_pos = (u16*)b + usa_ofs/sizeof(u16);
	/*
	 * The update sequence number which has to be equal to each of the
	 * u16 values before they are fixed up. Note no need to care for
	 * endianness since we are comparing and moving data for on disk
	 * structures which means the data is consistent. - If it is
	 * consistenty the wrong endianness it doesn't make any difference.
	 */
	usn = *usa_pos;
	/*
	 * Position in protected data of first u16 that needs fixing up.
	 */
	data_pos = (u16*)b + NTFS_BLOCK_SIZE/sizeof(u16) - 1;
	/*
	 * Check for incomplete multi sector transfer(s).
	 */
	while (usa_count--) {
		if (*data_pos != usn) {
			/*
			 * Incomplete multi sector transfer detected! )-:
			 * Set the magic to "BAAD" and return failure.
			 * Note that magic_BAAD is already converted to le32.
			 */
			b->magic = magic_BAAD;
			return -EINVAL;
		}
		data_pos += NTFS_BLOCK_SIZE/sizeof(u16);
	}
	/* Re-setup the variables. */
	usa_count = le16_to_cpu(b->usa_count) - 1;
	data_pos = (u16*)b + NTFS_BLOCK_SIZE/sizeof(u16) - 1;
	/* Fixup all sectors. */
	while (usa_count--) {
		/*
		 * Increment position in usa and restore original data from
		 * the usa into the data buffer.
		 */
		*data_pos = *(++usa_pos);
		/* Increment position in data as well. */
		data_pos += NTFS_BLOCK_SIZE/sizeof(u16);
	}
	return 0;
}

/**
 * pre_write_mst_fixup - apply multi sector transfer protection
 * @b:		pointer to the data to protect
 * @size:	size in bytes of @b
 *
 * Perform the necessary pre write multi sector transfer fixup on the data
 * pointer to by @b of @size.
 *
 * Return 0 if fixup applied (success) or -EINVAL if no fixup was performed
 * (assumed not needed). This is in contrast to post_read_mst_fixup() above.
 *
 * NOTE: We consider the absence / invalidity of an update sequence array to
 * mean that the structure is not subject to protection and hence doesn't need
 * to be fixed up. This means that you have to create a valid update sequence
 * array header in the ntfs record before calling this function, otherwise it
 * will fail (the header needs to contain the position of the update sequence
 * array together with the number of elements in the array). You also need to
 * initialise the update sequence number before calling this function
 * otherwise a random word will be used (whatever was in the record at that
 * position at that time).
 */
int pre_write_mst_fixup(NTFS_RECORD *b, const u32 size)
{
	le16 *usa_pos, *data_pos;
	u16 usa_ofs, usa_count, usn;
	le16 le_usn;

	/* Sanity check + only fixup if it makes sense. */
	if (!b || ntfs_is_baad_record(b->magic) ||
			ntfs_is_hole_record(b->magic))
		return -EINVAL;
	/* Setup the variables. */
	usa_ofs = le16_to_cpu(b->usa_ofs);
	/* Decrement usa_count to get number of fixups. */
	usa_count = le16_to_cpu(b->usa_count) - 1;
	/* Size and alignment checks. */
	if ( size & (NTFS_BLOCK_SIZE - 1)	||
	     usa_ofs & 1			||
	     usa_ofs + (usa_count * 2) > size	||
	     (size >> NTFS_BLOCK_SIZE_BITS) != usa_count)
		return -EINVAL;
	/* Position of usn in update sequence array. */
	usa_pos = (le16*)((u8*)b + usa_ofs);
	/*
	 * Cyclically increment the update sequence number
	 * (skipping 0 and -1, i.e. 0xffff).
	 */
	usn = le16_to_cpup(usa_pos) + 1;
	if (usn == 0xffff || !usn)
		usn = 1;
	le_usn = cpu_to_le16(usn);
	*usa_pos = le_usn;
	/* Position in data of first u16 that needs fixing up. */
	data_pos = (le16*)b + NTFS_BLOCK_SIZE/sizeof(le16) - 1;
	/* Fixup all sectors. */
	while (usa_count--) {
		/*
		 * Increment the position in the usa and save the
		 * original data from the data buffer into the usa.
		 */
		*(++usa_pos) = *data_pos;
		/* Apply fixup to data. */
		*data_pos = le_usn;
		/* Increment position in data as well. */
		data_pos += NTFS_BLOCK_SIZE/sizeof(le16);
	}
	return 0;
}

/**
 * post_write_mst_fixup - fast deprotect multi sector transfer protected data
 * @b:		pointer to the data to deprotect
 *
 * Perform the necessary post write multi sector transfer fixup, not checking
 * for any errors, because we assume we have just used pre_write_mst_fixup(),
 * thus the data will be fine or we would never have gotten here.
 */
void post_write_mst_fixup(NTFS_RECORD *b)
{
	le16 *usa_pos, *data_pos;

	u16 usa_ofs = le16_to_cpu(b->usa_ofs);
	u16 usa_count = le16_to_cpu(b->usa_count) - 1;

	/* Position of usn in update sequence array. */
	usa_pos = (le16*)b + usa_ofs/sizeof(le16);

	/* Position in protected data of first u16 that needs fixing up. */
	data_pos = (le16*)b + NTFS_BLOCK_SIZE/sizeof(le16) - 1;

	/* Fixup all sectors. */
	while (usa_count--) {
		/*
		 * Increment position in usa and restore original data from
		 * the usa into the data buffer.
		 */
		*data_pos = *(++usa_pos);

		/* Increment position in data as well. */
		data_pos += NTFS_BLOCK_SIZE/sizeof(le16);
	}
}
gpl-2.0
ivanich/wireless-testing_torvalds
security/keys/request_key.c
160
19801
/* Request a key from userspace * * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * See Documentation/security/keys-request-key.txt */ #include <linux/module.h> #include <linux/sched.h> #include <linux/kmod.h> #include <linux/err.h> #include <linux/keyctl.h> #include <linux/slab.h> #include "internal.h" #define key_negative_timeout 60 /* default timeout on a negative key's existence */ /* * wait_on_bit() sleep function for uninterruptible waiting */ static int key_wait_bit(void *flags) { schedule(); return 0; } /* * wait_on_bit() sleep function for interruptible waiting */ static int key_wait_bit_intr(void *flags) { schedule(); return signal_pending(current) ? -ERESTARTSYS : 0; } /** * complete_request_key - Complete the construction of a key. * @cons: The key construction record. * @error: The success or failute of the construction. * * Complete the attempt to construct a key. The key will be negated * if an error is indicated. The authorisation key will be revoked * unconditionally. */ void complete_request_key(struct key_construction *cons, int error) { kenter("{%d,%d},%d", cons->key->serial, cons->authkey->serial, error); if (error < 0) key_negate_and_link(cons->key, key_negative_timeout, NULL, cons->authkey); else key_revoke(cons->authkey); key_put(cons->key); key_put(cons->authkey); kfree(cons); } EXPORT_SYMBOL(complete_request_key); /* * Initialise a usermode helper that is going to have a specific session * keyring. * * This is called in context of freshly forked kthread before kernel_execve(), * so we can simply install the desired session_keyring at this point. 
*/ static int umh_keys_init(struct subprocess_info *info, struct cred *cred) { struct key *keyring = info->data; return install_session_keyring_to_cred(cred, keyring); } /* * Clean up a usermode helper with session keyring. */ static void umh_keys_cleanup(struct subprocess_info *info) { struct key *keyring = info->data; key_put(keyring); } /* * Call a usermode helper with a specific session keyring. */ static int call_usermodehelper_keys(char *path, char **argv, char **envp, struct key *session_keyring, int wait) { return call_usermodehelper_fns(path, argv, envp, wait, umh_keys_init, umh_keys_cleanup, key_get(session_keyring)); } /* * Request userspace finish the construction of a key * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>" */ static int call_sbin_request_key(struct key_construction *cons, const char *op, void *aux) { const struct cred *cred = current_cred(); key_serial_t prkey, sskey; struct key *key = cons->key, *authkey = cons->authkey, *keyring, *session; char *argv[9], *envp[3], uid_str[12], gid_str[12]; char key_str[12], keyring_str[3][12]; char desc[20]; int ret, i; kenter("{%d},{%d},%s", key->serial, authkey->serial, op); ret = install_user_keyrings(); if (ret < 0) goto error_alloc; /* allocate a new session keyring */ sprintf(desc, "_req.%u", key->serial); cred = get_current_cred(); keyring = keyring_alloc(desc, cred->fsuid, cred->fsgid, cred, KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ, KEY_ALLOC_QUOTA_OVERRUN, NULL); put_cred(cred); if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); goto error_alloc; } /* attach the auth key to the session keyring */ ret = key_link(keyring, authkey); if (ret < 0) goto error_link; /* record the UID and GID */ sprintf(uid_str, "%d", from_kuid(&init_user_ns, cred->fsuid)); sprintf(gid_str, "%d", from_kgid(&init_user_ns, cred->fsgid)); /* we say which key is under construction */ sprintf(key_str, "%d", key->serial); /* we specify the process's default keyrings */ sprintf(keyring_str[0], 
"%d", cred->thread_keyring ? cred->thread_keyring->serial : 0); prkey = 0; if (cred->process_keyring) prkey = cred->process_keyring->serial; sprintf(keyring_str[1], "%d", prkey); rcu_read_lock(); session = rcu_dereference(cred->session_keyring); if (!session) session = cred->user->session_keyring; sskey = session->serial; rcu_read_unlock(); sprintf(keyring_str[2], "%d", sskey); /* set up a minimal environment */ i = 0; envp[i++] = "HOME=/"; envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; envp[i] = NULL; /* set up the argument list */ i = 0; argv[i++] = "/sbin/request-key"; argv[i++] = (char *) op; argv[i++] = key_str; argv[i++] = uid_str; argv[i++] = gid_str; argv[i++] = keyring_str[0]; argv[i++] = keyring_str[1]; argv[i++] = keyring_str[2]; argv[i] = NULL; /* do it */ ret = call_usermodehelper_keys(argv[0], argv, envp, keyring, UMH_WAIT_PROC); kdebug("usermode -> 0x%x", ret); if (ret >= 0) { /* ret is the exit/wait code */ if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags) || key_validate(key) < 0) ret = -ENOKEY; else /* ignore any errors from userspace if the key was * instantiated */ ret = 0; } error_link: key_put(keyring); error_alloc: complete_request_key(cons, ret); kleave(" = %d", ret); return ret; } /* * Call out to userspace for key construction. * * Program failure is ignored in favour of key status. 
*/ static int construct_key(struct key *key, const void *callout_info, size_t callout_len, void *aux, struct key *dest_keyring) { struct key_construction *cons; request_key_actor_t actor; struct key *authkey; int ret; kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux); cons = kmalloc(sizeof(*cons), GFP_KERNEL); if (!cons) return -ENOMEM; /* allocate an authorisation key */ authkey = request_key_auth_new(key, callout_info, callout_len, dest_keyring); if (IS_ERR(authkey)) { kfree(cons); ret = PTR_ERR(authkey); authkey = NULL; } else { cons->authkey = key_get(authkey); cons->key = key_get(key); /* make the call */ actor = call_sbin_request_key; if (key->type->request_key) actor = key->type->request_key; ret = actor(cons, "create", aux); /* check that the actor called complete_request_key() prior to * returning an error */ WARN_ON(ret < 0 && !test_bit(KEY_FLAG_REVOKED, &authkey->flags)); key_put(authkey); } kleave(" = %d", ret); return ret; } /* * Get the appropriate destination keyring for the request. * * The keyring selected is returned with an extra reference upon it which the * caller must release. 
*/ static void construct_get_dest_keyring(struct key **_dest_keyring) { struct request_key_auth *rka; const struct cred *cred = current_cred(); struct key *dest_keyring = *_dest_keyring, *authkey; kenter("%p", dest_keyring); /* find the appropriate keyring */ if (dest_keyring) { /* the caller supplied one */ key_get(dest_keyring); } else { /* use a default keyring; falling through the cases until we * find one that we actually have */ switch (cred->jit_keyring) { case KEY_REQKEY_DEFL_DEFAULT: case KEY_REQKEY_DEFL_REQUESTOR_KEYRING: if (cred->request_key_auth) { authkey = cred->request_key_auth; down_read(&authkey->sem); rka = authkey->payload.data; if (!test_bit(KEY_FLAG_REVOKED, &authkey->flags)) dest_keyring = key_get(rka->dest_keyring); up_read(&authkey->sem); if (dest_keyring) break; } case KEY_REQKEY_DEFL_THREAD_KEYRING: dest_keyring = key_get(cred->thread_keyring); if (dest_keyring) break; case KEY_REQKEY_DEFL_PROCESS_KEYRING: dest_keyring = key_get(cred->process_keyring); if (dest_keyring) break; case KEY_REQKEY_DEFL_SESSION_KEYRING: rcu_read_lock(); dest_keyring = key_get( rcu_dereference(cred->session_keyring)); rcu_read_unlock(); if (dest_keyring) break; case KEY_REQKEY_DEFL_USER_SESSION_KEYRING: dest_keyring = key_get(cred->user->session_keyring); break; case KEY_REQKEY_DEFL_USER_KEYRING: dest_keyring = key_get(cred->user->uid_keyring); break; case KEY_REQKEY_DEFL_GROUP_KEYRING: default: BUG(); } } *_dest_keyring = dest_keyring; kleave(" [dk %d]", key_serial(dest_keyring)); return; } /* * Allocate a new key in under-construction state and attempt to link it in to * the requested keyring. * * May return a key that's already under construction instead if there was a * race between two thread calling request_key(). 
*/ static int construct_alloc_key(struct key_type *type, const char *description, struct key *dest_keyring, unsigned long flags, struct key_user *user, struct key **_key) { const struct cred *cred = current_cred(); unsigned long prealloc; struct key *key; key_perm_t perm; key_ref_t key_ref; int ret; kenter("%s,%s,,,", type->name, description); *_key = NULL; mutex_lock(&user->cons_lock); perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR; perm |= KEY_USR_VIEW; if (type->read) perm |= KEY_POS_READ; if (type == &key_type_keyring || type->update) perm |= KEY_POS_WRITE; key = key_alloc(type, description, cred->fsuid, cred->fsgid, cred, perm, flags); if (IS_ERR(key)) goto alloc_failed; set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags); if (dest_keyring) { ret = __key_link_begin(dest_keyring, type, description, &prealloc); if (ret < 0) goto link_prealloc_failed; } /* attach the key to the destination keyring under lock, but we do need * to do another check just in case someone beat us to it whilst we * waited for locks */ mutex_lock(&key_construction_mutex); key_ref = search_process_keyrings(type, description, type->match, cred); if (!IS_ERR(key_ref)) goto key_already_present; if (dest_keyring) __key_link(dest_keyring, key, &prealloc); mutex_unlock(&key_construction_mutex); if (dest_keyring) __key_link_end(dest_keyring, type, prealloc); mutex_unlock(&user->cons_lock); *_key = key; kleave(" = 0 [%d]", key_serial(key)); return 0; /* the key is now present - we tell the caller that we found it by * returning -EINPROGRESS */ key_already_present: key_put(key); mutex_unlock(&key_construction_mutex); key = key_ref_to_ptr(key_ref); if (dest_keyring) { ret = __key_link_check_live_key(dest_keyring, key); if (ret == 0) __key_link(dest_keyring, key, &prealloc); __key_link_end(dest_keyring, type, prealloc); if (ret < 0) goto link_check_failed; } mutex_unlock(&user->cons_lock); *_key = key; kleave(" = -EINPROGRESS [%d]", key_serial(key)); return -EINPROGRESS; 
link_check_failed: mutex_unlock(&user->cons_lock); key_put(key); kleave(" = %d [linkcheck]", ret); return ret; link_prealloc_failed: mutex_unlock(&user->cons_lock); kleave(" = %d [prelink]", ret); return ret; alloc_failed: mutex_unlock(&user->cons_lock); kleave(" = %ld", PTR_ERR(key)); return PTR_ERR(key); } /* * Commence key construction. */ static struct key *construct_key_and_link(struct key_type *type, const char *description, const char *callout_info, size_t callout_len, void *aux, struct key *dest_keyring, unsigned long flags) { struct key_user *user; struct key *key; int ret; kenter(""); user = key_user_lookup(current_fsuid()); if (!user) return ERR_PTR(-ENOMEM); construct_get_dest_keyring(&dest_keyring); ret = construct_alloc_key(type, description, dest_keyring, flags, user, &key); key_user_put(user); if (ret == 0) { ret = construct_key(key, callout_info, callout_len, aux, dest_keyring); if (ret < 0) { kdebug("cons failed"); goto construction_failed; } } else if (ret == -EINPROGRESS) { ret = 0; } else { goto couldnt_alloc_key; } key_put(dest_keyring); kleave(" = key %d", key_serial(key)); return key; construction_failed: key_negate_and_link(key, key_negative_timeout, NULL, NULL); key_put(key); couldnt_alloc_key: key_put(dest_keyring); kleave(" = %d", ret); return ERR_PTR(ret); } /** * request_key_and_link - Request a key and cache it in a keyring. * @type: The type of key we want. * @description: The searchable description of the key. * @callout_info: The data to pass to the instantiation upcall (or NULL). * @callout_len: The length of callout_info. * @aux: Auxiliary data for the upcall. * @dest_keyring: Where to cache the key. * @flags: Flags to key_alloc(). * * A key matching the specified criteria is searched for in the process's * keyrings and returned with its usage count incremented if found. Otherwise, * if callout_info is not NULL, a key will be allocated and some service * (probably in userspace) will be asked to instantiate it. 
* * If successfully found or created, the key will be linked to the destination * keyring if one is provided. * * Returns a pointer to the key if successful; -EACCES, -ENOKEY, -EKEYREVOKED * or -EKEYEXPIRED if an inaccessible, negative, revoked or expired key was * found; -ENOKEY if no key was found and no @callout_info was given; -EDQUOT * if insufficient key quota was available to create a new key; or -ENOMEM if * insufficient memory was available. * * If the returned key was created, then it may still be under construction, * and wait_for_key_construction() should be used to wait for that to complete. */ struct key *request_key_and_link(struct key_type *type, const char *description, const void *callout_info, size_t callout_len, void *aux, struct key *dest_keyring, unsigned long flags) { const struct cred *cred = current_cred(); struct key *key; key_ref_t key_ref; int ret; kenter("%s,%s,%p,%zu,%p,%p,%lx", type->name, description, callout_info, callout_len, aux, dest_keyring, flags); /* search all the process keyrings for a key */ key_ref = search_process_keyrings(type, description, type->match, cred); if (!IS_ERR(key_ref)) { key = key_ref_to_ptr(key_ref); if (dest_keyring) { construct_get_dest_keyring(&dest_keyring); ret = key_link(dest_keyring, key); key_put(dest_keyring); if (ret < 0) { key_put(key); key = ERR_PTR(ret); goto error; } } } else if (PTR_ERR(key_ref) != -EAGAIN) { key = ERR_CAST(key_ref); } else { /* the search failed, but the keyrings were searchable, so we * should consult userspace if we can */ key = ERR_PTR(-ENOKEY); if (!callout_info) goto error; key = construct_key_and_link(type, description, callout_info, callout_len, aux, dest_keyring, flags); } error: kleave(" = %p", key); return key; } /** * wait_for_key_construction - Wait for construction of a key to complete * @key: The key being waited for. * @intr: Whether to wait interruptibly. * * Wait for a key to finish being constructed. 
* * Returns 0 if successful; -ERESTARTSYS if the wait was interrupted; -ENOKEY * if the key was negated; or -EKEYREVOKED or -EKEYEXPIRED if the key was * revoked or expired. */ int wait_for_key_construction(struct key *key, bool intr) { int ret; ret = wait_on_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT, intr ? key_wait_bit_intr : key_wait_bit, intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); if (ret < 0) return ret; if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) return key->type_data.reject_error; return key_validate(key); } EXPORT_SYMBOL(wait_for_key_construction); /** * request_key - Request a key and wait for construction * @type: Type of key. * @description: The searchable description of the key. * @callout_info: The data to pass to the instantiation upcall (or NULL). * * As for request_key_and_link() except that it does not add the returned key * to a keyring if found, new keys are always allocated in the user's quota, * the callout_info must be a NUL-terminated string and no auxiliary data can * be passed. * * Furthermore, it then works as wait_for_key_construction() to wait for the * completion of keys undergoing construction with a non-interruptible wait. */ struct key *request_key(struct key_type *type, const char *description, const char *callout_info) { struct key *key; size_t callout_len = 0; int ret; if (callout_info) callout_len = strlen(callout_info); key = request_key_and_link(type, description, callout_info, callout_len, NULL, NULL, KEY_ALLOC_IN_QUOTA); if (!IS_ERR(key)) { ret = wait_for_key_construction(key, false); if (ret < 0) { key_put(key); return ERR_PTR(ret); } } return key; } EXPORT_SYMBOL(request_key); /** * request_key_with_auxdata - Request a key with auxiliary data for the upcaller * @type: The type of key we want. * @description: The searchable description of the key. * @callout_info: The data to pass to the instantiation upcall (or NULL). * @callout_len: The length of callout_info. * @aux: Auxiliary data for the upcall. 
* * As for request_key_and_link() except that it does not add the returned key * to a keyring if found and new keys are always allocated in the user's quota. * * Furthermore, it then works as wait_for_key_construction() to wait for the * completion of keys undergoing construction with a non-interruptible wait. */ struct key *request_key_with_auxdata(struct key_type *type, const char *description, const void *callout_info, size_t callout_len, void *aux) { struct key *key; int ret; key = request_key_and_link(type, description, callout_info, callout_len, aux, NULL, KEY_ALLOC_IN_QUOTA); if (!IS_ERR(key)) { ret = wait_for_key_construction(key, false); if (ret < 0) { key_put(key); return ERR_PTR(ret); } } return key; } EXPORT_SYMBOL(request_key_with_auxdata); /* * request_key_async - Request a key (allow async construction) * @type: Type of key. * @description: The searchable description of the key. * @callout_info: The data to pass to the instantiation upcall (or NULL). * @callout_len: The length of callout_info. * * As for request_key_and_link() except that it does not add the returned key * to a keyring if found, new keys are always allocated in the user's quota and * no auxiliary data can be passed. * * The caller should call wait_for_key_construction() to wait for the * completion of the returned key if it is still undergoing construction. */ struct key *request_key_async(struct key_type *type, const char *description, const void *callout_info, size_t callout_len) { return request_key_and_link(type, description, callout_info, callout_len, NULL, NULL, KEY_ALLOC_IN_QUOTA); } EXPORT_SYMBOL(request_key_async); /* * request a key with auxiliary data for the upcaller (allow async construction) * @type: Type of key. * @description: The searchable description of the key. * @callout_info: The data to pass to the instantiation upcall (or NULL). * @callout_len: The length of callout_info. * @aux: Auxiliary data for the upcall. 
* * As for request_key_and_link() except that it does not add the returned key * to a keyring if found and new keys are always allocated in the user's quota. * * The caller should call wait_for_key_construction() to wait for the * completion of the returned key if it is still undergoing construction. */ struct key *request_key_async_with_auxdata(struct key_type *type, const char *description, const void *callout_info, size_t callout_len, void *aux) { return request_key_and_link(type, description, callout_info, callout_len, aux, NULL, KEY_ALLOC_IN_QUOTA); } EXPORT_SYMBOL(request_key_async_with_auxdata);
gpl-2.0
r1mikey/linux-picoxcell
tools/power/x86/turbostat/turbostat.c
160
84206
/* * turbostat -- show CPU frequency and C-state residency * on modern Intel turbo-capable processors. * * Copyright (c) 2013 Intel Corporation. * Len Brown <len.brown@intel.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #define _GNU_SOURCE #include MSRHEADER #include <stdarg.h> #include <stdio.h> #include <err.h> #include <unistd.h> #include <sys/types.h> #include <sys/wait.h> #include <sys/stat.h> #include <sys/resource.h> #include <fcntl.h> #include <signal.h> #include <sys/time.h> #include <stdlib.h> #include <getopt.h> #include <dirent.h> #include <string.h> #include <ctype.h> #include <sched.h> #include <cpuid.h> #include <linux/capability.h> #include <errno.h> char *proc_stat = "/proc/stat"; unsigned int interval_sec = 5; unsigned int debug; unsigned int rapl_joules; unsigned int summary_only; unsigned int dump_only; unsigned int skip_c0; unsigned int skip_c1; unsigned int do_nhm_cstates; unsigned int do_snb_cstates; unsigned int do_knl_cstates; unsigned int do_pc2; unsigned int do_pc3; unsigned int do_pc6; unsigned int do_pc7; unsigned int do_c8_c9_c10; unsigned int do_skl_residency; unsigned int do_slm_cstates; unsigned int use_c1_residency_msr; unsigned int has_aperf; unsigned int has_epb; unsigned int units = 1000000; /* MHz etc */ unsigned int genuine_intel; unsigned int has_invariant_tsc; unsigned int do_nhm_platform_info; unsigned int 
extra_msr_offset32; unsigned int extra_msr_offset64; unsigned int extra_delta_offset32; unsigned int extra_delta_offset64; unsigned int aperf_mperf_multiplier = 1; int do_smi; double bclk; double base_hz; unsigned int has_base_hz; double tsc_tweak = 1.0; unsigned int show_pkg; unsigned int show_core; unsigned int show_cpu; unsigned int show_pkg_only; unsigned int show_core_only; char *output_buffer, *outp; unsigned int do_rapl; unsigned int do_dts; unsigned int do_ptm; unsigned int tcc_activation_temp; unsigned int tcc_activation_temp_override; double rapl_power_units, rapl_time_units; double rapl_dram_energy_units, rapl_energy_units; double rapl_joule_counter_range; unsigned int do_core_perf_limit_reasons; unsigned int do_gfx_perf_limit_reasons; unsigned int do_ring_perf_limit_reasons; unsigned int crystal_hz; unsigned long long tsc_hz; int base_cpu; double discover_bclk(unsigned int family, unsigned int model); #define RAPL_PKG (1 << 0) /* 0x610 MSR_PKG_POWER_LIMIT */ /* 0x611 MSR_PKG_ENERGY_STATUS */ #define RAPL_PKG_PERF_STATUS (1 << 1) /* 0x613 MSR_PKG_PERF_STATUS */ #define RAPL_PKG_POWER_INFO (1 << 2) /* 0x614 MSR_PKG_POWER_INFO */ #define RAPL_DRAM (1 << 3) /* 0x618 MSR_DRAM_POWER_LIMIT */ /* 0x619 MSR_DRAM_ENERGY_STATUS */ #define RAPL_DRAM_PERF_STATUS (1 << 4) /* 0x61b MSR_DRAM_PERF_STATUS */ #define RAPL_DRAM_POWER_INFO (1 << 5) /* 0x61c MSR_DRAM_POWER_INFO */ #define RAPL_CORES (1 << 6) /* 0x638 MSR_PP0_POWER_LIMIT */ /* 0x639 MSR_PP0_ENERGY_STATUS */ #define RAPL_CORE_POLICY (1 << 7) /* 0x63a MSR_PP0_POLICY */ #define RAPL_GFX (1 << 8) /* 0x640 MSR_PP1_POWER_LIMIT */ /* 0x641 MSR_PP1_ENERGY_STATUS */ /* 0x642 MSR_PP1_POLICY */ #define TJMAX_DEFAULT 100 #define MAX(a, b) ((a) > (b) ? 
(a) : (b)) int aperf_mperf_unstable; int backwards_count; char *progname; cpu_set_t *cpu_present_set, *cpu_affinity_set; size_t cpu_present_setsize, cpu_affinity_setsize; struct thread_data { unsigned long long tsc; unsigned long long aperf; unsigned long long mperf; unsigned long long c1; unsigned long long extra_msr64; unsigned long long extra_delta64; unsigned long long extra_msr32; unsigned long long extra_delta32; unsigned int smi_count; unsigned int cpu_id; unsigned int flags; #define CPU_IS_FIRST_THREAD_IN_CORE 0x2 #define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4 } *thread_even, *thread_odd; struct core_data { unsigned long long c3; unsigned long long c6; unsigned long long c7; unsigned int core_temp_c; unsigned int core_id; } *core_even, *core_odd; struct pkg_data { unsigned long long pc2; unsigned long long pc3; unsigned long long pc6; unsigned long long pc7; unsigned long long pc8; unsigned long long pc9; unsigned long long pc10; unsigned long long pkg_wtd_core_c0; unsigned long long pkg_any_core_c0; unsigned long long pkg_any_gfxe_c0; unsigned long long pkg_both_core_gfxe_c0; unsigned int package_id; unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */ unsigned int energy_dram; /* MSR_DRAM_ENERGY_STATUS */ unsigned int energy_cores; /* MSR_PP0_ENERGY_STATUS */ unsigned int energy_gfx; /* MSR_PP1_ENERGY_STATUS */ unsigned int rapl_pkg_perf_status; /* MSR_PKG_PERF_STATUS */ unsigned int rapl_dram_perf_status; /* MSR_DRAM_PERF_STATUS */ unsigned int pkg_temp_c; } *package_even, *package_odd; #define ODD_COUNTERS thread_odd, core_odd, package_odd #define EVEN_COUNTERS thread_even, core_even, package_even #define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \ (thread_base + (pkg_no) * topo.num_cores_per_pkg * \ topo.num_threads_per_core + \ (core_no) * topo.num_threads_per_core + (thread_no)) #define GET_CORE(core_base, core_no, pkg_no) \ (core_base + (pkg_no) * topo.num_cores_per_pkg + (core_no)) #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no) struct 
system_summary { struct thread_data threads; struct core_data cores; struct pkg_data packages; } sum, average; struct topo_params { int num_packages; int num_cpus; int num_cores; int max_cpu_num; int num_cores_per_pkg; int num_threads_per_core; } topo; struct timeval tv_even, tv_odd, tv_delta; void setup_all_buffers(void); int cpu_is_not_present(int cpu) { return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set); } /* * run func(thread, core, package) in topology order * skip non-present cpus */ int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *), struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base) { int retval, pkg_no, core_no, thread_no; for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) { for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) { for (thread_no = 0; thread_no < topo.num_threads_per_core; ++thread_no) { struct thread_data *t; struct core_data *c; struct pkg_data *p; t = GET_THREAD(thread_base, thread_no, core_no, pkg_no); if (cpu_is_not_present(t->cpu_id)) continue; c = GET_CORE(core_base, core_no, pkg_no); p = GET_PKG(pkg_base, pkg_no); retval = func(t, c, p); if (retval) return retval; } } } return 0; } int cpu_migrate(int cpu) { CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set); CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set); if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1) return -1; else return 0; } int get_msr(int cpu, off_t offset, unsigned long long *msr) { ssize_t retval; char pathname[32]; int fd; sprintf(pathname, "/dev/cpu/%d/msr", cpu); fd = open(pathname, O_RDONLY); if (fd < 0) err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname); retval = pread(fd, msr, sizeof *msr, offset); close(fd); if (retval != sizeof *msr) err(-1, "%s offset 0x%llx read failed", pathname, (unsigned long long)offset); return 0; } /* * Example Format w/ field column widths: * * Package Core CPU Avg_MHz 
Bzy_MHz TSC_MHz SMI %Busy CPU_%c1 CPU_%c3 CPU_%c6 CPU_%c7 CoreTmp PkgTmp Pkg%pc2 Pkg%pc3 Pkg%pc6 Pkg%pc7 PkgWatt CorWatt GFXWatt * 123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678 */ void print_header(void) { if (show_pkg) outp += sprintf(outp, " Package"); if (show_core) outp += sprintf(outp, " Core"); if (show_cpu) outp += sprintf(outp, " CPU"); if (has_aperf) outp += sprintf(outp, " Avg_MHz"); if (has_aperf) outp += sprintf(outp, " %%Busy"); if (has_aperf) outp += sprintf(outp, " Bzy_MHz"); outp += sprintf(outp, " TSC_MHz"); if (extra_delta_offset32) outp += sprintf(outp, " count 0x%03X", extra_delta_offset32); if (extra_delta_offset64) outp += sprintf(outp, " COUNT 0x%03X", extra_delta_offset64); if (extra_msr_offset32) outp += sprintf(outp, " MSR 0x%03X", extra_msr_offset32); if (extra_msr_offset64) outp += sprintf(outp, " MSR 0x%03X", extra_msr_offset64); if (!debug) goto done; if (do_smi) outp += sprintf(outp, " SMI"); if (do_nhm_cstates) outp += sprintf(outp, " CPU%%c1"); if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) outp += sprintf(outp, " CPU%%c3"); if (do_nhm_cstates) outp += sprintf(outp, " CPU%%c6"); if (do_snb_cstates) outp += sprintf(outp, " CPU%%c7"); if (do_dts) outp += sprintf(outp, " CoreTmp"); if (do_ptm) outp += sprintf(outp, " PkgTmp"); if (do_skl_residency) { outp += sprintf(outp, " Totl%%C0"); outp += sprintf(outp, " Any%%C0"); outp += sprintf(outp, " GFX%%C0"); outp += sprintf(outp, " CPUGFX%%"); } if (do_pc2) outp += sprintf(outp, " Pkg%%pc2"); if (do_pc3) outp += sprintf(outp, " Pkg%%pc3"); if (do_pc6) outp += sprintf(outp, " Pkg%%pc6"); if (do_pc7) outp += sprintf(outp, " Pkg%%pc7"); if (do_c8_c9_c10) { outp += sprintf(outp, " Pkg%%pc8"); outp += sprintf(outp, " Pkg%%pc9"); outp += sprintf(outp, " Pk%%pc10"); } if (do_rapl && !rapl_joules) { if (do_rapl & RAPL_PKG) outp += sprintf(outp, " PkgWatt"); if 
(do_rapl & RAPL_CORES) outp += sprintf(outp, " CorWatt"); if (do_rapl & RAPL_GFX) outp += sprintf(outp, " GFXWatt"); if (do_rapl & RAPL_DRAM) outp += sprintf(outp, " RAMWatt"); if (do_rapl & RAPL_PKG_PERF_STATUS) outp += sprintf(outp, " PKG_%%"); if (do_rapl & RAPL_DRAM_PERF_STATUS) outp += sprintf(outp, " RAM_%%"); } else if (do_rapl && rapl_joules) { if (do_rapl & RAPL_PKG) outp += sprintf(outp, " Pkg_J"); if (do_rapl & RAPL_CORES) outp += sprintf(outp, " Cor_J"); if (do_rapl & RAPL_GFX) outp += sprintf(outp, " GFX_J"); if (do_rapl & RAPL_DRAM) outp += sprintf(outp, " RAM_J"); if (do_rapl & RAPL_PKG_PERF_STATUS) outp += sprintf(outp, " PKG_%%"); if (do_rapl & RAPL_DRAM_PERF_STATUS) outp += sprintf(outp, " RAM_%%"); outp += sprintf(outp, " time"); } done: outp += sprintf(outp, "\n"); } int dump_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) { outp += sprintf(outp, "t %p, c %p, p %p\n", t, c, p); if (t) { outp += sprintf(outp, "CPU: %d flags 0x%x\n", t->cpu_id, t->flags); outp += sprintf(outp, "TSC: %016llX\n", t->tsc); outp += sprintf(outp, "aperf: %016llX\n", t->aperf); outp += sprintf(outp, "mperf: %016llX\n", t->mperf); outp += sprintf(outp, "c1: %016llX\n", t->c1); outp += sprintf(outp, "msr0x%x: %08llX\n", extra_delta_offset32, t->extra_delta32); outp += sprintf(outp, "msr0x%x: %016llX\n", extra_delta_offset64, t->extra_delta64); outp += sprintf(outp, "msr0x%x: %08llX\n", extra_msr_offset32, t->extra_msr32); outp += sprintf(outp, "msr0x%x: %016llX\n", extra_msr_offset64, t->extra_msr64); if (do_smi) outp += sprintf(outp, "SMI: %08X\n", t->smi_count); } if (c) { outp += sprintf(outp, "core: %d\n", c->core_id); outp += sprintf(outp, "c3: %016llX\n", c->c3); outp += sprintf(outp, "c6: %016llX\n", c->c6); outp += sprintf(outp, "c7: %016llX\n", c->c7); outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c); } if (p) { outp += sprintf(outp, "package: %d\n", p->package_id); outp += sprintf(outp, "Weighted cores: %016llX\n", 
p->pkg_wtd_core_c0); outp += sprintf(outp, "Any cores: %016llX\n", p->pkg_any_core_c0); outp += sprintf(outp, "Any GFX: %016llX\n", p->pkg_any_gfxe_c0); outp += sprintf(outp, "CPU + GFX: %016llX\n", p->pkg_both_core_gfxe_c0); outp += sprintf(outp, "pc2: %016llX\n", p->pc2); if (do_pc3) outp += sprintf(outp, "pc3: %016llX\n", p->pc3); if (do_pc6) outp += sprintf(outp, "pc6: %016llX\n", p->pc6); if (do_pc7) outp += sprintf(outp, "pc7: %016llX\n", p->pc7); outp += sprintf(outp, "pc8: %016llX\n", p->pc8); outp += sprintf(outp, "pc9: %016llX\n", p->pc9); outp += sprintf(outp, "pc10: %016llX\n", p->pc10); outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg); outp += sprintf(outp, "Joules COR: %0X\n", p->energy_cores); outp += sprintf(outp, "Joules GFX: %0X\n", p->energy_gfx); outp += sprintf(outp, "Joules RAM: %0X\n", p->energy_dram); outp += sprintf(outp, "Throttle PKG: %0X\n", p->rapl_pkg_perf_status); outp += sprintf(outp, "Throttle RAM: %0X\n", p->rapl_dram_perf_status); outp += sprintf(outp, "PTM: %dC\n", p->pkg_temp_c); } outp += sprintf(outp, "\n"); return 0; } /* * column formatting convention & formats */ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) { double interval_float; char *fmt8; /* if showing only 1st thread in core and this isn't one, bail out */ if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) return 0; /* if showing only 1st thread in pkg and this isn't one, bail out */ if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) return 0; interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0; /* topo columns, print blanks on 1st (average) line */ if (t == &average.threads) { if (show_pkg) outp += sprintf(outp, " -"); if (show_core) outp += sprintf(outp, " -"); if (show_cpu) outp += sprintf(outp, " -"); } else { if (show_pkg) { if (p) outp += sprintf(outp, "%8d", p->package_id); else outp += sprintf(outp, " -"); } if (show_core) { if (c) outp += sprintf(outp, "%8d", c->core_id); 
else outp += sprintf(outp, " -"); } if (show_cpu) outp += sprintf(outp, "%8d", t->cpu_id); } /* Avg_MHz */ if (has_aperf) outp += sprintf(outp, "%8.0f", 1.0 / units * t->aperf / interval_float); /* %Busy */ if (has_aperf) { if (!skip_c0) outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc/tsc_tweak); else outp += sprintf(outp, "********"); } /* Bzy_MHz */ if (has_aperf) { if (has_base_hz) outp += sprintf(outp, "%8.0f", base_hz / units * t->aperf / t->mperf); else outp += sprintf(outp, "%8.0f", 1.0 * t->tsc / units * t->aperf / t->mperf / interval_float); } /* TSC_MHz */ outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float); /* delta */ if (extra_delta_offset32) outp += sprintf(outp, " %11llu", t->extra_delta32); /* DELTA */ if (extra_delta_offset64) outp += sprintf(outp, " %11llu", t->extra_delta64); /* msr */ if (extra_msr_offset32) outp += sprintf(outp, " 0x%08llx", t->extra_msr32); /* MSR */ if (extra_msr_offset64) outp += sprintf(outp, " 0x%016llx", t->extra_msr64); if (!debug) goto done; /* SMI */ if (do_smi) outp += sprintf(outp, "%8d", t->smi_count); if (do_nhm_cstates) { if (!skip_c1) outp += sprintf(outp, "%8.2f", 100.0 * t->c1/t->tsc); else outp += sprintf(outp, "********"); } /* print per-core data only for 1st thread in core */ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) goto done; if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) outp += sprintf(outp, "%8.2f", 100.0 * c->c3/t->tsc); if (do_nhm_cstates) outp += sprintf(outp, "%8.2f", 100.0 * c->c6/t->tsc); if (do_snb_cstates) outp += sprintf(outp, "%8.2f", 100.0 * c->c7/t->tsc); if (do_dts) outp += sprintf(outp, "%8d", c->core_temp_c); /* print per-package data only for 1st core in package */ if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) goto done; /* PkgTmp */ if (do_ptm) outp += sprintf(outp, "%8d", p->pkg_temp_c); /* Totl%C0, Any%C0 GFX%C0 CPUGFX% */ if (do_skl_residency) { outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_wtd_core_c0/t->tsc); outp += sprintf(outp, "%8.2f", 
100.0 * p->pkg_any_core_c0/t->tsc); outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_any_gfxe_c0/t->tsc); outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_both_core_gfxe_c0/t->tsc); } if (do_pc2) outp += sprintf(outp, "%8.2f", 100.0 * p->pc2/t->tsc); if (do_pc3) outp += sprintf(outp, "%8.2f", 100.0 * p->pc3/t->tsc); if (do_pc6) outp += sprintf(outp, "%8.2f", 100.0 * p->pc6/t->tsc); if (do_pc7) outp += sprintf(outp, "%8.2f", 100.0 * p->pc7/t->tsc); if (do_c8_c9_c10) { outp += sprintf(outp, "%8.2f", 100.0 * p->pc8/t->tsc); outp += sprintf(outp, "%8.2f", 100.0 * p->pc9/t->tsc); outp += sprintf(outp, "%8.2f", 100.0 * p->pc10/t->tsc); } /* * If measurement interval exceeds minimum RAPL Joule Counter range, * indicate that results are suspect by printing "**" in fraction place. */ if (interval_float < rapl_joule_counter_range) fmt8 = "%8.2f"; else fmt8 = " %6.0f**"; if (do_rapl && !rapl_joules) { if (do_rapl & RAPL_PKG) outp += sprintf(outp, fmt8, p->energy_pkg * rapl_energy_units / interval_float); if (do_rapl & RAPL_CORES) outp += sprintf(outp, fmt8, p->energy_cores * rapl_energy_units / interval_float); if (do_rapl & RAPL_GFX) outp += sprintf(outp, fmt8, p->energy_gfx * rapl_energy_units / interval_float); if (do_rapl & RAPL_DRAM) outp += sprintf(outp, fmt8, p->energy_dram * rapl_dram_energy_units / interval_float); if (do_rapl & RAPL_PKG_PERF_STATUS) outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float); if (do_rapl & RAPL_DRAM_PERF_STATUS) outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float); } else if (do_rapl && rapl_joules) { if (do_rapl & RAPL_PKG) outp += sprintf(outp, fmt8, p->energy_pkg * rapl_energy_units); if (do_rapl & RAPL_CORES) outp += sprintf(outp, fmt8, p->energy_cores * rapl_energy_units); if (do_rapl & RAPL_GFX) outp += sprintf(outp, fmt8, p->energy_gfx * rapl_energy_units); if (do_rapl & RAPL_DRAM) outp += sprintf(outp, fmt8, p->energy_dram * rapl_dram_energy_units); if 
(do_rapl & RAPL_PKG_PERF_STATUS) outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float); if (do_rapl & RAPL_DRAM_PERF_STATUS) outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float); outp += sprintf(outp, fmt8, interval_float); } done: outp += sprintf(outp, "\n"); return 0; } void flush_stdout() { fputs(output_buffer, stdout); fflush(stdout); outp = output_buffer; } void flush_stderr() { fputs(output_buffer, stderr); outp = output_buffer; } void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) { static int printed; if (!printed || !summary_only) print_header(); if (topo.num_cpus > 1) format_counters(&average.threads, &average.cores, &average.packages); printed = 1; if (summary_only) return; for_all_cpus(format_counters, t, c, p); } #define DELTA_WRAP32(new, old) \ if (new > old) { \ old = new - old; \ } else { \ old = 0x100000000 + new - old; \ } void delta_package(struct pkg_data *new, struct pkg_data *old) { if (do_skl_residency) { old->pkg_wtd_core_c0 = new->pkg_wtd_core_c0 - old->pkg_wtd_core_c0; old->pkg_any_core_c0 = new->pkg_any_core_c0 - old->pkg_any_core_c0; old->pkg_any_gfxe_c0 = new->pkg_any_gfxe_c0 - old->pkg_any_gfxe_c0; old->pkg_both_core_gfxe_c0 = new->pkg_both_core_gfxe_c0 - old->pkg_both_core_gfxe_c0; } old->pc2 = new->pc2 - old->pc2; if (do_pc3) old->pc3 = new->pc3 - old->pc3; if (do_pc6) old->pc6 = new->pc6 - old->pc6; if (do_pc7) old->pc7 = new->pc7 - old->pc7; old->pc8 = new->pc8 - old->pc8; old->pc9 = new->pc9 - old->pc9; old->pc10 = new->pc10 - old->pc10; old->pkg_temp_c = new->pkg_temp_c; DELTA_WRAP32(new->energy_pkg, old->energy_pkg); DELTA_WRAP32(new->energy_cores, old->energy_cores); DELTA_WRAP32(new->energy_gfx, old->energy_gfx); DELTA_WRAP32(new->energy_dram, old->energy_dram); DELTA_WRAP32(new->rapl_pkg_perf_status, old->rapl_pkg_perf_status); DELTA_WRAP32(new->rapl_dram_perf_status, old->rapl_dram_perf_status); } void 
delta_core(struct core_data *new, struct core_data *old) { old->c3 = new->c3 - old->c3; old->c6 = new->c6 - old->c6; old->c7 = new->c7 - old->c7; old->core_temp_c = new->core_temp_c; } /* * old = new - old */ void delta_thread(struct thread_data *new, struct thread_data *old, struct core_data *core_delta) { old->tsc = new->tsc - old->tsc; /* check for TSC < 1 Mcycles over interval */ if (old->tsc < (1000 * 1000)) errx(-3, "Insanely slow TSC rate, TSC stops in idle?\n" "You can disable all c-states by booting with \"idle=poll\"\n" "or just the deep ones with \"processor.max_cstate=1\""); old->c1 = new->c1 - old->c1; if (has_aperf) { if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) { old->aperf = new->aperf - old->aperf; old->mperf = new->mperf - old->mperf; } else { if (!aperf_mperf_unstable) { fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname); fprintf(stderr, "* Frequency results do not cover entire interval *\n"); fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n"); aperf_mperf_unstable = 1; } /* * mperf delta is likely a huge "positive" number * can not use it for calculating c0 time */ skip_c0 = 1; skip_c1 = 1; } } if (use_c1_residency_msr) { /* * Some models have a dedicated C1 residency MSR, * which should be more accurate than the derivation below. */ } else { /* * As counter collection is not atomic, * it is possible for mperf's non-halted cycles + idle states * to exceed TSC's all cycles: show c1 = 0% in that case. 
*/ if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc) old->c1 = 0; else { /* normal case, derive c1 */ old->c1 = old->tsc - old->mperf - core_delta->c3 - core_delta->c6 - core_delta->c7; } } if (old->mperf == 0) { if (debug > 1) fprintf(stderr, "cpu%d MPERF 0!\n", old->cpu_id); old->mperf = 1; /* divide by 0 protection */ } old->extra_delta32 = new->extra_delta32 - old->extra_delta32; old->extra_delta32 &= 0xFFFFFFFF; old->extra_delta64 = new->extra_delta64 - old->extra_delta64; /* * Extra MSR is just a snapshot, simply copy latest w/o subtracting */ old->extra_msr32 = new->extra_msr32; old->extra_msr64 = new->extra_msr64; if (do_smi) old->smi_count = new->smi_count - old->smi_count; } int delta_cpu(struct thread_data *t, struct core_data *c, struct pkg_data *p, struct thread_data *t2, struct core_data *c2, struct pkg_data *p2) { /* calculate core delta only for 1st thread in core */ if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE) delta_core(c, c2); /* always calculate thread delta */ delta_thread(t, t2, c2); /* c2 is core delta */ /* calculate package delta only for 1st core in package */ if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE) delta_package(p, p2); return 0; } void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) { t->tsc = 0; t->aperf = 0; t->mperf = 0; t->c1 = 0; t->smi_count = 0; t->extra_delta32 = 0; t->extra_delta64 = 0; /* tells format_counters to dump all fields from this set */ t->flags = CPU_IS_FIRST_THREAD_IN_CORE | CPU_IS_FIRST_CORE_IN_PACKAGE; c->c3 = 0; c->c6 = 0; c->c7 = 0; c->core_temp_c = 0; p->pkg_wtd_core_c0 = 0; p->pkg_any_core_c0 = 0; p->pkg_any_gfxe_c0 = 0; p->pkg_both_core_gfxe_c0 = 0; p->pc2 = 0; if (do_pc3) p->pc3 = 0; if (do_pc6) p->pc6 = 0; if (do_pc7) p->pc7 = 0; p->pc8 = 0; p->pc9 = 0; p->pc10 = 0; p->energy_pkg = 0; p->energy_dram = 0; p->energy_cores = 0; p->energy_gfx = 0; p->rapl_pkg_perf_status = 0; p->rapl_dram_perf_status = 0; p->pkg_temp_c = 0; } int 
sum_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) { average.threads.tsc += t->tsc; average.threads.aperf += t->aperf; average.threads.mperf += t->mperf; average.threads.c1 += t->c1; average.threads.extra_delta32 += t->extra_delta32; average.threads.extra_delta64 += t->extra_delta64; /* sum per-core values only for 1st thread in core */ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) return 0; average.cores.c3 += c->c3; average.cores.c6 += c->c6; average.cores.c7 += c->c7; average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c); /* sum per-pkg values only for 1st core in pkg */ if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) return 0; if (do_skl_residency) { average.packages.pkg_wtd_core_c0 += p->pkg_wtd_core_c0; average.packages.pkg_any_core_c0 += p->pkg_any_core_c0; average.packages.pkg_any_gfxe_c0 += p->pkg_any_gfxe_c0; average.packages.pkg_both_core_gfxe_c0 += p->pkg_both_core_gfxe_c0; } average.packages.pc2 += p->pc2; if (do_pc3) average.packages.pc3 += p->pc3; if (do_pc6) average.packages.pc6 += p->pc6; if (do_pc7) average.packages.pc7 += p->pc7; average.packages.pc8 += p->pc8; average.packages.pc9 += p->pc9; average.packages.pc10 += p->pc10; average.packages.energy_pkg += p->energy_pkg; average.packages.energy_dram += p->energy_dram; average.packages.energy_cores += p->energy_cores; average.packages.energy_gfx += p->energy_gfx; average.packages.pkg_temp_c = MAX(average.packages.pkg_temp_c, p->pkg_temp_c); average.packages.rapl_pkg_perf_status += p->rapl_pkg_perf_status; average.packages.rapl_dram_perf_status += p->rapl_dram_perf_status; return 0; } /* * sum the counters for all cpus in the system * compute the weighted average */ void compute_average(struct thread_data *t, struct core_data *c, struct pkg_data *p) { clear_counters(&average.threads, &average.cores, &average.packages); for_all_cpus(sum_counters, t, c, p); average.threads.tsc /= topo.num_cpus; average.threads.aperf /= topo.num_cpus; 
average.threads.mperf /= topo.num_cpus; average.threads.c1 /= topo.num_cpus; average.threads.extra_delta32 /= topo.num_cpus; average.threads.extra_delta32 &= 0xFFFFFFFF; average.threads.extra_delta64 /= topo.num_cpus; average.cores.c3 /= topo.num_cores; average.cores.c6 /= topo.num_cores; average.cores.c7 /= topo.num_cores; if (do_skl_residency) { average.packages.pkg_wtd_core_c0 /= topo.num_packages; average.packages.pkg_any_core_c0 /= topo.num_packages; average.packages.pkg_any_gfxe_c0 /= topo.num_packages; average.packages.pkg_both_core_gfxe_c0 /= topo.num_packages; } average.packages.pc2 /= topo.num_packages; if (do_pc3) average.packages.pc3 /= topo.num_packages; if (do_pc6) average.packages.pc6 /= topo.num_packages; if (do_pc7) average.packages.pc7 /= topo.num_packages; average.packages.pc8 /= topo.num_packages; average.packages.pc9 /= topo.num_packages; average.packages.pc10 /= topo.num_packages; } static unsigned long long rdtsc(void) { unsigned int low, high; asm volatile("rdtsc" : "=a" (low), "=d" (high)); return low | ((unsigned long long)high) << 32; } /* * get_counters(...) 
* migrate to cpu * acquire and record local counters for that cpu */ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) { int cpu = t->cpu_id; unsigned long long msr; if (cpu_migrate(cpu)) { fprintf(stderr, "Could not migrate to CPU %d\n", cpu); return -1; } t->tsc = rdtsc(); /* we are running on local CPU of interest */ if (has_aperf) { if (get_msr(cpu, MSR_IA32_APERF, &t->aperf)) return -3; if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf)) return -4; t->aperf = t->aperf * aperf_mperf_multiplier; t->mperf = t->mperf * aperf_mperf_multiplier; } if (do_smi) { if (get_msr(cpu, MSR_SMI_COUNT, &msr)) return -5; t->smi_count = msr & 0xFFFFFFFF; } if (extra_delta_offset32) { if (get_msr(cpu, extra_delta_offset32, &msr)) return -5; t->extra_delta32 = msr & 0xFFFFFFFF; } if (extra_delta_offset64) if (get_msr(cpu, extra_delta_offset64, &t->extra_delta64)) return -5; if (extra_msr_offset32) { if (get_msr(cpu, extra_msr_offset32, &msr)) return -5; t->extra_msr32 = msr & 0xFFFFFFFF; } if (extra_msr_offset64) if (get_msr(cpu, extra_msr_offset64, &t->extra_msr64)) return -5; if (use_c1_residency_msr) { if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1)) return -6; } /* collect core counters only for 1st thread in core */ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) return 0; if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) { if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) return -6; } if (do_nhm_cstates && !do_knl_cstates) { if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) return -7; } else if (do_knl_cstates) { if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6)) return -7; } if (do_snb_cstates) if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7)) return -8; if (do_dts) { if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr)) return -9; c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); } /* collect package counters only for 1st core in package */ if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) return 0; if (do_skl_residency) { if (get_msr(cpu, 
MSR_PKG_WEIGHTED_CORE_C0_RES, &p->pkg_wtd_core_c0)) return -10; if (get_msr(cpu, MSR_PKG_ANY_CORE_C0_RES, &p->pkg_any_core_c0)) return -11; if (get_msr(cpu, MSR_PKG_ANY_GFXE_C0_RES, &p->pkg_any_gfxe_c0)) return -12; if (get_msr(cpu, MSR_PKG_BOTH_CORE_GFXE_C0_RES, &p->pkg_both_core_gfxe_c0)) return -13; } if (do_pc3) if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3)) return -9; if (do_pc6) if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6)) return -10; if (do_pc2) if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2)) return -11; if (do_pc7) if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7)) return -12; if (do_c8_c9_c10) { if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8)) return -13; if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9)) return -13; if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10)) return -13; } if (do_rapl & RAPL_PKG) { if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr)) return -13; p->energy_pkg = msr & 0xFFFFFFFF; } if (do_rapl & RAPL_CORES) { if (get_msr(cpu, MSR_PP0_ENERGY_STATUS, &msr)) return -14; p->energy_cores = msr & 0xFFFFFFFF; } if (do_rapl & RAPL_DRAM) { if (get_msr(cpu, MSR_DRAM_ENERGY_STATUS, &msr)) return -15; p->energy_dram = msr & 0xFFFFFFFF; } if (do_rapl & RAPL_GFX) { if (get_msr(cpu, MSR_PP1_ENERGY_STATUS, &msr)) return -16; p->energy_gfx = msr & 0xFFFFFFFF; } if (do_rapl & RAPL_PKG_PERF_STATUS) { if (get_msr(cpu, MSR_PKG_PERF_STATUS, &msr)) return -16; p->rapl_pkg_perf_status = msr & 0xFFFFFFFF; } if (do_rapl & RAPL_DRAM_PERF_STATUS) { if (get_msr(cpu, MSR_DRAM_PERF_STATUS, &msr)) return -16; p->rapl_dram_perf_status = msr & 0xFFFFFFFF; } if (do_ptm) { if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr)) return -17; p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); } return 0; } /* * MSR_PKG_CST_CONFIG_CONTROL decoding for pkg_cstate_limit: * If you change the values, note they are used both in comparisons * (>= PCL__7) and to index pkg_cstate_limit_strings[]. 
*/ #define PCLUKN 0 /* Unknown */ #define PCLRSV 1 /* Reserved */ #define PCL__0 2 /* PC0 */ #define PCL__1 3 /* PC1 */ #define PCL__2 4 /* PC2 */ #define PCL__3 5 /* PC3 */ #define PCL__4 6 /* PC4 */ #define PCL__6 7 /* PC6 */ #define PCL_6N 8 /* PC6 No Retention */ #define PCL_6R 9 /* PC6 Retention */ #define PCL__7 10 /* PC7 */ #define PCL_7S 11 /* PC7 Shrink */ #define PCL__8 12 /* PC8 */ #define PCL__9 13 /* PC9 */ #define PCLUNL 14 /* Unlimited */ int pkg_cstate_limit = PCLUKN; char *pkg_cstate_limit_strings[] = { "reserved", "unknown", "pc0", "pc1", "pc2", "pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "pc8", "pc9", "unlimited"}; int nhm_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; int snb_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; int hsw_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; static void calculate_tsc_tweak() { tsc_tweak = base_hz / tsc_hz; } static void dump_nhm_platform_info(void) { unsigned long long msr; unsigned int ratio; get_msr(base_cpu, MSR_PLATFORM_INFO, &msr); fprintf(stderr, "cpu%d: MSR_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr); ratio = (msr >> 40) & 0xFF; fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency frequency\n", ratio, 
bclk, ratio * bclk); ratio = (msr >> 8) & 0xFF; fprintf(stderr, "%d * %.0f = %.0f MHz base frequency\n", ratio, bclk, ratio * bclk); get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr); fprintf(stderr, "cpu%d: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n", base_cpu, msr, msr & 0x2 ? "EN" : "DIS"); return; } static void dump_hsw_turbo_ratio_limits(void) { unsigned long long msr; unsigned int ratio; get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT2, &msr); fprintf(stderr, "cpu%d: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", base_cpu, msr); ratio = (msr >> 8) & 0xFF; if (ratio) fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 18 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 0) & 0xFF; if (ratio) fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 17 active cores\n", ratio, bclk, ratio * bclk); return; } static void dump_ivt_turbo_ratio_limits(void) { unsigned long long msr; unsigned int ratio; get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &msr); fprintf(stderr, "cpu%d: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", base_cpu, msr); ratio = (msr >> 56) & 0xFF; if (ratio) fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 16 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 48) & 0xFF; if (ratio) fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 15 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 40) & 0xFF; if (ratio) fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 14 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 32) & 0xFF; if (ratio) fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 13 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 24) & 0xFF; if (ratio) fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 12 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 16) & 0xFF; if (ratio) fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 11 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 8) & 0xFF; if (ratio) fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 10 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 0) & 
0xFF; if (ratio) fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 9 active cores\n", ratio, bclk, ratio * bclk); return; } static void dump_nhm_turbo_ratio_limits(void) { unsigned long long msr; unsigned int ratio; get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr); fprintf(stderr, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", base_cpu, msr); ratio = (msr >> 56) & 0xFF; if (ratio) fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 8 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 48) & 0xFF; if (ratio) fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 7 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 40) & 0xFF; if (ratio) fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 6 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 32) & 0xFF; if (ratio) fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 5 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 24) & 0xFF; if (ratio) fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 4 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 16) & 0xFF; if (ratio) fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 3 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 8) & 0xFF; if (ratio) fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 2 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 0) & 0xFF; if (ratio) fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 1 active cores\n", ratio, bclk, ratio * bclk); return; } static void dump_knl_turbo_ratio_limits(void) { int cores; unsigned int ratio; unsigned long long msr; int delta_cores; int delta_ratio; int i; get_msr(base_cpu, MSR_NHM_TURBO_RATIO_LIMIT, &msr); fprintf(stderr, "cpu%d: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n", base_cpu, msr); /** * Turbo encoding in KNL is as follows: * [7:0] -- Base value of number of active cores of bucket 1. * [15:8] -- Base value of freq ratio of bucket 1. * [20:16] -- +ve delta of number of active cores of bucket 2. * i.e. 
active cores of bucket 2 = * active cores of bucket 1 + delta * [23:21] -- Negative delta of freq ratio of bucket 2. * i.e. freq ratio of bucket 2 = * freq ratio of bucket 1 - delta * [28:24]-- +ve delta of number of active cores of bucket 3. * [31:29]-- -ve delta of freq ratio of bucket 3. * [36:32]-- +ve delta of number of active cores of bucket 4. * [39:37]-- -ve delta of freq ratio of bucket 4. * [44:40]-- +ve delta of number of active cores of bucket 5. * [47:45]-- -ve delta of freq ratio of bucket 5. * [52:48]-- +ve delta of number of active cores of bucket 6. * [55:53]-- -ve delta of freq ratio of bucket 6. * [60:56]-- +ve delta of number of active cores of bucket 7. * [63:61]-- -ve delta of freq ratio of bucket 7. */ cores = msr & 0xFF; ratio = (msr >> 8) && 0xFF; if (ratio > 0) fprintf(stderr, "%d * %.0f = %.0f MHz max turbo %d active cores\n", ratio, bclk, ratio * bclk, cores); for (i = 16; i < 64; i = i + 8) { delta_cores = (msr >> i) & 0x1F; delta_ratio = (msr >> (i + 5)) && 0x7; if (!delta_cores || !delta_ratio) return; cores = cores + delta_cores; ratio = ratio - delta_ratio; /** -ve ratios will make successive ratio calculations * negative. Hence return instead of carrying on. */ if (ratio > 0) fprintf(stderr, "%d * %.0f = %.0f MHz max turbo %d active cores\n", ratio, bclk, ratio * bclk, cores); } } static void dump_nhm_cst_cfg(void) { unsigned long long msr; get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); #define SNB_C1_AUTO_UNDEMOTE (1UL << 27) #define SNB_C3_AUTO_UNDEMOTE (1UL << 28) fprintf(stderr, "cpu%d: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", base_cpu, msr); fprintf(stderr, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: %s)\n", (msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "", (msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "", (msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "", (msr & NHM_C1_AUTO_DEMOTE) ? "demote-C1, " : "", (msr & (1 << 15)) ? 
"" : "UN", (unsigned int)msr & 7, pkg_cstate_limit_strings[pkg_cstate_limit]); return; } static void dump_config_tdp(void) { unsigned long long msr; get_msr(base_cpu, MSR_CONFIG_TDP_NOMINAL, &msr); fprintf(stderr, "cpu%d: MSR_CONFIG_TDP_NOMINAL: 0x%08llx", base_cpu, msr); fprintf(stderr, " (base_ratio=%d)\n", (unsigned int)msr & 0xEF); get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_1, &msr); fprintf(stderr, "cpu%d: MSR_CONFIG_TDP_LEVEL_1: 0x%08llx (", base_cpu, msr); if (msr) { fprintf(stderr, "PKG_MIN_PWR_LVL1=%d ", (unsigned int)(msr >> 48) & 0xEFFF); fprintf(stderr, "PKG_MAX_PWR_LVL1=%d ", (unsigned int)(msr >> 32) & 0xEFFF); fprintf(stderr, "LVL1_RATIO=%d ", (unsigned int)(msr >> 16) & 0xEF); fprintf(stderr, "PKG_TDP_LVL1=%d", (unsigned int)(msr) & 0xEFFF); } fprintf(stderr, ")\n"); get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_2, &msr); fprintf(stderr, "cpu%d: MSR_CONFIG_TDP_LEVEL_2: 0x%08llx (", base_cpu, msr); if (msr) { fprintf(stderr, "PKG_MIN_PWR_LVL2=%d ", (unsigned int)(msr >> 48) & 0xEFFF); fprintf(stderr, "PKG_MAX_PWR_LVL2=%d ", (unsigned int)(msr >> 32) & 0xEFFF); fprintf(stderr, "LVL2_RATIO=%d ", (unsigned int)(msr >> 16) & 0xEF); fprintf(stderr, "PKG_TDP_LVL2=%d", (unsigned int)(msr) & 0xEFFF); } fprintf(stderr, ")\n"); get_msr(base_cpu, MSR_CONFIG_TDP_CONTROL, &msr); fprintf(stderr, "cpu%d: MSR_CONFIG_TDP_CONTROL: 0x%08llx (", base_cpu, msr); if ((msr) & 0x3) fprintf(stderr, "TDP_LEVEL=%d ", (unsigned int)(msr) & 0x3); fprintf(stderr, " lock=%d", (unsigned int)(msr >> 31) & 1); fprintf(stderr, ")\n"); get_msr(base_cpu, MSR_TURBO_ACTIVATION_RATIO, &msr); fprintf(stderr, "cpu%d: MSR_TURBO_ACTIVATION_RATIO: 0x%08llx (", base_cpu, msr); fprintf(stderr, "MAX_NON_TURBO_RATIO=%d", (unsigned int)(msr) & 0x7F); fprintf(stderr, " lock=%d", (unsigned int)(msr >> 31) & 1); fprintf(stderr, ")\n"); } void free_all_buffers(void) { CPU_FREE(cpu_present_set); cpu_present_set = NULL; cpu_present_set = 0; CPU_FREE(cpu_affinity_set); cpu_affinity_set = NULL; cpu_affinity_setsize = 
0; free(thread_even); free(core_even); free(package_even); thread_even = NULL; core_even = NULL; package_even = NULL; free(thread_odd); free(core_odd); free(package_odd); thread_odd = NULL; core_odd = NULL; package_odd = NULL; free(output_buffer); output_buffer = NULL; outp = NULL; } /* * Open a file, and exit on failure */ FILE *fopen_or_die(const char *path, const char *mode) { FILE *filep = fopen(path, "r"); if (!filep) err(1, "%s: open failed", path); return filep; } /* * Parse a file containing a single int. */ int parse_int_file(const char *fmt, ...) { va_list args; char path[PATH_MAX]; FILE *filep; int value; va_start(args, fmt); vsnprintf(path, sizeof(path), fmt, args); va_end(args); filep = fopen_or_die(path, "r"); if (fscanf(filep, "%d", &value) != 1) err(1, "%s: failed to parse number from file", path); fclose(filep); return value; } /* * get_cpu_position_in_core(cpu) * return the position of the CPU among its HT siblings in the core * return -1 if the sibling is not in list */ int get_cpu_position_in_core(int cpu) { char path[64]; FILE *filep; int this_cpu; char character; int i; sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu); filep = fopen(path, "r"); if (filep == NULL) { perror(path); exit(1); } for (i = 0; i < topo.num_threads_per_core; i++) { fscanf(filep, "%d", &this_cpu); if (this_cpu == cpu) { fclose(filep); return i; } /* Account for no separator after last thread*/ if (i != (topo.num_threads_per_core - 1)) fscanf(filep, "%c", &character); } fclose(filep); return -1; } /* * cpu_is_first_core_in_package(cpu) * return 1 if given CPU is 1st core in package */ int cpu_is_first_core_in_package(int cpu) { return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu); } int get_physical_package_id(int cpu) { return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu); } int get_core_id(int cpu) { return 
parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu); } int get_num_ht_siblings(int cpu) { char path[80]; FILE *filep; int sib1; int matches = 0; char character; char str[100]; char *ch; sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu); filep = fopen_or_die(path, "r"); /* * file format: * A ',' separated or '-' separated set of numbers * (eg 1-2 or 1,3,4,5) */ fscanf(filep, "%d%c\n", &sib1, &character); fseek(filep, 0, SEEK_SET); fgets(str, 100, filep); ch = strchr(str, character); while (ch != NULL) { matches++; ch = strchr(ch+1, character); } fclose(filep); return matches+1; } /* * run func(thread, core, package) in topology order * skip non-present cpus */ int for_all_cpus_2(int (func)(struct thread_data *, struct core_data *, struct pkg_data *, struct thread_data *, struct core_data *, struct pkg_data *), struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base, struct thread_data *thread_base2, struct core_data *core_base2, struct pkg_data *pkg_base2) { int retval, pkg_no, core_no, thread_no; for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) { for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) { for (thread_no = 0; thread_no < topo.num_threads_per_core; ++thread_no) { struct thread_data *t, *t2; struct core_data *c, *c2; struct pkg_data *p, *p2; t = GET_THREAD(thread_base, thread_no, core_no, pkg_no); if (cpu_is_not_present(t->cpu_id)) continue; t2 = GET_THREAD(thread_base2, thread_no, core_no, pkg_no); c = GET_CORE(core_base, core_no, pkg_no); c2 = GET_CORE(core_base2, core_no, pkg_no); p = GET_PKG(pkg_base, pkg_no); p2 = GET_PKG(pkg_base2, pkg_no); retval = func(t, c, p, t2, c2, p2); if (retval) return retval; } } } return 0; } /* * run func(cpu) on every cpu in /proc/stat * return max_cpu number */ int for_all_proc_cpus(int (func)(int)) { FILE *fp; int cpu_num; int retval; fp = fopen_or_die(proc_stat, "r"); retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d 
%*d %*d %*d\n"); if (retval != 0) err(1, "%s: failed to parse format", proc_stat); while (1) { retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num); if (retval != 1) break; retval = func(cpu_num); if (retval) { fclose(fp); return(retval); } } fclose(fp); return 0; } void re_initialize(void) { free_all_buffers(); setup_all_buffers(); printf("turbostat: re-initialized with num_cpus %d\n", topo.num_cpus); } /* * count_cpus() * remember the last one seen, it will be the max */ int count_cpus(int cpu) { if (topo.max_cpu_num < cpu) topo.max_cpu_num = cpu; topo.num_cpus += 1; return 0; } int mark_cpu_present(int cpu) { CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set); return 0; } void turbostat_loop() { int retval; int restarted = 0; restart: restarted++; retval = for_all_cpus(get_counters, EVEN_COUNTERS); if (retval < -1) { exit(retval); } else if (retval == -1) { if (restarted > 1) { exit(retval); } re_initialize(); goto restart; } restarted = 0; gettimeofday(&tv_even, (struct timezone *)NULL); while (1) { if (for_all_proc_cpus(cpu_is_not_present)) { re_initialize(); goto restart; } sleep(interval_sec); retval = for_all_cpus(get_counters, ODD_COUNTERS); if (retval < -1) { exit(retval); } else if (retval == -1) { re_initialize(); goto restart; } gettimeofday(&tv_odd, (struct timezone *)NULL); timersub(&tv_odd, &tv_even, &tv_delta); for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS); compute_average(EVEN_COUNTERS); format_all_counters(EVEN_COUNTERS); flush_stdout(); sleep(interval_sec); retval = for_all_cpus(get_counters, EVEN_COUNTERS); if (retval < -1) { exit(retval); } else if (retval == -1) { re_initialize(); goto restart; } gettimeofday(&tv_even, (struct timezone *)NULL); timersub(&tv_even, &tv_odd, &tv_delta); for_all_cpus_2(delta_cpu, EVEN_COUNTERS, ODD_COUNTERS); compute_average(ODD_COUNTERS); format_all_counters(ODD_COUNTERS); flush_stdout(); } } void check_dev_msr() { struct stat sb; char pathname[32]; sprintf(pathname, 
"/dev/cpu/%d/msr", base_cpu); if (stat(pathname, &sb)) if (system("/sbin/modprobe msr > /dev/null 2>&1")) err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" "); } void check_permissions() { struct __user_cap_header_struct cap_header_data; cap_user_header_t cap_header = &cap_header_data; struct __user_cap_data_struct cap_data_data; cap_user_data_t cap_data = &cap_data_data; extern int capget(cap_user_header_t hdrp, cap_user_data_t datap); int do_exit = 0; char pathname[32]; /* check for CAP_SYS_RAWIO */ cap_header->pid = getpid(); cap_header->version = _LINUX_CAPABILITY_VERSION; if (capget(cap_header, cap_data) < 0) err(-6, "capget(2) failed"); if ((cap_data->effective & (1 << CAP_SYS_RAWIO)) == 0) { do_exit++; warnx("capget(CAP_SYS_RAWIO) failed," " try \"# setcap cap_sys_rawio=ep %s\"", progname); } /* test file permissions */ sprintf(pathname, "/dev/cpu/%d/msr", base_cpu); if (euidaccess(pathname, R_OK)) { do_exit++; warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr"); } /* if all else fails, thell them to be root */ if (do_exit) if (getuid() != 0) warnx("... 
or simply run as root"); if (do_exit) exit(-6); } /* * NHM adds support for additional MSRs: * * MSR_SMI_COUNT 0x00000034 * * MSR_PLATFORM_INFO 0x000000ce * MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 * * MSR_PKG_C3_RESIDENCY 0x000003f8 * MSR_PKG_C6_RESIDENCY 0x000003f9 * MSR_CORE_C3_RESIDENCY 0x000003fc * MSR_CORE_C6_RESIDENCY 0x000003fd * * Side effect: * sets global pkg_cstate_limit to decode MSR_NHM_SNB_PKG_CST_CFG_CTL */ int probe_nhm_msrs(unsigned int family, unsigned int model) { unsigned long long msr; unsigned int base_ratio; int *pkg_cstate_limits; if (!genuine_intel) return 0; if (family != 6) return 0; bclk = discover_bclk(family, model); switch (model) { case 0x1A: /* Core i7, Xeon 5500 series - Bloomfield, Gainstown NHM-EP */ case 0x1E: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */ case 0x1F: /* Core i7 and i5 Processor - Nehalem */ case 0x25: /* Westmere Client - Clarkdale, Arrandale */ case 0x2C: /* Westmere EP - Gulftown */ case 0x2E: /* Nehalem-EX Xeon - Beckton */ case 0x2F: /* Westmere-EX Xeon - Eagleton */ pkg_cstate_limits = nhm_pkg_cstate_limits; break; case 0x2A: /* SNB */ case 0x2D: /* SNB Xeon */ case 0x3A: /* IVB */ case 0x3E: /* IVB Xeon */ pkg_cstate_limits = snb_pkg_cstate_limits; break; case 0x3C: /* HSW */ case 0x3F: /* HSX */ case 0x45: /* HSW */ case 0x46: /* HSW */ case 0x3D: /* BDW */ case 0x47: /* BDW */ case 0x4F: /* BDX */ case 0x56: /* BDX-DE */ case 0x4E: /* SKL */ case 0x5E: /* SKL */ pkg_cstate_limits = hsw_pkg_cstate_limits; break; case 0x37: /* BYT */ case 0x4D: /* AVN */ pkg_cstate_limits = slv_pkg_cstate_limits; break; case 0x4C: /* AMT */ pkg_cstate_limits = amt_pkg_cstate_limits; break; case 0x57: /* PHI */ pkg_cstate_limits = phi_pkg_cstate_limits; break; default: return 0; } get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); pkg_cstate_limit = pkg_cstate_limits[msr & 0xF]; get_msr(base_cpu, MSR_PLATFORM_INFO, &msr); base_ratio = (msr >> 8) & 0xFF; base_hz = base_ratio * bclk * 1000000; 
has_base_hz = 1; return 1; } int has_nhm_turbo_ratio_limit(unsigned int family, unsigned int model) { switch (model) { /* Nehalem compatible, but do not include turbo-ratio limit support */ case 0x2E: /* Nehalem-EX Xeon - Beckton */ case 0x2F: /* Westmere-EX Xeon - Eagleton */ return 0; default: return 1; } } int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model) { if (!genuine_intel) return 0; if (family != 6) return 0; switch (model) { case 0x3E: /* IVB Xeon */ case 0x3F: /* HSW Xeon */ return 1; default: return 0; } } int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model) { if (!genuine_intel) return 0; if (family != 6) return 0; switch (model) { case 0x3F: /* HSW Xeon */ return 1; default: return 0; } } int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model) { if (!genuine_intel) return 0; if (family != 6) return 0; switch (model) { case 0x57: /* Knights Landing */ return 1; default: return 0; } } int has_config_tdp(unsigned int family, unsigned int model) { if (!genuine_intel) return 0; if (family != 6) return 0; switch (model) { case 0x3A: /* IVB */ case 0x3C: /* HSW */ case 0x3F: /* HSX */ case 0x45: /* HSW */ case 0x46: /* HSW */ case 0x3D: /* BDW */ case 0x47: /* BDW */ case 0x4F: /* BDX */ case 0x56: /* BDX-DE */ case 0x4E: /* SKL */ case 0x5E: /* SKL */ case 0x57: /* Knights Landing */ return 1; default: return 0; } } static void dump_cstate_pstate_config_info(family, model) { if (!do_nhm_platform_info) return; dump_nhm_platform_info(); if (has_hsw_turbo_ratio_limit(family, model)) dump_hsw_turbo_ratio_limits(); if (has_ivt_turbo_ratio_limit(family, model)) dump_ivt_turbo_ratio_limits(); if (has_nhm_turbo_ratio_limit(family, model)) dump_nhm_turbo_ratio_limits(); if (has_knl_turbo_ratio_limit(family, model)) dump_knl_turbo_ratio_limits(); if (has_config_tdp(family, model)) dump_config_tdp(); dump_nhm_cst_cfg(); } /* * print_epb() * Decode the ENERGY_PERF_BIAS MSR */ int print_epb(struct thread_data *t, 
struct core_data *c, struct pkg_data *p) { unsigned long long msr; char *epb_string; int cpu; if (!has_epb) return 0; cpu = t->cpu_id; /* EPB is per-package */ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) return 0; if (cpu_migrate(cpu)) { fprintf(stderr, "Could not migrate to CPU %d\n", cpu); return -1; } if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr)) return 0; switch (msr & 0xF) { case ENERGY_PERF_BIAS_PERFORMANCE: epb_string = "performance"; break; case ENERGY_PERF_BIAS_NORMAL: epb_string = "balanced"; break; case ENERGY_PERF_BIAS_POWERSAVE: epb_string = "powersave"; break; default: epb_string = "custom"; break; } fprintf(stderr, "cpu%d: MSR_IA32_ENERGY_PERF_BIAS: 0x%08llx (%s)\n", cpu, msr, epb_string); return 0; } /* * print_perf_limit() */ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data *p) { unsigned long long msr; int cpu; cpu = t->cpu_id; /* per-package */ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) return 0; if (cpu_migrate(cpu)) { fprintf(stderr, "Could not migrate to CPU %d\n", cpu); return -1; } if (do_core_perf_limit_reasons) { get_msr(cpu, MSR_CORE_PERF_LIMIT_REASONS, &msr); fprintf(stderr, "cpu%d: MSR_CORE_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr); fprintf(stderr, " (Active: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)", (msr & 1 << 15) ? "bit15, " : "", (msr & 1 << 14) ? "bit14, " : "", (msr & 1 << 13) ? "Transitions, " : "", (msr & 1 << 12) ? "MultiCoreTurbo, " : "", (msr & 1 << 11) ? "PkgPwrL2, " : "", (msr & 1 << 10) ? "PkgPwrL1, " : "", (msr & 1 << 9) ? "CorePwr, " : "", (msr & 1 << 8) ? "Amps, " : "", (msr & 1 << 6) ? "VR-Therm, " : "", (msr & 1 << 5) ? "Auto-HWP, " : "", (msr & 1 << 4) ? "Graphics, " : "", (msr & 1 << 2) ? "bit2, " : "", (msr & 1 << 1) ? "ThermStatus, " : "", (msr & 1 << 0) ? "PROCHOT, " : ""); fprintf(stderr, " (Logged: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n", (msr & 1 << 31) ? "bit31, " : "", (msr & 1 << 30) ? 
"bit30, " : "", (msr & 1 << 29) ? "Transitions, " : "", (msr & 1 << 28) ? "MultiCoreTurbo, " : "", (msr & 1 << 27) ? "PkgPwrL2, " : "", (msr & 1 << 26) ? "PkgPwrL1, " : "", (msr & 1 << 25) ? "CorePwr, " : "", (msr & 1 << 24) ? "Amps, " : "", (msr & 1 << 22) ? "VR-Therm, " : "", (msr & 1 << 21) ? "Auto-HWP, " : "", (msr & 1 << 20) ? "Graphics, " : "", (msr & 1 << 18) ? "bit18, " : "", (msr & 1 << 17) ? "ThermStatus, " : "", (msr & 1 << 16) ? "PROCHOT, " : ""); } if (do_gfx_perf_limit_reasons) { get_msr(cpu, MSR_GFX_PERF_LIMIT_REASONS, &msr); fprintf(stderr, "cpu%d: MSR_GFX_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr); fprintf(stderr, " (Active: %s%s%s%s%s%s%s%s)", (msr & 1 << 0) ? "PROCHOT, " : "", (msr & 1 << 1) ? "ThermStatus, " : "", (msr & 1 << 4) ? "Graphics, " : "", (msr & 1 << 6) ? "VR-Therm, " : "", (msr & 1 << 8) ? "Amps, " : "", (msr & 1 << 9) ? "GFXPwr, " : "", (msr & 1 << 10) ? "PkgPwrL1, " : "", (msr & 1 << 11) ? "PkgPwrL2, " : ""); fprintf(stderr, " (Logged: %s%s%s%s%s%s%s%s)\n", (msr & 1 << 16) ? "PROCHOT, " : "", (msr & 1 << 17) ? "ThermStatus, " : "", (msr & 1 << 20) ? "Graphics, " : "", (msr & 1 << 22) ? "VR-Therm, " : "", (msr & 1 << 24) ? "Amps, " : "", (msr & 1 << 25) ? "GFXPwr, " : "", (msr & 1 << 26) ? "PkgPwrL1, " : "", (msr & 1 << 27) ? "PkgPwrL2, " : ""); } if (do_ring_perf_limit_reasons) { get_msr(cpu, MSR_RING_PERF_LIMIT_REASONS, &msr); fprintf(stderr, "cpu%d: MSR_RING_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr); fprintf(stderr, " (Active: %s%s%s%s%s%s)", (msr & 1 << 0) ? "PROCHOT, " : "", (msr & 1 << 1) ? "ThermStatus, " : "", (msr & 1 << 6) ? "VR-Therm, " : "", (msr & 1 << 8) ? "Amps, " : "", (msr & 1 << 10) ? "PkgPwrL1, " : "", (msr & 1 << 11) ? "PkgPwrL2, " : ""); fprintf(stderr, " (Logged: %s%s%s%s%s%s)\n", (msr & 1 << 16) ? "PROCHOT, " : "", (msr & 1 << 17) ? "ThermStatus, " : "", (msr & 1 << 22) ? "VR-Therm, " : "", (msr & 1 << 24) ? "Amps, " : "", (msr & 1 << 26) ? "PkgPwrL1, " : "", (msr & 1 << 27) ? 
"PkgPwrL2, " : ""); } return 0; } #define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */ #define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */ double get_tdp(model) { unsigned long long msr; if (do_rapl & RAPL_PKG_POWER_INFO) if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr)) return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units; switch (model) { case 0x37: case 0x4D: return 30.0; default: return 135.0; } } /* * rapl_dram_energy_units_probe() * Energy units are either hard-coded, or come from RAPL Energy Unit MSR. */ static double rapl_dram_energy_units_probe(int model, double rapl_energy_units) { /* only called for genuine_intel, family 6 */ switch (model) { case 0x3F: /* HSX */ case 0x4F: /* BDX */ case 0x56: /* BDX-DE */ case 0x57: /* KNL */ return (rapl_dram_energy_units = 15.3 / 1000000); default: return (rapl_energy_units); } } /* * rapl_probe() * * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units */ void rapl_probe(unsigned int family, unsigned int model) { unsigned long long msr; unsigned int time_unit; double tdp; if (!genuine_intel) return; if (family != 6) return; switch (model) { case 0x2A: case 0x3A: case 0x3C: /* HSW */ case 0x45: /* HSW */ case 0x46: /* HSW */ case 0x3D: /* BDW */ case 0x47: /* BDW */ do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO; break; case 0x4E: /* SKL */ case 0x5E: /* SKL */ do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; break; case 0x3F: /* HSX */ case 0x4F: /* BDX */ case 0x56: /* BDX-DE */ case 0x57: /* KNL */ do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; break; case 0x2D: case 0x3E: do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO; break; case 0x37: /* BYT */ case 0x4D: /* AVN */ do_rapl = RAPL_PKG | 
RAPL_CORES ; break; default: return; } /* units on package 0, verify later other packages match */ if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr)) return; rapl_power_units = 1.0 / (1 << (msr & 0xF)); if (model == 0x37) rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000; else rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F)); rapl_dram_energy_units = rapl_dram_energy_units_probe(model, rapl_energy_units); time_unit = msr >> 16 & 0xF; if (time_unit == 0) time_unit = 0xA; rapl_time_units = 1.0 / (1 << (time_unit)); tdp = get_tdp(model); rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; if (debug) fprintf(stderr, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp); return; } void perf_limit_reasons_probe(family, model) { if (!genuine_intel) return; if (family != 6) return; switch (model) { case 0x3C: /* HSW */ case 0x45: /* HSW */ case 0x46: /* HSW */ do_gfx_perf_limit_reasons = 1; case 0x3F: /* HSX */ do_core_perf_limit_reasons = 1; do_ring_perf_limit_reasons = 1; default: return; } } int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p) { unsigned long long msr; unsigned int dts; int cpu; if (!(do_dts || do_ptm)) return 0; cpu = t->cpu_id; /* DTS is per-core, no need to print for each thread */ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) return 0; if (cpu_migrate(cpu)) { fprintf(stderr, "Could not migrate to CPU %d\n", cpu); return -1; } if (do_ptm && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) { if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr)) return 0; dts = (msr >> 16) & 0x7F; fprintf(stderr, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n", cpu, msr, tcc_activation_temp - dts); #ifdef THERM_DEBUG if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr)) return 0; dts = (msr >> 16) & 0x7F; dts2 = (msr >> 8) & 0x7F; fprintf(stderr, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - 
dts2); #endif } if (do_dts) { unsigned int resolution; if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr)) return 0; dts = (msr >> 16) & 0x7F; resolution = (msr >> 27) & 0xF; fprintf(stderr, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n", cpu, msr, tcc_activation_temp - dts, resolution); #ifdef THERM_DEBUG if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr)) return 0; dts = (msr >> 16) & 0x7F; dts2 = (msr >> 8) & 0x7F; fprintf(stderr, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2); #endif } return 0; } void print_power_limit_msr(int cpu, unsigned long long msr, char *label) { fprintf(stderr, "cpu%d: %s: %sabled (%f Watts, %f sec, clamp %sabled)\n", cpu, label, ((msr >> 15) & 1) ? "EN" : "DIS", ((msr >> 0) & 0x7FFF) * rapl_power_units, (1.0 + (((msr >> 22) & 0x3)/4.0)) * (1 << ((msr >> 17) & 0x1F)) * rapl_time_units, (((msr >> 16) & 1) ? "EN" : "DIS")); return; } int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) { unsigned long long msr; int cpu; if (!do_rapl) return 0; /* RAPL counters are per package, so print only for 1st thread/package */ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) return 0; cpu = t->cpu_id; if (cpu_migrate(cpu)) { fprintf(stderr, "Could not migrate to CPU %d\n", cpu); return -1; } if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr)) return -1; if (debug) { fprintf(stderr, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx " "(%f Watts, %f Joules, %f sec.)\n", cpu, msr, rapl_power_units, rapl_energy_units, rapl_time_units); } if (do_rapl & RAPL_PKG_POWER_INFO) { if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr)) return -5; fprintf(stderr, "cpu%d: MSR_PKG_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n", cpu, msr, ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units, ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units, ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units, ((msr >> 48) & 
RAPL_TIME_GRANULARITY) * rapl_time_units); } if (do_rapl & RAPL_PKG) { if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr)) return -9; fprintf(stderr, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n", cpu, msr, (msr >> 63) & 1 ? "": "UN"); print_power_limit_msr(cpu, msr, "PKG Limit #1"); fprintf(stderr, "cpu%d: PKG Limit #2: %sabled (%f Watts, %f* sec, clamp %sabled)\n", cpu, ((msr >> 47) & 1) ? "EN" : "DIS", ((msr >> 32) & 0x7FFF) * rapl_power_units, (1.0 + (((msr >> 54) & 0x3)/4.0)) * (1 << ((msr >> 49) & 0x1F)) * rapl_time_units, ((msr >> 48) & 1) ? "EN" : "DIS"); } if (do_rapl & RAPL_DRAM_POWER_INFO) { if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr)) return -6; fprintf(stderr, "cpu%d: MSR_DRAM_POWER_INFO,: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n", cpu, msr, ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units, ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units, ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units, ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units); } if (do_rapl & RAPL_DRAM) { if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr)) return -9; fprintf(stderr, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n", cpu, msr, (msr >> 31) & 1 ? "": "UN"); print_power_limit_msr(cpu, msr, "DRAM Limit"); } if (do_rapl & RAPL_CORE_POLICY) { if (debug) { if (get_msr(cpu, MSR_PP0_POLICY, &msr)) return -7; fprintf(stderr, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF); } } if (do_rapl & RAPL_CORES) { if (debug) { if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr)) return -9; fprintf(stderr, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n", cpu, msr, (msr >> 31) & 1 ? "": "UN"); print_power_limit_msr(cpu, msr, "Cores Limit"); } } if (do_rapl & RAPL_GFX) { if (debug) { if (get_msr(cpu, MSR_PP1_POLICY, &msr)) return -8; fprintf(stderr, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF); if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr)) return -9; fprintf(stderr, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n", cpu, msr, (msr >> 31) & 1 ? 
"": "UN");	/* tail of print_rapl(): %slocked decode for MSR_PP1_POWER_LIMIT */

			print_power_limit_msr(cpu, msr, "GFX Limit");
		}
	}
	return 0;
}

/*
 * SNB adds support for additional MSRs:
 *
 * MSR_PKG_C7_RESIDENCY            0x000003fa
 * MSR_CORE_C7_RESIDENCY           0x000003fe
 * MSR_PKG_C2_RESIDENCY            0x0000060d
 */
int has_snb_msrs(unsigned int family, unsigned int model)
{
	/* NOTE(review): unlike the turbo-ratio probes, this does not
	 * re-check family == 6; "family" is accepted but unused here --
	 * confirm intentional. */
	if (!genuine_intel)
		return 0;

	switch (model) {
	case 0x2A:
	case 0x2D:
	case 0x3A:	/* IVB */
	case 0x3E:	/* IVB Xeon */
	case 0x3C:	/* HSW */
	case 0x3F:	/* HSW */
	case 0x45:	/* HSW */
	case 0x46:	/* HSW */
	case 0x3D:	/* BDW */
	case 0x47:	/* BDW */
	case 0x4F:	/* BDX */
	case 0x56:	/* BDX-DE */
	case 0x4E:	/* SKL */
	case 0x5E:	/* SKL */
		return 1;
	}
	return 0;
}

/*
 * HSW adds support for additional MSRs:
 *
 * MSR_PKG_C8_RESIDENCY            0x00000630
 * MSR_PKG_C9_RESIDENCY            0x00000631
 * MSR_PKG_C10_RESIDENCY           0x00000632
 */
int has_hsw_msrs(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	switch (model) {
	case 0x45:	/* HSW */
	case 0x3D:	/* BDW */
	case 0x4E:	/* SKL */
	case 0x5E:	/* SKL */
		return 1;
	}
	return 0;
}

/*
 * SKL adds support for additional MSRS:
 *
 * MSR_PKG_WEIGHTED_CORE_C0_RES    0x00000658
 * MSR_PKG_ANY_CORE_C0_RES         0x00000659
 * MSR_PKG_ANY_GFXE_C0_RES         0x0000065A
 * MSR_PKG_BOTH_CORE_GFXE_C0_RES   0x0000065B
 */
int has_skl_msrs(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	switch (model) {
	case 0x4E:	/* SKL */
	case 0x5E:	/* SKL */
		return 1;
	}
	return 0;
}

/* True on Silvermont-class Atom (Baytrail, Avoton). */
int is_slm(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	switch (model) {
	case 0x37:	/* BYT */
	case 0x4D:	/* AVN */
		return 1;
	}
	return 0;
}

/* True on Knights Landing. */
int is_knl(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	switch (model) {
	case 0x57:	/* KNL */
		return 1;
	}
	return 0;
}

/* KNL counts APERF/MPERF in 1024-cycle units; everything else in cycles. */
unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model)
{
	if (is_knl(family, model))
		return 1024;
	return 1;
}

#define SLM_BCLK_FREQS 5
/* BCLK frequencies (MHz) indexed by the low nibble of MSR_FSB_FREQ */
double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};

/* Decode the Silvermont base clock from MSR_FSB_FREQ. */
double slm_bclk(void)
{
	unsigned long long msr = 3;	/* default encoding, used if the MSR read fails */
	unsigned int i;
double freq; if (get_msr(base_cpu, MSR_FSB_FREQ, &msr)) fprintf(stderr, "SLM BCLK: unknown\n"); i = msr & 0xf; if (i >= SLM_BCLK_FREQS) { fprintf(stderr, "SLM BCLK[%d] invalid\n", i); msr = 3; } freq = slm_freq_table[i]; fprintf(stderr, "SLM BCLK: %.1f Mhz\n", freq); return freq; } double discover_bclk(unsigned int family, unsigned int model) { if (has_snb_msrs(family, model)) return 100.00; else if (is_slm(family, model)) return slm_bclk(); else return 133.33; } /* * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where * the Thermal Control Circuit (TCC) activates. * This is usually equal to tjMax. * * Older processors do not have this MSR, so there we guess, * but also allow cmdline over-ride with -T. * * Several MSR temperature values are in units of degrees-C * below this value, including the Digital Thermal Sensor (DTS), * Package Thermal Management Sensor (PTM), and thermal event thresholds. */ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p) { unsigned long long msr; unsigned int target_c_local; int cpu; /* tcc_activation_temp is used only for dts or ptm */ if (!(do_dts || do_ptm)) return 0; /* this is a per-package concept */ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) return 0; cpu = t->cpu_id; if (cpu_migrate(cpu)) { fprintf(stderr, "Could not migrate to CPU %d\n", cpu); return -1; } if (tcc_activation_temp_override != 0) { tcc_activation_temp = tcc_activation_temp_override; fprintf(stderr, "cpu%d: Using cmdline TCC Target (%d C)\n", cpu, tcc_activation_temp); return 0; } /* Temperature Target MSR is Nehalem and newer only */ if (!do_nhm_platform_info) goto guess; if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr)) goto guess; target_c_local = (msr >> 16) & 0xFF; if (debug) fprintf(stderr, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n", cpu, msr, target_c_local); if (!target_c_local) goto guess; tcc_activation_temp = target_c_local; return 0; 
guess: tcc_activation_temp = TJMAX_DEFAULT; fprintf(stderr, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n", cpu, tcc_activation_temp); return 0; } void process_cpuid() { unsigned int eax, ebx, ecx, edx, max_level; unsigned int fms, family, model, stepping; eax = ebx = ecx = edx = 0; __get_cpuid(0, &max_level, &ebx, &ecx, &edx); if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e) genuine_intel = 1; if (debug) fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ", (char *)&ebx, (char *)&edx, (char *)&ecx); __get_cpuid(1, &fms, &ebx, &ecx, &edx); family = (fms >> 8) & 0xf; model = (fms >> 4) & 0xf; stepping = fms & 0xf; if (family == 6 || family == 0xf) model += ((fms >> 16) & 0xf) << 4; if (debug) fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n", max_level, family, model, stepping, family, model, stepping); if (!(edx & (1 << 5))) errx(1, "CPUID: no MSR"); /* * check max extended function levels of CPUID. * This is needed to check for invariant TSC. * This check is valid for both Intel and AMD. */ ebx = ecx = edx = 0; __get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx); if (max_level >= 0x80000007) { /* * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8 * this check is valid for both Intel and AMD */ __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx); has_invariant_tsc = edx & (1 << 8); } /* * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0 * this check is valid for both Intel and AMD */ __get_cpuid(0x6, &eax, &ebx, &ecx, &edx); has_aperf = ecx & (1 << 0); do_dts = eax & (1 << 0); do_ptm = eax & (1 << 6); has_epb = ecx & (1 << 3); if (debug) fprintf(stderr, "CPUID(6): %sAPERF, %sDTS, %sPTM, %sEPB\n", has_aperf ? "" : "No ", do_dts ? "" : "No ", do_ptm ? "" : "No ", has_epb ? 
"" : "No "); if (max_level > 0x15) { unsigned int eax_crystal; unsigned int ebx_tsc; /* * CPUID 15H TSC/Crystal ratio, possibly Crystal Hz */ eax_crystal = ebx_tsc = crystal_hz = edx = 0; __get_cpuid(0x15, &eax_crystal, &ebx_tsc, &crystal_hz, &edx); if (ebx_tsc != 0) { if (debug && (ebx != 0)) fprintf(stderr, "CPUID(0x15): eax_crystal: %d ebx_tsc: %d ecx_crystal_hz: %d\n", eax_crystal, ebx_tsc, crystal_hz); if (crystal_hz == 0) switch(model) { case 0x4E: /* SKL */ case 0x5E: /* SKL */ crystal_hz = 24000000; /* 24 MHz */ break; default: crystal_hz = 0; } if (crystal_hz) { tsc_hz = (unsigned long long) crystal_hz * ebx_tsc / eax_crystal; if (debug) fprintf(stderr, "TSC: %lld MHz (%d Hz * %d / %d / 1000000)\n", tsc_hz / 1000000, crystal_hz, ebx_tsc, eax_crystal); } } } if (has_aperf) aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model); do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model); do_snb_cstates = has_snb_msrs(family, model); do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2); do_pc3 = (pkg_cstate_limit >= PCL__3); do_pc6 = (pkg_cstate_limit >= PCL__6); do_pc7 = do_snb_cstates && (pkg_cstate_limit >= PCL__7); do_c8_c9_c10 = has_hsw_msrs(family, model); do_skl_residency = has_skl_msrs(family, model); do_slm_cstates = is_slm(family, model); do_knl_cstates = is_knl(family, model); rapl_probe(family, model); perf_limit_reasons_probe(family, model); if (debug) dump_cstate_pstate_config_info(); if (has_skl_msrs(family, model)) calculate_tsc_tweak(); return; } void help() { fprintf(stderr, "Usage: turbostat [OPTIONS][(--interval seconds) | COMMAND ...]\n" "\n" "Turbostat forks the specified COMMAND and prints statistics\n" "when COMMAND completes.\n" "If no COMMAND is specified, turbostat wakes every 5-seconds\n" "to print statistics, until interrupted.\n" "--debug run in \"debug\" mode\n" "--interval sec Override default 5-second measurement interval\n" "--help print this help message\n" "--counter msr print 32-bit 
counter at address \"msr\"\n" "--Counter msr print 64-bit Counter at address \"msr\"\n" "--msr msr print 32-bit value at address \"msr\"\n" "--MSR msr print 64-bit Value at address \"msr\"\n" "--version print version information\n" "\n" "For more help, run \"man turbostat\"\n"); } /* * in /dev/cpu/ return success for names that are numbers * ie. filter out ".", "..", "microcode". */ int dir_filter(const struct dirent *dirp) { if (isdigit(dirp->d_name[0])) return 1; else return 0; } int open_dev_cpu_msr(int dummy1) { return 0; } void topology_probe() { int i; int max_core_id = 0; int max_package_id = 0; int max_siblings = 0; struct cpu_topology { int core_id; int physical_package_id; } *cpus; /* Initialize num_cpus, max_cpu_num */ topo.num_cpus = 0; topo.max_cpu_num = 0; for_all_proc_cpus(count_cpus); if (!summary_only && topo.num_cpus > 1) show_cpu = 1; if (debug > 1) fprintf(stderr, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num); cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology)); if (cpus == NULL) err(1, "calloc cpus"); /* * Allocate and initialize cpu_present_set */ cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1)); if (cpu_present_set == NULL) err(3, "CPU_ALLOC"); cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1)); CPU_ZERO_S(cpu_present_setsize, cpu_present_set); for_all_proc_cpus(mark_cpu_present); /* * Allocate and initialize cpu_affinity_set */ cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1)); if (cpu_affinity_set == NULL) err(3, "CPU_ALLOC"); cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1)); CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set); /* * For online cpus * find max_core_id, max_package_id */ for (i = 0; i <= topo.max_cpu_num; ++i) { int siblings; if (cpu_is_not_present(i)) { if (debug > 1) fprintf(stderr, "cpu%d NOT PRESENT\n", i); continue; } cpus[i].core_id = get_core_id(i); if (cpus[i].core_id > max_core_id) max_core_id = cpus[i].core_id; cpus[i].physical_package_id = 
get_physical_package_id(i); if (cpus[i].physical_package_id > max_package_id) max_package_id = cpus[i].physical_package_id; siblings = get_num_ht_siblings(i); if (siblings > max_siblings) max_siblings = siblings; if (debug > 1) fprintf(stderr, "cpu %d pkg %d core %d\n", i, cpus[i].physical_package_id, cpus[i].core_id); } topo.num_cores_per_pkg = max_core_id + 1; if (debug > 1) fprintf(stderr, "max_core_id %d, sizing for %d cores per package\n", max_core_id, topo.num_cores_per_pkg); if (debug && !summary_only && topo.num_cores_per_pkg > 1) show_core = 1; topo.num_packages = max_package_id + 1; if (debug > 1) fprintf(stderr, "max_package_id %d, sizing for %d packages\n", max_package_id, topo.num_packages); if (debug && !summary_only && topo.num_packages > 1) show_pkg = 1; topo.num_threads_per_core = max_siblings; if (debug > 1) fprintf(stderr, "max_siblings %d\n", max_siblings); free(cpus); } void allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p) { int i; *t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg * topo.num_packages, sizeof(struct thread_data)); if (*t == NULL) goto error; for (i = 0; i < topo.num_threads_per_core * topo.num_cores_per_pkg * topo.num_packages; i++) (*t)[i].cpu_id = -1; *c = calloc(topo.num_cores_per_pkg * topo.num_packages, sizeof(struct core_data)); if (*c == NULL) goto error; for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++) (*c)[i].core_id = -1; *p = calloc(topo.num_packages, sizeof(struct pkg_data)); if (*p == NULL) goto error; for (i = 0; i < topo.num_packages; i++) (*p)[i].package_id = i; return; error: err(1, "calloc counters"); } /* * init_counter() * * set cpu_id, core_num, pkg_num * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE * * increment topo.num_cores when 1st core in pkg seen */ void init_counter(struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base, int thread_num, int core_num, int pkg_num, int cpu_id) { struct thread_data 
*t; struct core_data *c; struct pkg_data *p; t = GET_THREAD(thread_base, thread_num, core_num, pkg_num); c = GET_CORE(core_base, core_num, pkg_num); p = GET_PKG(pkg_base, pkg_num); t->cpu_id = cpu_id; if (thread_num == 0) { t->flags |= CPU_IS_FIRST_THREAD_IN_CORE; if (cpu_is_first_core_in_package(cpu_id)) t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE; } c->core_id = core_num; p->package_id = pkg_num; } int initialize_counters(int cpu_id) { int my_thread_id, my_core_id, my_package_id; my_package_id = get_physical_package_id(cpu_id); my_core_id = get_core_id(cpu_id); my_thread_id = get_cpu_position_in_core(cpu_id); if (!my_thread_id) topo.num_cores++; init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id); init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id); return 0; } void allocate_output_buffer() { output_buffer = calloc(1, (1 + topo.num_cpus) * 1024); outp = output_buffer; if (outp == NULL) err(-1, "calloc output buffer"); } void setup_all_buffers(void) { topology_probe(); allocate_counters(&thread_even, &core_even, &package_even); allocate_counters(&thread_odd, &core_odd, &package_odd); allocate_output_buffer(); for_all_proc_cpus(initialize_counters); } void set_base_cpu(void) { base_cpu = sched_getcpu(); if (base_cpu < 0) err(-ENODEV, "No valid cpus found"); if (debug > 1) fprintf(stderr, "base_cpu = %d\n", base_cpu); } void turbostat_init() { setup_all_buffers(); set_base_cpu(); check_dev_msr(); check_permissions(); process_cpuid(); if (debug) for_all_cpus(print_epb, ODD_COUNTERS); if (debug) for_all_cpus(print_perf_limit, ODD_COUNTERS); if (debug) for_all_cpus(print_rapl, ODD_COUNTERS); for_all_cpus(set_temperature_target, ODD_COUNTERS); if (debug) for_all_cpus(print_thermal, ODD_COUNTERS); } int fork_it(char **argv) { pid_t child_pid; int status; status = for_all_cpus(get_counters, EVEN_COUNTERS); if (status) exit(status); /* clear affinity side-effect of get_counters() */ sched_setaffinity(0, cpu_present_setsize, 
cpu_present_set); gettimeofday(&tv_even, (struct timezone *)NULL); child_pid = fork(); if (!child_pid) { /* child */ execvp(argv[0], argv); } else { /* parent */ if (child_pid == -1) err(1, "fork"); signal(SIGINT, SIG_IGN); signal(SIGQUIT, SIG_IGN); if (waitpid(child_pid, &status, 0) == -1) err(status, "waitpid"); } /* * n.b. fork_it() does not check for errors from for_all_cpus() * because re-starting is problematic when forking */ for_all_cpus(get_counters, ODD_COUNTERS); gettimeofday(&tv_odd, (struct timezone *)NULL); timersub(&tv_odd, &tv_even, &tv_delta); for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS); compute_average(EVEN_COUNTERS); format_all_counters(EVEN_COUNTERS); flush_stderr(); fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0); return status; } int get_and_dump_counters(void) { int status; status = for_all_cpus(get_counters, ODD_COUNTERS); if (status) return status; status = for_all_cpus(dump_counters, ODD_COUNTERS); if (status) return status; flush_stdout(); return status; } void print_version() { fprintf(stderr, "turbostat version 4.8 26-Sep, 2015" " - Len Brown <lenb@kernel.org>\n"); } void cmdline(int argc, char **argv) { int opt; int option_index = 0; static struct option long_options[] = { {"Counter", required_argument, 0, 'C'}, {"counter", required_argument, 0, 'c'}, {"Dump", no_argument, 0, 'D'}, {"debug", no_argument, 0, 'd'}, {"interval", required_argument, 0, 'i'}, {"help", no_argument, 0, 'h'}, {"Joules", no_argument, 0, 'J'}, {"MSR", required_argument, 0, 'M'}, {"msr", required_argument, 0, 'm'}, {"Package", no_argument, 0, 'p'}, {"processor", no_argument, 0, 'p'}, {"Summary", no_argument, 0, 'S'}, {"TCC", required_argument, 0, 'T'}, {"version", no_argument, 0, 'v' }, {0, 0, 0, 0 } }; progname = argv[0]; while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:PpST:v", long_options, &option_index)) != -1) { switch (opt) { case 'C': sscanf(optarg, "%x", &extra_delta_offset64); break; case 'c': 
sscanf(optarg, "%x", &extra_delta_offset32); break; case 'D': dump_only++; break; case 'd': debug++; break; case 'h': default: help(); exit(1); case 'i': interval_sec = atoi(optarg); break; case 'J': rapl_joules++; break; case 'M': sscanf(optarg, "%x", &extra_msr_offset64); break; case 'm': sscanf(optarg, "%x", &extra_msr_offset32); break; case 'P': show_pkg_only++; break; case 'p': show_core_only++; break; case 'S': summary_only++; break; case 'T': tcc_activation_temp_override = atoi(optarg); break; case 'v': print_version(); exit(0); break; } } } int main(int argc, char **argv) { cmdline(argc, argv); if (debug) print_version(); turbostat_init(); /* dump counters and exit */ if (dump_only) return get_and_dump_counters(); /* * if any params left, it must be a command to fork */ if (argc - optind) return fork_it(argv + optind); else turbostat_loop(); return 0; }
gpl-2.0
nightscape/yoga-900-kernel
drivers/gpu/drm/i915/intel_guc_loader.c
160
18896
/* * Copyright © 2014 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Authors: * Vinit Azad <vinit.azad@intel.com> * Ben Widawsky <ben@bwidawsk.net> * Dave Gordon <david.s.gordon@intel.com> * Alex Dai <yu.dai@intel.com> */ #include <linux/firmware.h> #include "i915_drv.h" #include "intel_guc.h" /** * DOC: GuC * * intel_guc: * Top level structure of guc. It handles firmware loading and manages client * pool and doorbells. intel_guc owns a i915_guc_client to replace the legacy * ExecList submission. * * Firmware versioning: * The firmware build process will generate a version header file with major and * minor version defined. The versions are built into CSS header of firmware. * i915 kernel driver set the minimal firmware version required per platform. * The firmware installation package will install (symbolic link) proper version * of firmware. 
* * GuC address space: * GuC does not allow any gfx GGTT address that falls into range [0, WOPCM_TOP), * which is reserved for Boot ROM, SRAM and WOPCM. Currently this top address is * 512K. In order to exclude 0-512K address space from GGTT, all gfx objects * used by GuC is pinned with PIN_OFFSET_BIAS along with size of WOPCM. * * Firmware log: * Firmware log is enabled by setting i915.guc_log_level to non-negative level. * Log data is printed out via reading debugfs i915_guc_log_dump. Reading from * i915_guc_load_status will print out firmware loading status and scratch * registers value. * */ #define I915_SKL_GUC_UCODE "i915/skl_guc_ver4.bin" MODULE_FIRMWARE(I915_SKL_GUC_UCODE); /* User-friendly representation of an enum */ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status) { switch (status) { case GUC_FIRMWARE_FAIL: return "FAIL"; case GUC_FIRMWARE_NONE: return "NONE"; case GUC_FIRMWARE_PENDING: return "PENDING"; case GUC_FIRMWARE_SUCCESS: return "SUCCESS"; default: return "UNKNOWN!"; } }; static void direct_interrupts_to_host(struct drm_i915_private *dev_priv) { struct intel_engine_cs *ring; int i, irqs; /* tell all command streamers NOT to forward interrupts and vblank to GuC */ irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER); irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING); for_each_ring(ring, dev_priv, i) I915_WRITE(RING_MODE_GEN7(ring), irqs); /* route all GT interrupts to the host */ I915_WRITE(GUC_BCS_RCS_IER, 0); I915_WRITE(GUC_VCS2_VCS1_IER, 0); I915_WRITE(GUC_WD_VECS_IER, 0); } static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv) { struct intel_engine_cs *ring; int i, irqs; /* tell all command streamers to forward interrupts and vblank to GuC */ irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS); irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING); for_each_ring(ring, dev_priv, i) I915_WRITE(RING_MODE_GEN7(ring), irqs); /* route USER_INTERRUPT to Host, all others 
are sent to GuC. */ irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; /* These three registers have the same bit definitions */ I915_WRITE(GUC_BCS_RCS_IER, ~irqs); I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs); I915_WRITE(GUC_WD_VECS_IER, ~irqs); } static u32 get_gttype(struct drm_i915_private *dev_priv) { /* XXX: GT type based on PCI device ID? field seems unused by fw */ return 0; } static u32 get_core_family(struct drm_i915_private *dev_priv) { switch (INTEL_INFO(dev_priv)->gen) { case 9: return GFXCORE_FAMILY_GEN9; default: DRM_ERROR("GUC: unsupported core family\n"); return GFXCORE_FAMILY_UNKNOWN; } } static void set_guc_init_params(struct drm_i915_private *dev_priv) { struct intel_guc *guc = &dev_priv->guc; u32 params[GUC_CTL_MAX_DWORDS]; int i; memset(&params, 0, sizeof(params)); params[GUC_CTL_DEVICE_INFO] |= (get_gttype(dev_priv) << GUC_CTL_GTTYPE_SHIFT) | (get_core_family(dev_priv) << GUC_CTL_COREFAMILY_SHIFT); /* * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one * second. 
This ARAR is calculated by: * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10 */ params[GUC_CTL_ARAT_HIGH] = 0; params[GUC_CTL_ARAT_LOW] = 100000000; params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER; params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER | GUC_CTL_VCS2_ENABLED; if (i915.guc_log_level >= 0) { params[GUC_CTL_LOG_PARAMS] = guc->log_flags; params[GUC_CTL_DEBUG] = i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT; } /* If GuC submission is enabled, set up additional parameters here */ if (i915.enable_guc_submission) { u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj); u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16; pgs >>= PAGE_SHIFT; params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) | (ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT); params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS; /* Unmask this bit to enable the GuC's internal scheduler */ params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER; } I915_WRITE(SOFT_SCRATCH(0), 0); for (i = 0; i < GUC_CTL_MAX_DWORDS; i++) I915_WRITE(SOFT_SCRATCH(1 + i), params[i]); } /* * Read the GuC status register (GUC_STATUS) and store it in the * specified location; then return a boolean indicating whether * the value matches either of two values representing completion * of the GuC boot process. * * This is used for polling the GuC status in a wait_for_atomic() * loop below. */ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv, u32 *status) { u32 val = I915_READ(GUC_STATUS); u32 uk_val = val & GS_UKERNEL_MASK; *status = val; return (uk_val == GS_UKERNEL_READY || ((val & GS_MIA_CORE_STATE) && uk_val == GS_UKERNEL_LAPIC_DONE)); } /* * Transfer the firmware image to RAM for execution by the microcontroller. 
* * GuC Firmware layout: * +-------------------------------+ ---- * | CSS header | 128B * | contains major/minor version | * +-------------------------------+ ---- * | uCode | * +-------------------------------+ ---- * | RSA signature | 256B * +-------------------------------+ ---- * * Architecturally, the DMA engine is bidirectional, and can potentially even * transfer between GTT locations. This functionality is left out of the API * for now as there is no need for it. * * Note that GuC needs the CSS header plus uKernel code to be copied by the * DMA engine in one operation, whereas the RSA signature is loaded via MMIO. */ #define UOS_CSS_HEADER_OFFSET 0 #define UOS_VER_MINOR_OFFSET 0x44 #define UOS_VER_MAJOR_OFFSET 0x46 #define UOS_CSS_HEADER_SIZE 0x80 #define UOS_RSA_SIG_SIZE 0x100 static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv) { struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj; unsigned long offset; struct sg_table *sg = fw_obj->pages; u32 status, ucode_size, rsa[UOS_RSA_SIG_SIZE / sizeof(u32)]; int i, ret = 0; /* uCode size, also is where RSA signature starts */ offset = ucode_size = guc_fw->guc_fw_size - UOS_RSA_SIG_SIZE; I915_WRITE(DMA_COPY_SIZE, ucode_size); /* Copy RSA signature from the fw image to HW for verification */ sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, UOS_RSA_SIG_SIZE, offset); for (i = 0; i < UOS_RSA_SIG_SIZE / sizeof(u32); i++) I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]); /* Set the source address for the new blob */ offset = i915_gem_obj_ggtt_offset(fw_obj); I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset)); I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); /* * Set the DMA destination. Current uCode expects the code to be * loaded at 8k; locations below this are used for the stack. 
*/ I915_WRITE(DMA_ADDR_1_LOW, 0x2000); I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); /* Finally start the DMA */ I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA)); /* * Spin-wait for the DMA to complete & the GuC to start up. * NB: Docs recommend not using the interrupt for completion. * Measurements indicate this should take no more than 20ms, so a * timeout here indicates that the GuC has failed and is unusable. * (Higher levels of the driver will attempt to fall back to * execlist mode if this happens.) */ ret = wait_for_atomic(guc_ucode_response(dev_priv, &status), 100); DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n", I915_READ(DMA_CTRL), status); if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) { DRM_ERROR("GuC firmware signature verification failed\n"); ret = -ENOEXEC; } DRM_DEBUG_DRIVER("returning %d\n", ret); return ret; } /* * Load the GuC firmware blob into the MinuteIA. */ static int guc_ucode_xfer(struct drm_i915_private *dev_priv) { struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; struct drm_device *dev = dev_priv->dev; int ret; ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false); if (ret) { DRM_DEBUG_DRIVER("set-domain failed %d\n", ret); return ret; } ret = i915_gem_obj_ggtt_pin(guc_fw->guc_fw_obj, 0, 0); if (ret) { DRM_DEBUG_DRIVER("pin failed %d\n", ret); return ret; } /* Invalidate GuC TLB to let GuC take the latest updates to GTT. */ I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); /* init WOPCM */ I915_WRITE(GUC_WOPCM_SIZE, GUC_WOPCM_SIZE_VALUE); I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE); /* Enable MIA caching. GuC clock gating is disabled. 
*/ I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE); /* WaDisableMinuteIaClockGating:skl,bxt */ if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) || (IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) { I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) & ~GUC_ENABLE_MIA_CLOCK_GATING)); } /* WaC6DisallowByGfxPause*/ I915_WRITE(GEN6_GFXPAUSE, 0x30FFF); if (IS_BROXTON(dev)) I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE); else I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE); if (IS_GEN9(dev)) { /* DOP Clock Gating Enable for GuC clocks */ I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE | I915_READ(GEN7_MISCCPCTL))); /* allows for 5us before GT can go to RC6 */ I915_WRITE(GUC_ARAT_C6DIS, 0x1FF); } set_guc_init_params(dev_priv); ret = guc_ucode_xfer_dma(dev_priv); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); /* * We keep the object pages for reuse during resume. But we can unpin it * now that DMA has completed, so it doesn't continue to take up space. */ i915_gem_object_ggtt_unpin(guc_fw->guc_fw_obj); return ret; } /** * intel_guc_ucode_load() - load GuC uCode into the device * @dev: drm device * * Called from gem_init_hw() during driver loading and also after a GPU reset. * * The firmware image should have already been fetched into memory by the * earlier call to intel_guc_ucode_init(), so here we need only check that * is succeeded, and then transfer the image to the h/w. 
* * Return: non-zero code on error */ int intel_guc_ucode_load(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; int err = 0; DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n", intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status), intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); direct_interrupts_to_host(dev_priv); if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE) return 0; if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_SUCCESS && guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) return -ENOEXEC; guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING; DRM_DEBUG_DRIVER("GuC fw fetch status %s\n", intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status)); switch (guc_fw->guc_fw_fetch_status) { case GUC_FIRMWARE_FAIL: /* something went wrong :( */ err = -EIO; goto fail; case GUC_FIRMWARE_NONE: case GUC_FIRMWARE_PENDING: default: /* "can't happen" */ WARN_ONCE(1, "GuC fw %s invalid guc_fw_fetch_status %s [%d]\n", guc_fw->guc_fw_path, intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status), guc_fw->guc_fw_fetch_status); err = -ENXIO; goto fail; case GUC_FIRMWARE_SUCCESS: break; } err = i915_guc_submission_init(dev); if (err) goto fail; err = guc_ucode_xfer(dev_priv); if (err) goto fail; guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS; DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n", intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status), intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); if (i915.enable_guc_submission) { /* The execbuf_client will be recreated. Release it first. 
*/ i915_guc_submission_disable(dev); err = i915_guc_submission_enable(dev); if (err) goto fail; direct_interrupts_to_guc(dev_priv); } return 0; fail: if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING) guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL; direct_interrupts_to_host(dev_priv); i915_guc_submission_disable(dev); return err; } static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw) { struct drm_i915_gem_object *obj; const struct firmware *fw; const u8 *css_header; const size_t minsize = UOS_CSS_HEADER_SIZE + UOS_RSA_SIG_SIZE; const size_t maxsize = GUC_WOPCM_SIZE_VALUE + UOS_RSA_SIG_SIZE - 0x8000; /* 32k reserved (8K stack + 24k context) */ int err; DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n", intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status)); err = request_firmware(&fw, guc_fw->guc_fw_path, &dev->pdev->dev); if (err) goto fail; if (!fw) goto fail; DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n", guc_fw->guc_fw_path, fw); DRM_DEBUG_DRIVER("firmware file size %zu (minimum %zu, maximum %zu)\n", fw->size, minsize, maxsize); /* Check the size of the blob befoe examining buffer contents */ if (fw->size < minsize || fw->size > maxsize) goto fail; /* * The GuC firmware image has the version number embedded at a well-known * offset within the firmware blob; note that major / minor version are * TWO bytes each (i.e. u16), although all pointers and offsets are defined * in terms of bytes (u8). 
*/ css_header = fw->data + UOS_CSS_HEADER_OFFSET; guc_fw->guc_fw_major_found = *(u16 *)(css_header + UOS_VER_MAJOR_OFFSET); guc_fw->guc_fw_minor_found = *(u16 *)(css_header + UOS_VER_MINOR_OFFSET); if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted || guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) { DRM_ERROR("GuC firmware version %d.%d, required %d.%d\n", guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found, guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted); err = -ENOEXEC; goto fail; } DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n", guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found, guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted); mutex_lock(&dev->struct_mutex); obj = i915_gem_object_create_from_data(dev, fw->data, fw->size); mutex_unlock(&dev->struct_mutex); if (IS_ERR_OR_NULL(obj)) { err = obj ? PTR_ERR(obj) : -ENOMEM; goto fail; } guc_fw->guc_fw_obj = obj; guc_fw->guc_fw_size = fw->size; DRM_DEBUG_DRIVER("GuC fw fetch status SUCCESS, obj %p\n", guc_fw->guc_fw_obj); release_firmware(fw); guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_SUCCESS; return; fail: DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n", err, fw, guc_fw->guc_fw_obj); DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n", guc_fw->guc_fw_path, err); obj = guc_fw->guc_fw_obj; if (obj) drm_gem_object_unreference(&obj->base); guc_fw->guc_fw_obj = NULL; release_firmware(fw); /* OK even if fw is NULL */ guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL; } /** * intel_guc_ucode_init() - define parameters and fetch firmware * @dev: drm device * * Called early during driver load, but after GEM is initialised. * * The firmware will be transferred to the GuC's memory later, * when intel_guc_ucode_load() is called. 
*/ void intel_guc_ucode_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; const char *fw_path; if (!HAS_GUC_SCHED(dev)) i915.enable_guc_submission = false; if (!HAS_GUC_UCODE(dev)) { fw_path = NULL; } else if (IS_SKYLAKE(dev)) { fw_path = I915_SKL_GUC_UCODE; guc_fw->guc_fw_major_wanted = 4; guc_fw->guc_fw_minor_wanted = 3; } else { i915.enable_guc_submission = false; fw_path = ""; /* unknown device */ } guc_fw->guc_dev = dev; guc_fw->guc_fw_path = fw_path; guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE; guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE; if (fw_path == NULL) return; if (*fw_path == '\0') { DRM_ERROR("No GuC firmware known for this platform\n"); guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL; return; } guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING; DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path); guc_fw_fetch(dev, guc_fw); /* status must now be FAIL or SUCCESS */ } /** * intel_guc_ucode_fini() - clean up all allocated resources * @dev: drm device */ void intel_guc_ucode_fini(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; direct_interrupts_to_host(dev_priv); i915_guc_submission_fini(dev); mutex_lock(&dev->struct_mutex); if (guc_fw->guc_fw_obj) drm_gem_object_unreference(&guc_fw->guc_fw_obj->base); guc_fw->guc_fw_obj = NULL; mutex_unlock(&dev->struct_mutex); guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE; }
gpl-2.0
pursuitxh/Linux-3.2.50
arch/arm/mach-s5pc100/dma.c
160
5891
/* linux/arch/arm/mach-s5pc100/dma.c * * Copyright (c) 2011 Samsung Electronics Co., Ltd. * http://www.samsung.com * * Copyright (C) 2010 Samsung Electronics Co. Ltd. * Jaswinder Singh <jassi.brar@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/dma-mapping.h> #include <linux/amba/bus.h> #include <linux/amba/pl330.h> #include <asm/irq.h> #include <plat/devs.h> #include <plat/irqs.h> #include <mach/map.h> #include <mach/irqs.h> #include <mach/dma.h> static u64 dma_dmamask = DMA_BIT_MASK(32); struct dma_pl330_peri pdma0_peri[30] = { { .peri_id = (u8)DMACH_UART0_RX, .rqtype = DEVTOMEM, }, { .peri_id = (u8)DMACH_UART0_TX, .rqtype = MEMTODEV, }, { .peri_id = (u8)DMACH_UART1_RX, .rqtype = DEVTOMEM, }, { .peri_id = (u8)DMACH_UART1_TX, .rqtype = MEMTODEV, }, { .peri_id = (u8)DMACH_UART2_RX, .rqtype = DEVTOMEM, }, { .peri_id = (u8)DMACH_UART2_TX, .rqtype = MEMTODEV, }, { .peri_id = (u8)DMACH_UART3_RX, .rqtype = DEVTOMEM, }, { .peri_id = (u8)DMACH_UART3_TX, .rqtype = MEMTODEV, }, { .peri_id = DMACH_IRDA, }, { .peri_id = (u8)DMACH_I2S0_RX, .rqtype = DEVTOMEM, }, { .peri_id = (u8)DMACH_I2S0_TX, .rqtype = MEMTODEV, }, { .peri_id = (u8)DMACH_I2S0S_TX, .rqtype = MEMTODEV, }, { .peri_id = (u8)DMACH_I2S1_RX, .rqtype = DEVTOMEM, }, { .peri_id = (u8)DMACH_I2S1_TX, .rqtype = MEMTODEV, }, { .peri_id = (u8)DMACH_I2S2_RX, .rqtype = 
DEVTOMEM, }, { .peri_id = (u8)DMACH_I2S2_TX, .rqtype = MEMTODEV, }, { .peri_id = (u8)DMACH_SPI0_RX, .rqtype = DEVTOMEM, }, { .peri_id = (u8)DMACH_SPI0_TX, .rqtype = MEMTODEV, }, { .peri_id = (u8)DMACH_SPI1_RX, .rqtype = DEVTOMEM, }, { .peri_id = (u8)DMACH_SPI1_TX, .rqtype = MEMTODEV, }, { .peri_id = (u8)DMACH_SPI2_RX, .rqtype = DEVTOMEM, }, { .peri_id = (u8)DMACH_SPI2_TX, .rqtype = MEMTODEV, }, { .peri_id = (u8)DMACH_AC97_MICIN, .rqtype = DEVTOMEM, }, { .peri_id = (u8)DMACH_AC97_PCMIN, .rqtype = DEVTOMEM, }, { .peri_id = (u8)DMACH_AC97_PCMOUT, .rqtype = MEMTODEV, }, { .peri_id = (u8)DMACH_EXTERNAL, }, { .peri_id = (u8)DMACH_PWM, }, { .peri_id = (u8)DMACH_SPDIF, .rqtype = MEMTODEV, }, { .peri_id = (u8)DMACH_HSI_RX, .rqtype = DEVTOMEM, }, { .peri_id = (u8)DMACH_HSI_TX, .rqtype = MEMTODEV, }, }; struct dma_pl330_platdata s5pc100_pdma0_pdata = { .nr_valid_peri = ARRAY_SIZE(pdma0_peri), .peri = pdma0_peri, }; struct amba_device s5pc100_device_pdma0 = { .dev = { .init_name = "dma-pl330.0", .dma_mask = &dma_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &s5pc100_pdma0_pdata, }, .res = { .start = S5PC100_PA_PDMA0, .end = S5PC100_PA_PDMA0 + SZ_4K, .flags = IORESOURCE_MEM, }, .irq = {IRQ_PDMA0, NO_IRQ}, .periphid = 0x00041330, }; struct dma_pl330_peri pdma1_peri[30] = { { .peri_id = (u8)DMACH_UART0_RX, .rqtype = DEVTOMEM, }, { .peri_id = (u8)DMACH_UART0_TX, .rqtype = MEMTODEV, }, { .peri_id = (u8)DMACH_UART1_RX, .rqtype = DEVTOMEM, }, { .peri_id = (u8)DMACH_UART1_TX, .rqtype = MEMTODEV, }, { .peri_id = (u8)DMACH_UART2_RX, .rqtype = DEVTOMEM, }, { .peri_id = (u8)DMACH_UART2_TX, .rqtype = MEMTODEV, }, { .peri_id = (u8)DMACH_UART3_RX, .rqtype = DEVTOMEM, }, { .peri_id = (u8)DMACH_UART3_TX, .rqtype = MEMTODEV, }, { .peri_id = DMACH_IRDA, }, { .peri_id = (u8)DMACH_I2S0_RX, .rqtype = DEVTOMEM, }, { .peri_id = (u8)DMACH_I2S0_TX, .rqtype = MEMTODEV, }, { .peri_id = (u8)DMACH_I2S0S_TX, .rqtype = MEMTODEV, }, { .peri_id = (u8)DMACH_I2S1_RX, .rqtype = DEVTOMEM, }, { 
.peri_id = (u8)DMACH_I2S1_TX, .rqtype = MEMTODEV, }, { .peri_id = (u8)DMACH_I2S2_RX, .rqtype = DEVTOMEM, }, { .peri_id = (u8)DMACH_I2S2_TX, .rqtype = MEMTODEV, }, { .peri_id = (u8)DMACH_SPI0_RX, .rqtype = DEVTOMEM, }, { .peri_id = (u8)DMACH_SPI0_TX, .rqtype = MEMTODEV, }, { .peri_id = (u8)DMACH_SPI1_RX, .rqtype = DEVTOMEM, }, { .peri_id = (u8)DMACH_SPI1_TX, .rqtype = MEMTODEV, }, { .peri_id = (u8)DMACH_SPI2_RX, .rqtype = DEVTOMEM, }, { .peri_id = (u8)DMACH_SPI2_TX, .rqtype = MEMTODEV, }, { .peri_id = (u8)DMACH_PCM0_RX, .rqtype = DEVTOMEM, }, { .peri_id = (u8)DMACH_PCM1_TX, .rqtype = MEMTODEV, }, { .peri_id = (u8)DMACH_PCM1_RX, .rqtype = DEVTOMEM, }, { .peri_id = (u8)DMACH_PCM1_TX, .rqtype = MEMTODEV, }, { .peri_id = (u8)DMACH_MSM_REQ0, }, { .peri_id = (u8)DMACH_MSM_REQ1, }, { .peri_id = (u8)DMACH_MSM_REQ2, }, { .peri_id = (u8)DMACH_MSM_REQ3, }, }; struct dma_pl330_platdata s5pc100_pdma1_pdata = { .nr_valid_peri = ARRAY_SIZE(pdma1_peri), .peri = pdma1_peri, }; struct amba_device s5pc100_device_pdma1 = { .dev = { .init_name = "dma-pl330.1", .dma_mask = &dma_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &s5pc100_pdma1_pdata, }, .res = { .start = S5PC100_PA_PDMA1, .end = S5PC100_PA_PDMA1 + SZ_4K, .flags = IORESOURCE_MEM, }, .irq = {IRQ_PDMA1, NO_IRQ}, .periphid = 0x00041330, }; static int __init s5pc100_dma_init(void) { amba_device_register(&s5pc100_device_pdma0, &iomem_resource); amba_device_register(&s5pc100_device_pdma1, &iomem_resource); return 0; } arch_initcall(s5pc100_dma_init);
gpl-2.0
0x20c24/linux-psec
net/ipv6/esp6.c
160
15739
/* * Copyright (C)2002 USAGI/WIDE Project * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. * * Authors * * Mitsuru KANDA @USAGI : IPv6 Support * Kazunori MIYAZAWA @USAGI : * Kunihiro Ishiguro <kunihiro@ipinfusion.com> * * This file is derived from net/ipv4/esp.c */ #define pr_fmt(fmt) "IPv6: " fmt #include <crypto/aead.h> #include <crypto/authenc.h> #include <linux/err.h> #include <linux/module.h> #include <net/ip.h> #include <net/xfrm.h> #include <net/esp.h> #include <linux/scatterlist.h> #include <linux/kernel.h> #include <linux/pfkeyv2.h> #include <linux/random.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <net/ip6_route.h> #include <net/icmp.h> #include <net/ipv6.h> #include <net/protocol.h> #include <linux/icmpv6.h> struct esp_skb_cb { struct xfrm_skb_cb xfrm; void *tmp; }; #define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0])) static u32 esp6_get_mtu(struct xfrm_state *x, int mtu); /* * Allocate an AEAD request structure with extra space for SG and IV. * * For alignment considerations the upper 32 bits of the sequence number are * placed at the front, if present. Followed by the IV, the request and finally * the SG list. * * TODO: Use spare space in skb for this where possible. 
*/ static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen) { unsigned int len; len = seqihlen; len += crypto_aead_ivsize(aead); if (len) { len += crypto_aead_alignmask(aead) & ~(crypto_tfm_ctx_alignment() - 1); len = ALIGN(len, crypto_tfm_ctx_alignment()); } len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead); len = ALIGN(len, __alignof__(struct scatterlist)); len += sizeof(struct scatterlist) * nfrags; return kmalloc(len, GFP_ATOMIC); } static inline __be32 *esp_tmp_seqhi(void *tmp) { return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32)); } static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen) { return crypto_aead_ivsize(aead) ? PTR_ALIGN((u8 *)tmp + seqhilen, crypto_aead_alignmask(aead) + 1) : tmp + seqhilen; } static inline struct aead_givcrypt_request *esp_tmp_givreq( struct crypto_aead *aead, u8 *iv) { struct aead_givcrypt_request *req; req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead), crypto_tfm_ctx_alignment()); aead_givcrypt_set_tfm(req, aead); return req; } static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv) { struct aead_request *req; req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead), crypto_tfm_ctx_alignment()); aead_request_set_tfm(req, aead); return req; } static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead, struct aead_request *req) { return (void *)ALIGN((unsigned long)(req + 1) + crypto_aead_reqsize(aead), __alignof__(struct scatterlist)); } static inline struct scatterlist *esp_givreq_sg( struct crypto_aead *aead, struct aead_givcrypt_request *req) { return (void *)ALIGN((unsigned long)(req + 1) + crypto_aead_reqsize(aead), __alignof__(struct scatterlist)); } static void esp_output_done(struct crypto_async_request *base, int err) { struct sk_buff *skb = base->data; kfree(ESP_SKB_CB(skb)->tmp); xfrm_output_resume(skb, err); } static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) { int err; struct ip_esp_hdr *esph; 
struct crypto_aead *aead; struct aead_givcrypt_request *req; struct scatterlist *sg; struct scatterlist *asg; struct sk_buff *trailer; void *tmp; int blksize; int clen; int alen; int plen; int tfclen; int nfrags; int assoclen; int sglists; int seqhilen; u8 *iv; u8 *tail; __be32 *seqhi; /* skb is pure payload to encrypt */ aead = x->data; alen = crypto_aead_authsize(aead); tfclen = 0; if (x->tfcpad) { struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb); u32 padto; padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached)); if (skb->len < padto) tfclen = padto - skb->len; } blksize = ALIGN(crypto_aead_blocksize(aead), 4); clen = ALIGN(skb->len + 2 + tfclen, blksize); plen = clen - skb->len - tfclen; err = skb_cow_data(skb, tfclen + plen + alen, &trailer); if (err < 0) goto error; nfrags = err; assoclen = sizeof(*esph); sglists = 1; seqhilen = 0; if (x->props.flags & XFRM_STATE_ESN) { sglists += 2; seqhilen += sizeof(__be32); assoclen += seqhilen; } tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); if (!tmp) { err = -ENOMEM; goto error; } seqhi = esp_tmp_seqhi(tmp); iv = esp_tmp_iv(aead, tmp, seqhilen); req = esp_tmp_givreq(aead, iv); asg = esp_givreq_sg(aead, req); sg = asg + sglists; /* Fill padding... 
*/ tail = skb_tail_pointer(trailer); if (tfclen) { memset(tail, 0, tfclen); tail += tfclen; } do { int i; for (i = 0; i < plen - 2; i++) tail[i] = i + 1; } while (0); tail[plen - 2] = plen - 2; tail[plen - 1] = *skb_mac_header(skb); pskb_put(skb, trailer, clen - skb->len + alen); skb_push(skb, -skb_network_offset(skb)); esph = ip_esp_hdr(skb); *skb_mac_header(skb) = IPPROTO_ESP; esph->spi = x->id.spi; esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); sg_init_table(sg, nfrags); skb_to_sgvec(skb, sg, esph->enc_data + crypto_aead_ivsize(aead) - skb->data, clen + alen); if ((x->props.flags & XFRM_STATE_ESN)) { sg_init_table(asg, 3); sg_set_buf(asg, &esph->spi, sizeof(__be32)); *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi); sg_set_buf(asg + 1, seqhi, seqhilen); sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32)); } else sg_init_one(asg, esph, sizeof(*esph)); aead_givcrypt_set_callback(req, 0, esp_output_done, skb); aead_givcrypt_set_crypt(req, sg, sg, clen, iv); aead_givcrypt_set_assoc(req, asg, assoclen); aead_givcrypt_set_giv(req, esph->enc_data, XFRM_SKB_CB(skb)->seq.output.low + ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32)); ESP_SKB_CB(skb)->tmp = tmp; err = crypto_aead_givencrypt(req); if (err == -EINPROGRESS) goto error; if (err == -EBUSY) err = NET_XMIT_DROP; kfree(tmp); error: return err; } static int esp_input_done2(struct sk_buff *skb, int err) { struct xfrm_state *x = xfrm_input_state(skb); struct crypto_aead *aead = x->data; int alen = crypto_aead_authsize(aead); int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead); int elen = skb->len - hlen; int hdr_len = skb_network_header_len(skb); int padlen; u8 nexthdr[2]; kfree(ESP_SKB_CB(skb)->tmp); if (unlikely(err)) goto out; if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2)) BUG(); err = -EINVAL; padlen = nexthdr[0]; if (padlen + 2 + alen >= elen) { net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n", padlen + 2, elen - alen); goto out; } /* ... check padding bits here. 
Silly. :-) */ pskb_trim(skb, skb->len - alen - padlen - 2); __skb_pull(skb, hlen); if (x->props.mode == XFRM_MODE_TUNNEL) skb_reset_transport_header(skb); else skb_set_transport_header(skb, -hdr_len); err = nexthdr[1]; /* RFC4303: Drop dummy packets without any error */ if (err == IPPROTO_NONE) err = -EINVAL; out: return err; } static void esp_input_done(struct crypto_async_request *base, int err) { struct sk_buff *skb = base->data; xfrm_input_resume(skb, esp_input_done2(skb, err)); } static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) { struct ip_esp_hdr *esph; struct crypto_aead *aead = x->data; struct aead_request *req; struct sk_buff *trailer; int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead); int nfrags; int assoclen; int sglists; int seqhilen; int ret = 0; void *tmp; __be32 *seqhi; u8 *iv; struct scatterlist *sg; struct scatterlist *asg; if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) { ret = -EINVAL; goto out; } if (elen <= 0) { ret = -EINVAL; goto out; } nfrags = skb_cow_data(skb, 0, &trailer); if (nfrags < 0) { ret = -EINVAL; goto out; } ret = -ENOMEM; assoclen = sizeof(*esph); sglists = 1; seqhilen = 0; if (x->props.flags & XFRM_STATE_ESN) { sglists += 2; seqhilen += sizeof(__be32); assoclen += seqhilen; } tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); if (!tmp) goto out; ESP_SKB_CB(skb)->tmp = tmp; seqhi = esp_tmp_seqhi(tmp); iv = esp_tmp_iv(aead, tmp, seqhilen); req = esp_tmp_req(aead, iv); asg = esp_req_sg(aead, req); sg = asg + sglists; skb->ip_summed = CHECKSUM_NONE; esph = (struct ip_esp_hdr *)skb->data; /* Get ivec. This can be wrong, check against another impls. 
*/ iv = esph->enc_data; sg_init_table(sg, nfrags); skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen); if ((x->props.flags & XFRM_STATE_ESN)) { sg_init_table(asg, 3); sg_set_buf(asg, &esph->spi, sizeof(__be32)); *seqhi = XFRM_SKB_CB(skb)->seq.input.hi; sg_set_buf(asg + 1, seqhi, seqhilen); sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32)); } else sg_init_one(asg, esph, sizeof(*esph)); aead_request_set_callback(req, 0, esp_input_done, skb); aead_request_set_crypt(req, sg, sg, elen, iv); aead_request_set_assoc(req, asg, assoclen); ret = crypto_aead_decrypt(req); if (ret == -EINPROGRESS) goto out; ret = esp_input_done2(skb, ret); out: return ret; } static u32 esp6_get_mtu(struct xfrm_state *x, int mtu) { struct crypto_aead *aead = x->data; u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4); unsigned int net_adj; if (x->props.mode != XFRM_MODE_TUNNEL) net_adj = sizeof(struct ipv6hdr); else net_adj = 0; return ((mtu - x->props.header_len - crypto_aead_authsize(aead) - net_adj) & ~(blksize - 1)) + net_adj - 2; } static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { struct net *net = dev_net(skb->dev); const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data; struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset); struct xfrm_state *x; if (type != ICMPV6_PKT_TOOBIG && type != NDISC_REDIRECT) return 0; x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET6); if (!x) return 0; if (type == NDISC_REDIRECT) ip6_redirect(skb, net, skb->dev->ifindex, 0); else ip6_update_pmtu(skb, net, info, 0, 0); xfrm_state_put(x); return 0; } static void esp6_destroy(struct xfrm_state *x) { struct crypto_aead *aead = x->data; if (!aead) return; crypto_free_aead(aead); } static int esp_init_aead(struct xfrm_state *x) { struct crypto_aead *aead; int err; aead = crypto_alloc_aead(x->aead->alg_name, 0, 0); err = PTR_ERR(aead); if (IS_ERR(aead)) 
goto error; x->data = aead; err = crypto_aead_setkey(aead, x->aead->alg_key, (x->aead->alg_key_len + 7) / 8); if (err) goto error; err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8); if (err) goto error; error: return err; } static int esp_init_authenc(struct xfrm_state *x) { struct crypto_aead *aead; struct crypto_authenc_key_param *param; struct rtattr *rta; char *key; char *p; char authenc_name[CRYPTO_MAX_ALG_NAME]; unsigned int keylen; int err; err = -EINVAL; if (!x->ealg) goto error; err = -ENAMETOOLONG; if ((x->props.flags & XFRM_STATE_ESN)) { if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authencesn(%s,%s)", x->aalg ? x->aalg->alg_name : "digest_null", x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME) goto error; } else { if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)", x->aalg ? x->aalg->alg_name : "digest_null", x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME) goto error; } aead = crypto_alloc_aead(authenc_name, 0, 0); err = PTR_ERR(aead); if (IS_ERR(aead)) goto error; x->data = aead; keylen = (x->aalg ? 
(x->aalg->alg_key_len + 7) / 8 : 0) + (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param)); err = -ENOMEM; key = kmalloc(keylen, GFP_KERNEL); if (!key) goto error; p = key; rta = (void *)p; rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM; rta->rta_len = RTA_LENGTH(sizeof(*param)); param = RTA_DATA(rta); p += RTA_SPACE(sizeof(*param)); if (x->aalg) { struct xfrm_algo_desc *aalg_desc; memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8); p += (x->aalg->alg_key_len + 7) / 8; aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); BUG_ON(!aalg_desc); err = -EINVAL; if (aalg_desc->uinfo.auth.icv_fullbits / 8 != crypto_aead_authsize(aead)) { pr_info("ESP: %s digestsize %u != %hu\n", x->aalg->alg_name, crypto_aead_authsize(aead), aalg_desc->uinfo.auth.icv_fullbits / 8); goto free_key; } err = crypto_aead_setauthsize( aead, x->aalg->alg_trunc_len / 8); if (err) goto free_key; } param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8); memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8); err = crypto_aead_setkey(aead, key, keylen); free_key: kfree(key); error: return err; } static int esp6_init_state(struct xfrm_state *x) { struct crypto_aead *aead; u32 align; int err; if (x->encap) return -EINVAL; x->data = NULL; if (x->aead) err = esp_init_aead(x); else err = esp_init_authenc(x); if (err) goto error; aead = x->data; x->props.header_len = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead); switch (x->props.mode) { case XFRM_MODE_BEET: if (x->sel.family != AF_INET6) x->props.header_len += IPV4_BEET_PHMAXLEN + (sizeof(struct ipv6hdr) - sizeof(struct iphdr)); break; case XFRM_MODE_TRANSPORT: break; case XFRM_MODE_TUNNEL: x->props.header_len += sizeof(struct ipv6hdr); break; default: goto error; } align = ALIGN(crypto_aead_blocksize(aead), 4); x->props.trailer_len = align + 1 + crypto_aead_authsize(aead); error: return err; } static int esp6_rcv_cb(struct sk_buff *skb, int err) { return 0; } static const struct xfrm_type esp6_type = { .description = 
"ESP6", .owner = THIS_MODULE, .proto = IPPROTO_ESP, .flags = XFRM_TYPE_REPLAY_PROT, .init_state = esp6_init_state, .destructor = esp6_destroy, .get_mtu = esp6_get_mtu, .input = esp6_input, .output = esp6_output, .hdr_offset = xfrm6_find_1stfragopt, }; static struct xfrm6_protocol esp6_protocol = { .handler = xfrm6_rcv, .cb_handler = esp6_rcv_cb, .err_handler = esp6_err, .priority = 0, }; static int __init esp6_init(void) { if (xfrm_register_type(&esp6_type, AF_INET6) < 0) { pr_info("%s: can't add xfrm type\n", __func__); return -EAGAIN; } if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) { pr_info("%s: can't add protocol\n", __func__); xfrm_unregister_type(&esp6_type, AF_INET6); return -EAGAIN; } return 0; } static void __exit esp6_fini(void) { if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0) pr_info("%s: can't remove protocol\n", __func__); if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0) pr_info("%s: can't remove xfrm type\n", __func__); } module_init(esp6_init); module_exit(esp6_fini); MODULE_LICENSE("GPL"); MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);
gpl-2.0
ArthySundaram/chromeos-3.8
drivers/video/backlight/locomolcd.c
160
6389
/* * Backlight control code for Sharp Zaurus SL-5500 * * Copyright 2005 John Lenz <lenz@cs.wisc.edu> * Maintainer: Pavel Machek <pavel@ucw.cz> (unless John wants to :-) * GPL v2 * * This driver assumes single CPU. That's okay, because collie is * slightly old hardware, and no one is going to retrofit second CPU to * old PDA. */ /* LCD power functions */ #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/fb.h> #include <linux/backlight.h> #include <asm/hardware/locomo.h> #include <asm/irq.h> #include <asm/mach/sharpsl_param.h> #include <asm/mach-types.h> #include "../../../arch/arm/mach-sa1100/generic.h" static struct backlight_device *locomolcd_bl_device; static struct locomo_dev *locomolcd_dev; static unsigned long locomolcd_flags; #define LOCOMOLCD_SUSPENDED 0x01 static void locomolcd_on(int comadj) { locomo_gpio_set_dir(locomolcd_dev->dev.parent, LOCOMO_GPIO_LCD_VSHA_ON, 0); locomo_gpio_write(locomolcd_dev->dev.parent, LOCOMO_GPIO_LCD_VSHA_ON, 1); mdelay(2); locomo_gpio_set_dir(locomolcd_dev->dev.parent, LOCOMO_GPIO_LCD_VSHD_ON, 0); locomo_gpio_write(locomolcd_dev->dev.parent, LOCOMO_GPIO_LCD_VSHD_ON, 1); mdelay(2); locomo_m62332_senddata(locomolcd_dev, comadj, 0); mdelay(5); locomo_gpio_set_dir(locomolcd_dev->dev.parent, LOCOMO_GPIO_LCD_VEE_ON, 0); locomo_gpio_write(locomolcd_dev->dev.parent, LOCOMO_GPIO_LCD_VEE_ON, 1); mdelay(10); /* TFTCRST | CPSOUT=0 | CPSEN */ locomo_writel(0x01, locomolcd_dev->mapbase + LOCOMO_TC); /* Set CPSD */ locomo_writel(6, locomolcd_dev->mapbase + LOCOMO_CPSD); /* TFTCRST | CPSOUT=0 | CPSEN */ locomo_writel((0x04 | 0x01), locomolcd_dev->mapbase + LOCOMO_TC); mdelay(10); locomo_gpio_set_dir(locomolcd_dev->dev.parent, LOCOMO_GPIO_LCD_MOD, 0); locomo_gpio_write(locomolcd_dev->dev.parent, LOCOMO_GPIO_LCD_MOD, 1); } static void locomolcd_off(int comadj) { /* TFTCRST=1 | CPSOUT=1 | CPSEN = 0 */ locomo_writel(0x06, locomolcd_dev->mapbase + 
LOCOMO_TC); mdelay(1); locomo_gpio_write(locomolcd_dev->dev.parent, LOCOMO_GPIO_LCD_VSHA_ON, 0); mdelay(110); locomo_gpio_write(locomolcd_dev->dev.parent, LOCOMO_GPIO_LCD_VEE_ON, 0); mdelay(700); /* TFTCRST=0 | CPSOUT=0 | CPSEN = 0 */ locomo_writel(0, locomolcd_dev->mapbase + LOCOMO_TC); locomo_gpio_write(locomolcd_dev->dev.parent, LOCOMO_GPIO_LCD_MOD, 0); locomo_gpio_write(locomolcd_dev->dev.parent, LOCOMO_GPIO_LCD_VSHD_ON, 0); } void locomolcd_power(int on) { int comadj = sharpsl_param.comadj; unsigned long flags; local_irq_save(flags); if (!locomolcd_dev) { local_irq_restore(flags); return; } /* read comadj */ if (comadj == -1 && machine_is_collie()) comadj = 128; if (comadj == -1 && machine_is_poodle()) comadj = 118; if (on) locomolcd_on(comadj); else locomolcd_off(comadj); local_irq_restore(flags); } EXPORT_SYMBOL(locomolcd_power); static int current_intensity; static int locomolcd_set_intensity(struct backlight_device *bd) { int intensity = bd->props.brightness; if (bd->props.power != FB_BLANK_UNBLANK) intensity = 0; if (bd->props.fb_blank != FB_BLANK_UNBLANK) intensity = 0; if (locomolcd_flags & LOCOMOLCD_SUSPENDED) intensity = 0; switch (intensity) { /* * AC and non-AC are handled differently, * but produce same results in sharp code? 
*/ case 0: locomo_frontlight_set(locomolcd_dev, 0, 0, 161); break; case 1: locomo_frontlight_set(locomolcd_dev, 117, 0, 161); break; case 2: locomo_frontlight_set(locomolcd_dev, 163, 0, 148); break; case 3: locomo_frontlight_set(locomolcd_dev, 194, 0, 161); break; case 4: locomo_frontlight_set(locomolcd_dev, 194, 1, 161); break; default: return -ENODEV; } current_intensity = intensity; return 0; } static int locomolcd_get_intensity(struct backlight_device *bd) { return current_intensity; } static const struct backlight_ops locomobl_data = { .get_brightness = locomolcd_get_intensity, .update_status = locomolcd_set_intensity, }; #ifdef CONFIG_PM static int locomolcd_suspend(struct locomo_dev *dev, pm_message_t state) { locomolcd_flags |= LOCOMOLCD_SUSPENDED; locomolcd_set_intensity(locomolcd_bl_device); return 0; } static int locomolcd_resume(struct locomo_dev *dev) { locomolcd_flags &= ~LOCOMOLCD_SUSPENDED; locomolcd_set_intensity(locomolcd_bl_device); return 0; } #else #define locomolcd_suspend NULL #define locomolcd_resume NULL #endif static int locomolcd_probe(struct locomo_dev *ldev) { struct backlight_properties props; unsigned long flags; local_irq_save(flags); locomolcd_dev = ldev; locomo_gpio_set_dir(ldev->dev.parent, LOCOMO_GPIO_FL_VR, 0); /* * the poodle_lcd_power function is called for the first time * from fs_initcall, which is before locomo is activated. 
* We need to recall poodle_lcd_power here */ if (machine_is_poodle()) locomolcd_power(1); local_irq_restore(flags); memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; props.max_brightness = 4; locomolcd_bl_device = backlight_device_register("locomo-bl", &ldev->dev, NULL, &locomobl_data, &props); if (IS_ERR(locomolcd_bl_device)) return PTR_ERR(locomolcd_bl_device); /* Set up frontlight so that screen is readable */ locomolcd_bl_device->props.brightness = 2; locomolcd_set_intensity(locomolcd_bl_device); return 0; } static int locomolcd_remove(struct locomo_dev *dev) { unsigned long flags; locomolcd_bl_device->props.brightness = 0; locomolcd_bl_device->props.power = 0; locomolcd_set_intensity(locomolcd_bl_device); backlight_device_unregister(locomolcd_bl_device); local_irq_save(flags); locomolcd_dev = NULL; local_irq_restore(flags); return 0; } static struct locomo_driver poodle_lcd_driver = { .drv = { .name = "locomo-backlight", }, .devid = LOCOMO_DEVID_BACKLIGHT, .probe = locomolcd_probe, .remove = locomolcd_remove, .suspend = locomolcd_suspend, .resume = locomolcd_resume, }; static int __init locomolcd_init(void) { return locomo_driver_register(&poodle_lcd_driver); } static void __exit locomolcd_exit(void) { locomo_driver_unregister(&poodle_lcd_driver); } module_init(locomolcd_init); module_exit(locomolcd_exit); MODULE_AUTHOR("John Lenz <lenz@cs.wisc.edu>, Pavel Machek <pavel@ucw.cz>"); MODULE_DESCRIPTION("Collie LCD driver"); MODULE_LICENSE("GPL");
gpl-2.0
icoolguy1995/Elixer-1
net/wireless/util.c
416
28032
/* * Wireless utility functions * * Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net> */ #include <linux/export.h> #include <linux/bitops.h> #include <linux/etherdevice.h> #include <linux/slab.h> #include <net/cfg80211.h> #include <net/ip.h> #include <net/dsfield.h> #include "core.h" struct ieee80211_rate * ieee80211_get_response_rate(struct ieee80211_supported_band *sband, u32 basic_rates, int bitrate) { struct ieee80211_rate *result = &sband->bitrates[0]; int i; for (i = 0; i < sband->n_bitrates; i++) { if (!(basic_rates & BIT(i))) continue; if (sband->bitrates[i].bitrate > bitrate) continue; result = &sband->bitrates[i]; } return result; } EXPORT_SYMBOL(ieee80211_get_response_rate); int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band) { /* see 802.11 17.3.8.3.2 and Annex J * there are overlapping channel numbers in 5GHz and 2GHz bands */ if (band == IEEE80211_BAND_5GHZ) { if (chan >= 182 && chan <= 196) return 4000 + chan * 5; else return 5000 + chan * 5; } else { /* IEEE80211_BAND_2GHZ */ if (chan == 14) return 2484; else if (chan < 14) return 2407 + chan * 5; else return 0; /* not supported */ } } EXPORT_SYMBOL(ieee80211_channel_to_frequency); int ieee80211_frequency_to_channel(int freq) { /* see 802.11 17.3.8.3.2 and Annex J */ if (freq == 2484) return 14; else if (freq < 2484) return (freq - 2407) / 5; else if (freq >= 4910 && freq <= 4980) return (freq - 4000) / 5; else return (freq - 5000) / 5; } EXPORT_SYMBOL(ieee80211_frequency_to_channel); struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy, int freq) { enum ieee80211_band band; struct ieee80211_supported_band *sband; int i; for (band = 0; band < IEEE80211_NUM_BANDS; band++) { sband = wiphy->bands[band]; if (!sband) continue; for (i = 0; i < sband->n_channels; i++) { if (sband->channels[i].center_freq == freq) return &sband->channels[i]; } } return NULL; } EXPORT_SYMBOL(__ieee80211_get_channel); static void set_mandatory_flags_band(struct 
ieee80211_supported_band *sband, enum ieee80211_band band) { int i, want; switch (band) { case IEEE80211_BAND_5GHZ: want = 3; for (i = 0; i < sband->n_bitrates; i++) { if (sband->bitrates[i].bitrate == 60 || sband->bitrates[i].bitrate == 120 || sband->bitrates[i].bitrate == 240) { sband->bitrates[i].flags |= IEEE80211_RATE_MANDATORY_A; want--; } } WARN_ON(want); break; case IEEE80211_BAND_2GHZ: want = 7; for (i = 0; i < sband->n_bitrates; i++) { if (sband->bitrates[i].bitrate == 10) { sband->bitrates[i].flags |= IEEE80211_RATE_MANDATORY_B | IEEE80211_RATE_MANDATORY_G; want--; } if (sband->bitrates[i].bitrate == 20 || sband->bitrates[i].bitrate == 55 || sband->bitrates[i].bitrate == 110 || sband->bitrates[i].bitrate == 60 || sband->bitrates[i].bitrate == 120 || sband->bitrates[i].bitrate == 240) { sband->bitrates[i].flags |= IEEE80211_RATE_MANDATORY_G; want--; } if (sband->bitrates[i].bitrate != 10 && sband->bitrates[i].bitrate != 20 && sband->bitrates[i].bitrate != 55 && sband->bitrates[i].bitrate != 110) sband->bitrates[i].flags |= IEEE80211_RATE_ERP_G; } WARN_ON(want != 0 && want != 3 && want != 6); break; case IEEE80211_NUM_BANDS: WARN_ON(1); break; } } void ieee80211_set_bitrate_flags(struct wiphy *wiphy) { enum ieee80211_band band; for (band = 0; band < IEEE80211_NUM_BANDS; band++) if (wiphy->bands[band]) set_mandatory_flags_band(wiphy->bands[band], band); } bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher) { int i; for (i = 0; i < wiphy->n_cipher_suites; i++) if (cipher == wiphy->cipher_suites[i]) return true; return false; } int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, struct key_params *params, int key_idx, bool pairwise, const u8 *mac_addr) { if (key_idx > 5) return -EINVAL; if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) return -EINVAL; if (pairwise && !mac_addr) return -EINVAL; /* * Disallow pairwise keys with non-zero index unless it's WEP * or a vendor specific cipher (because 
current deployments use * pairwise WEP keys with non-zero indices and for vendor specific * ciphers this should be validated in the driver or hardware level * - but 802.11i clearly specifies to use zero) */ if (pairwise && key_idx && ((params->cipher == WLAN_CIPHER_SUITE_TKIP) || (params->cipher == WLAN_CIPHER_SUITE_CCMP) || (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC))) return -EINVAL; switch (params->cipher) { case WLAN_CIPHER_SUITE_WEP40: if (params->key_len != WLAN_KEY_LEN_WEP40) return -EINVAL; break; case WLAN_CIPHER_SUITE_TKIP: if (params->key_len != WLAN_KEY_LEN_TKIP) return -EINVAL; break; case WLAN_CIPHER_SUITE_CCMP: if (params->key_len != WLAN_KEY_LEN_CCMP) return -EINVAL; break; case WLAN_CIPHER_SUITE_WEP104: if (params->key_len != WLAN_KEY_LEN_WEP104) return -EINVAL; break; case WLAN_CIPHER_SUITE_AES_CMAC: if (params->key_len != WLAN_KEY_LEN_AES_CMAC) return -EINVAL; break; case WLAN_CIPHER_SUITE_SMS4: if (params->key_len != WLAN_KEY_LEN_WAPI_SMS4) return -EINVAL; break; default: /* * We don't know anything about this algorithm, * allow using it -- but the driver must check * all parameters! We still check below whether * or not the driver supports this algorithm, * of course. 
*/ break; } if (params->seq) { switch (params->cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: /* These ciphers do not use key sequence */ return -EINVAL; case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_AES_CMAC: if (params->seq_len != 6) return -EINVAL; break; } } if (!cfg80211_supported_cipher_suite(&rdev->wiphy, params->cipher)) return -EINVAL; return 0; } unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc) { unsigned int hdrlen = 24; if (ieee80211_is_data(fc)) { if (ieee80211_has_a4(fc)) hdrlen = 30; if (ieee80211_is_data_qos(fc)) { hdrlen += IEEE80211_QOS_CTL_LEN; if (ieee80211_has_order(fc)) hdrlen += IEEE80211_HT_CTL_LEN; } goto out; } if (ieee80211_is_ctl(fc)) { /* * ACK and CTS are 10 bytes, all others 16. To see how * to get this condition consider * subtype mask: 0b0000000011110000 (0x00F0) * ACK subtype: 0b0000000011010000 (0x00D0) * CTS subtype: 0b0000000011000000 (0x00C0) * bits that matter: ^^^ (0x00E0) * value of those: 0b0000000011000000 (0x00C0) */ if ((fc & cpu_to_le16(0x00E0)) == cpu_to_le16(0x00C0)) hdrlen = 10; else hdrlen = 16; } out: return hdrlen; } EXPORT_SYMBOL(ieee80211_hdrlen); unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb) { const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)skb->data; unsigned int hdrlen; if (unlikely(skb->len < 10)) return 0; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (unlikely(hdrlen > skb->len)) return 0; return hdrlen; } EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb); static int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr) { int ae = meshhdr->flags & MESH_FLAGS_AE; /* 7.1.3.5a.2 */ switch (ae) { case 0: return 6; case MESH_FLAGS_AE_A4: return 12; case MESH_FLAGS_AE_A5_A6: return 18; case (MESH_FLAGS_AE_A4 | MESH_FLAGS_AE_A5_A6): return 24; default: return 6; } } int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, enum nl80211_iftype iftype) { struct ieee80211_hdr *hdr = (struct 
ieee80211_hdr *) skb->data; u16 hdrlen, ethertype; u8 *payload; u8 dst[ETH_ALEN]; u8 src[ETH_ALEN] __aligned(2); if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) return -1; hdrlen = ieee80211_hdrlen(hdr->frame_control); /* convert IEEE 802.11 header + possible LLC headers into Ethernet * header * IEEE 802.11 address fields: * ToDS FromDS Addr1 Addr2 Addr3 Addr4 * 0 0 DA SA BSSID n/a * 0 1 DA BSSID SA n/a * 1 0 BSSID SA DA n/a * 1 1 RA TA DA SA */ memcpy(dst, ieee80211_get_DA(hdr), ETH_ALEN); memcpy(src, ieee80211_get_SA(hdr), ETH_ALEN); switch (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { case cpu_to_le16(IEEE80211_FCTL_TODS): if (unlikely(iftype != NL80211_IFTYPE_AP && iftype != NL80211_IFTYPE_AP_VLAN && iftype != NL80211_IFTYPE_P2P_GO)) return -1; break; case cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS): if (unlikely(iftype != NL80211_IFTYPE_WDS && iftype != NL80211_IFTYPE_MESH_POINT && iftype != NL80211_IFTYPE_AP_VLAN && iftype != NL80211_IFTYPE_STATION)) return -1; if (iftype == NL80211_IFTYPE_MESH_POINT) { struct ieee80211s_hdr *meshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); /* make sure meshdr->flags is on the linear part */ if (!pskb_may_pull(skb, hdrlen + 1)) return -1; if (meshdr->flags & MESH_FLAGS_AE_A5_A6) { skb_copy_bits(skb, hdrlen + offsetof(struct ieee80211s_hdr, eaddr1), dst, ETH_ALEN); skb_copy_bits(skb, hdrlen + offsetof(struct ieee80211s_hdr, eaddr2), src, ETH_ALEN); } hdrlen += ieee80211_get_mesh_hdrlen(meshdr); } break; case cpu_to_le16(IEEE80211_FCTL_FROMDS): if ((iftype != NL80211_IFTYPE_STATION && iftype != NL80211_IFTYPE_P2P_CLIENT && iftype != NL80211_IFTYPE_MESH_POINT) || (is_multicast_ether_addr(dst) && !compare_ether_addr(src, addr))) return -1; if (iftype == NL80211_IFTYPE_MESH_POINT) { struct ieee80211s_hdr *meshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); /* make sure meshdr->flags is on the linear part */ if (!pskb_may_pull(skb, hdrlen + 1)) return -1; 
if (meshdr->flags & MESH_FLAGS_AE_A4) skb_copy_bits(skb, hdrlen + offsetof(struct ieee80211s_hdr, eaddr1), src, ETH_ALEN); hdrlen += ieee80211_get_mesh_hdrlen(meshdr); } break; case cpu_to_le16(0): if (iftype != NL80211_IFTYPE_ADHOC && iftype != NL80211_IFTYPE_STATION) return -1; break; } if (!pskb_may_pull(skb, hdrlen + 8)) return -1; payload = skb->data + hdrlen; ethertype = (payload[6] << 8) | payload[7]; if (likely((compare_ether_addr(payload, rfc1042_header) == 0 && ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || compare_ether_addr(payload, bridge_tunnel_header) == 0)) { /* remove RFC1042 or Bridge-Tunnel encapsulation and * replace EtherType */ skb_pull(skb, hdrlen + 6); memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN); memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN); } else { struct ethhdr *ehdr; __be16 len; skb_pull(skb, hdrlen); len = htons(skb->len); ehdr = (struct ethhdr *) skb_push(skb, sizeof(struct ethhdr)); memcpy(ehdr->h_dest, dst, ETH_ALEN); memcpy(ehdr->h_source, src, ETH_ALEN); ehdr->h_proto = len; } return 0; } EXPORT_SYMBOL(ieee80211_data_to_8023); int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr, enum nl80211_iftype iftype, u8 *bssid, bool qos) { struct ieee80211_hdr hdr; u16 hdrlen, ethertype; __le16 fc; const u8 *encaps_data; int encaps_len, skip_header_bytes; int nh_pos, h_pos; int head_need; if (unlikely(skb->len < ETH_HLEN)) return -EINVAL; nh_pos = skb_network_header(skb) - skb->data; h_pos = skb_transport_header(skb) - skb->data; /* convert Ethernet header to proper 802.11 header (based on * operation mode) */ ethertype = (skb->data[12] << 8) | skb->data[13]; fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); switch (iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_P2P_GO: fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); /* DA BSSID SA */ memcpy(hdr.addr1, skb->data, ETH_ALEN); memcpy(hdr.addr2, addr, ETH_ALEN); memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); hdrlen = 24; 
break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: fc |= cpu_to_le16(IEEE80211_FCTL_TODS); /* BSSID SA DA */ memcpy(hdr.addr1, bssid, ETH_ALEN); memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); memcpy(hdr.addr3, skb->data, ETH_ALEN); hdrlen = 24; break; case NL80211_IFTYPE_ADHOC: /* DA SA BSSID */ memcpy(hdr.addr1, skb->data, ETH_ALEN); memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); memcpy(hdr.addr3, bssid, ETH_ALEN); hdrlen = 24; break; default: return -EOPNOTSUPP; } if (qos) { fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); hdrlen += 2; } hdr.frame_control = fc; hdr.duration_id = 0; hdr.seq_ctrl = 0; skip_header_bytes = ETH_HLEN; if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) { encaps_data = bridge_tunnel_header; encaps_len = sizeof(bridge_tunnel_header); skip_header_bytes -= 2; } else if (ethertype > 0x600) { encaps_data = rfc1042_header; encaps_len = sizeof(rfc1042_header); skip_header_bytes -= 2; } else { encaps_data = NULL; encaps_len = 0; } skb_pull(skb, skip_header_bytes); nh_pos -= skip_header_bytes; h_pos -= skip_header_bytes; head_need = hdrlen + encaps_len - skb_headroom(skb); if (head_need > 0 || skb_cloned(skb)) { head_need = max(head_need, 0); if (head_need) skb_orphan(skb); if (pskb_expand_head(skb, head_need, 0, GFP_ATOMIC)) return -ENOMEM; skb->truesize += head_need; } if (encaps_data) { memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len); nh_pos += encaps_len; h_pos += encaps_len; } memcpy(skb_push(skb, hdrlen), &hdr, hdrlen); nh_pos += hdrlen; h_pos += hdrlen; /* Update skb pointers to various headers since this modified frame * is going to go through Linux networking code that may potentially * need things like pointer to IP header. 
*/ skb_set_mac_header(skb, 0); skb_set_network_header(skb, nh_pos); skb_set_transport_header(skb, h_pos); return 0; } EXPORT_SYMBOL(ieee80211_data_from_8023); void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, const u8 *addr, enum nl80211_iftype iftype, const unsigned int extra_headroom, bool has_80211_header) { struct sk_buff *frame = NULL; u16 ethertype; u8 *payload; const struct ethhdr *eth; int remaining, err; u8 dst[ETH_ALEN], src[ETH_ALEN]; if (has_80211_header) { err = ieee80211_data_to_8023(skb, addr, iftype); if (err) goto out; /* skip the wrapping header */ eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr)); if (!eth) goto out; } else { eth = (struct ethhdr *) skb->data; } while (skb != frame) { u8 padding; __be16 len = eth->h_proto; unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len); remaining = skb->len; memcpy(dst, eth->h_dest, ETH_ALEN); memcpy(src, eth->h_source, ETH_ALEN); padding = (4 - subframe_len) & 0x3; /* the last MSDU has no padding */ if (subframe_len > remaining) goto purge; skb_pull(skb, sizeof(struct ethhdr)); /* reuse skb for the last subframe */ if (remaining <= subframe_len + padding) frame = skb; else { unsigned int hlen = ALIGN(extra_headroom, 4); /* * Allocate and reserve two bytes more for payload * alignment since sizeof(struct ethhdr) is 14. 
*/ frame = dev_alloc_skb(hlen + subframe_len + 2); if (!frame) goto purge; skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2); memcpy(skb_put(frame, ntohs(len)), skb->data, ntohs(len)); eth = (struct ethhdr *)skb_pull(skb, ntohs(len) + padding); if (!eth) { dev_kfree_skb(frame); goto purge; } } skb_reset_network_header(frame); frame->dev = skb->dev; frame->priority = skb->priority; payload = frame->data; ethertype = (payload[6] << 8) | payload[7]; if (likely((compare_ether_addr(payload, rfc1042_header) == 0 && ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || compare_ether_addr(payload, bridge_tunnel_header) == 0)) { /* remove RFC1042 or Bridge-Tunnel * encapsulation and replace EtherType */ skb_pull(frame, 6); memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN); memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN); } else { memcpy(skb_push(frame, sizeof(__be16)), &len, sizeof(__be16)); memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN); memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN); } __skb_queue_tail(list, frame); } return; purge: __skb_queue_purge(list); out: dev_kfree_skb(skb); } EXPORT_SYMBOL(ieee80211_amsdu_to_8023s); /* Given a data frame determine the 802.1p/1d tag to use. */ unsigned int cfg80211_classify8021d(struct sk_buff *skb) { unsigned int dscp; /* skb->priority values from 256->263 are magic values to * directly indicate a specific 802.1d priority. This is used * to allow 802.1d priority to be passed directly in from VLAN * tags, etc. 
*/ if (skb->priority >= 256 && skb->priority <= 263) return skb->priority - 256; switch (skb->protocol) { case htons(ETH_P_IP): dscp = ipv4_get_dsfield(ip_hdr(skb)) & 0xfc; break; case htons(ETH_P_IPV6): dscp = ipv6_get_dsfield(ipv6_hdr(skb)) & 0xfc; break; default: return 0; } return dscp >> 5; } EXPORT_SYMBOL(cfg80211_classify8021d); const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie) { u8 *end, *pos; pos = bss->information_elements; if (pos == NULL) return NULL; end = pos + bss->len_information_elements; while (pos + 1 < end) { if (pos + 2 + pos[1] > end) break; if (pos[0] == ie) return pos; pos += 2 + pos[1]; } return NULL; } EXPORT_SYMBOL(ieee80211_bss_get_ie); void cfg80211_upload_connect_keys(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct net_device *dev = wdev->netdev; int i; if (!wdev->connect_keys) return; for (i = 0; i < 6; i++) { if (!wdev->connect_keys->params[i].cipher) continue; if (rdev->ops->add_key(wdev->wiphy, dev, i, false, NULL, &wdev->connect_keys->params[i])) { netdev_err(dev, "failed to set key %d\n", i); continue; } if (wdev->connect_keys->def == i) if (rdev->ops->set_default_key(wdev->wiphy, dev, i, true, true)) { netdev_err(dev, "failed to set defkey %d\n", i); continue; } if (wdev->connect_keys->defmgmt == i) if (rdev->ops->set_default_mgmt_key(wdev->wiphy, dev, i)) netdev_err(dev, "failed to set mgtdef %d\n", i); } kfree(wdev->connect_keys); wdev->connect_keys = NULL; } static void cfg80211_process_wdev_events(struct wireless_dev *wdev) { struct cfg80211_event *ev; unsigned long flags; const u8 *bssid = NULL; spin_lock_irqsave(&wdev->event_lock, flags); while (!list_empty(&wdev->event_list)) { ev = list_first_entry(&wdev->event_list, struct cfg80211_event, list); list_del(&ev->list); spin_unlock_irqrestore(&wdev->event_lock, flags); wdev_lock(wdev); switch (ev->type) { case EVENT_CONNECT_RESULT: if (!is_zero_ether_addr(ev->cr.bssid)) bssid = ev->cr.bssid; 
__cfg80211_connect_result( wdev->netdev, bssid, ev->cr.req_ie, ev->cr.req_ie_len, ev->cr.resp_ie, ev->cr.resp_ie_len, ev->cr.status, ev->cr.status == WLAN_STATUS_SUCCESS, NULL); break; case EVENT_ROAMED: __cfg80211_roamed(wdev, ev->rm.bss, ev->rm.req_ie, ev->rm.req_ie_len, ev->rm.resp_ie, ev->rm.resp_ie_len); break; case EVENT_DISCONNECTED: __cfg80211_disconnected(wdev->netdev, ev->dc.ie, ev->dc.ie_len, ev->dc.reason, true); break; case EVENT_IBSS_JOINED: __cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid); break; } wdev_unlock(wdev); kfree(ev); spin_lock_irqsave(&wdev->event_lock, flags); } spin_unlock_irqrestore(&wdev->event_lock, flags); } void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev) { struct wireless_dev *wdev; ASSERT_RTNL(); ASSERT_RDEV_LOCK(rdev); mutex_lock(&rdev->devlist_mtx); list_for_each_entry(wdev, &rdev->netdev_list, list) cfg80211_process_wdev_events(wdev); mutex_unlock(&rdev->devlist_mtx); } int cfg80211_change_iface(struct cfg80211_registered_device *rdev, struct net_device *dev, enum nl80211_iftype ntype, u32 *flags, struct vif_params *params) { int err; enum nl80211_iftype otype = dev->ieee80211_ptr->iftype; ASSERT_RDEV_LOCK(rdev); /* don't support changing VLANs, you just re-create them */ if (otype == NL80211_IFTYPE_AP_VLAN) return -EOPNOTSUPP; if (!rdev->ops->change_virtual_intf || !(rdev->wiphy.interface_modes & (1 << ntype))) return -EOPNOTSUPP; /* if it's part of a bridge, reject changing type to station/ibss */ if ((dev->priv_flags & IFF_BRIDGE_PORT) && (ntype == NL80211_IFTYPE_ADHOC || ntype == NL80211_IFTYPE_STATION || ntype == NL80211_IFTYPE_P2P_CLIENT)) return -EBUSY; if (ntype != otype) { err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr, ntype); if (err) return err; dev->ieee80211_ptr->use_4addr = false; dev->ieee80211_ptr->mesh_id_up_len = 0; switch (otype) { case NL80211_IFTYPE_ADHOC: cfg80211_leave_ibss(rdev, dev, false); break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: 
cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, true); break; case NL80211_IFTYPE_MESH_POINT: /* mesh should be handled? */ break; default: break; } cfg80211_process_rdev_events(rdev); } err = rdev->ops->change_virtual_intf(&rdev->wiphy, dev, ntype, flags, params); WARN_ON(!err && dev->ieee80211_ptr->iftype != ntype); if (!err && params && params->use_4addr != -1) dev->ieee80211_ptr->use_4addr = params->use_4addr; if (!err) { dev->priv_flags &= ~IFF_DONT_BRIDGE; switch (ntype) { case NL80211_IFTYPE_STATION: if (dev->ieee80211_ptr->use_4addr) break; /* fall through */ case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_ADHOC: dev->priv_flags |= IFF_DONT_BRIDGE; break; case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_MESH_POINT: /* bridging OK */ break; case NL80211_IFTYPE_MONITOR: /* monitor can't bridge anyway */ break; case NL80211_IFTYPE_UNSPECIFIED: case NUM_NL80211_IFTYPES: /* not happening */ break; } } return err; } static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate) { static const u32 base[4][10] = { { 6500000, 13000000, 19500000, 26000000, 39000000, 52000000, 58500000, 65000000, 78000000, 0, }, { 13500000, 27000000, 40500000, 54000000, 81000000, 108000000, 121500000, 135000000, 162000000, 180000000, }, { 29300000, 58500000, 87800000, 117000000, 175500000, 234000000, 263300000, 292500000, 351000000, 390000000, }, { 58500000, 117000000, 175500000, 234000000, 351000000, 468000000, 526500000, 585000000, 702000000, 780000000, }, }; u32 bitrate; int idx; if (WARN_ON_ONCE(rate->mcs > 9)) return 0; idx = rate->flags & (RATE_INFO_FLAGS_160_MHZ_WIDTH | RATE_INFO_FLAGS_80P80_MHZ_WIDTH) ? 3 : rate->flags & RATE_INFO_FLAGS_80_MHZ_WIDTH ? 2 : rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH ? 
1 : 0; bitrate = base[idx][rate->mcs]; bitrate *= rate->nss; if (rate->flags & RATE_INFO_FLAGS_SHORT_GI) bitrate = (bitrate / 9) * 10; /* do NOT round down here */ return (bitrate + 50000) / 100000; } u16 cfg80211_calculate_bitrate(struct rate_info *rate) { int modulation, streams, bitrate; if (!(rate->flags & RATE_INFO_FLAGS_MCS) && !(rate->flags & RATE_INFO_FLAGS_VHT_MCS)) return rate->legacy; if (rate->flags & RATE_INFO_FLAGS_VHT_MCS) return cfg80211_calculate_bitrate_vht(rate); /* the formula below does only work for MCS values smaller than 32 */ if (rate->mcs >= 32) return 0; modulation = rate->mcs & 7; streams = (rate->mcs >> 3) + 1; bitrate = (rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) ? 13500000 : 6500000; if (modulation < 4) bitrate *= (modulation + 1); else if (modulation == 4) bitrate *= (modulation + 2); else bitrate *= (modulation + 3); bitrate *= streams; if (rate->flags & RATE_INFO_FLAGS_SHORT_GI) bitrate = (bitrate / 9) * 10; /* do NOT round down here */ return (bitrate + 50000) / 100000; } EXPORT_SYMBOL(cfg80211_calculate_bitrate); int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev, u32 beacon_int) { struct wireless_dev *wdev; int res = 0; if (!beacon_int) return -EINVAL; mutex_lock(&rdev->devlist_mtx); list_for_each_entry(wdev, &rdev->netdev_list, list) { if (!wdev->beacon_interval) continue; if (wdev->beacon_interval != beacon_int) { res = -EINVAL; break; } } mutex_unlock(&rdev->devlist_mtx); return res; } int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, enum nl80211_iftype iftype) { struct wireless_dev *wdev_iter; int num[NUM_NL80211_IFTYPES]; int total = 1; int i, j; ASSERT_RTNL(); /* Always allow software iftypes */ if (rdev->wiphy.software_iftypes & BIT(iftype)) return 0; /* * Drivers will gradually all set this flag, until all * have it we only enforce for those that set it. 
*/ if (!(rdev->wiphy.flags & WIPHY_FLAG_ENFORCE_COMBINATIONS)) return 0; memset(num, 0, sizeof(num)); num[iftype] = 1; mutex_lock(&rdev->devlist_mtx); list_for_each_entry(wdev_iter, &rdev->netdev_list, list) { if (wdev_iter == wdev) continue; if (!netif_running(wdev_iter->netdev)) continue; if (rdev->wiphy.software_iftypes & BIT(wdev_iter->iftype)) continue; num[wdev_iter->iftype]++; total++; } mutex_unlock(&rdev->devlist_mtx); for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) { const struct ieee80211_iface_combination *c; struct ieee80211_iface_limit *limits; c = &rdev->wiphy.iface_combinations[i]; limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits, GFP_KERNEL); if (!limits) return -ENOMEM; if (total > c->max_interfaces) goto cont; for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) { if (rdev->wiphy.software_iftypes & BIT(iftype)) continue; for (j = 0; j < c->n_limits; j++) { if (!(limits[j].types & BIT(iftype))) continue; if (limits[j].max < num[iftype]) goto cont; limits[j].max -= num[iftype]; } } /* yay, it fits */ kfree(limits); return 0; cont: kfree(limits); } return -EBUSY; } int ieee80211_get_ratemask(struct ieee80211_supported_band *sband, const u8 *rates, unsigned int n_rates, u32 *mask) { int i, j; if (!sband) return -EINVAL; if (n_rates == 0 || n_rates > NL80211_MAX_SUPP_RATES) return -EINVAL; *mask = 0; for (i = 0; i < n_rates; i++) { int rate = (rates[i] & 0x7f) * 5; bool found = false; for (j = 0; j < sband->n_bitrates; j++) { if (sband->bitrates[j].bitrate == rate) { found = true; *mask |= BIT(j); break; } } if (!found) return -EINVAL; } /* * mask must have at least one bit set here since we * didn't accept a 0-length rates array nor allowed * entries in the array that didn't exist */ return 0; } /* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */ /* Ethernet-II snap header (RFC1042 for most EtherTypes) */ const unsigned char rfc1042_header[] __aligned(2) = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; 
EXPORT_SYMBOL(rfc1042_header); /* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ const unsigned char bridge_tunnel_header[] __aligned(2) = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; EXPORT_SYMBOL(bridge_tunnel_header);
gpl-2.0
JoshWu/linux-at91
drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
416
60054
/****************************************************************************** * * Copyright(c) 2009-2013 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. * * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include "../wifi.h" #include "../base.h" #include "../pci.h" #include "reg.h" #include "def.h" #include "phy.h" #include "dm.h" #include "fw.h" #include "trx.h" static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = { 0x7f8001fe, /* 0, +6.0dB */ 0x788001e2, /* 1, +5.5dB */ 0x71c001c7, /* 2, +5.0dB */ 0x6b8001ae, /* 3, +4.5dB */ 0x65400195, /* 4, +4.0dB */ 0x5fc0017f, /* 5, +3.5dB */ 0x5a400169, /* 6, +3.0dB */ 0x55400155, /* 7, +2.5dB */ 0x50800142, /* 8, +2.0dB */ 0x4c000130, /* 9, +1.5dB */ 0x47c0011f, /* 10, +1.0dB */ 0x43c0010f, /* 11, +0.5dB */ 0x40000100, /* 12, +0dB */ 0x3c8000f2, /* 13, -0.5dB */ 0x390000e4, /* 14, -1.0dB */ 0x35c000d7, /* 15, -1.5dB */ 0x32c000cb, /* 16, -2.0dB */ 0x300000c0, /* 17, -2.5dB */ 0x2d4000b5, /* 18, -3.0dB */ 0x2ac000ab, /* 19, -3.5dB */ 0x288000a2, /* 20, -4.0dB */ 0x26000098, /* 21, -4.5dB */ 0x24000090, /* 22, -5.0dB */ 0x22000088, /* 23, -5.5dB */ 0x20000080, /* 24, -6.0dB */ 0x1e400079, /* 25, -6.5dB */ 0x1c800072, /* 26, -7.0dB */ 0x1b00006c, /* 27. 
-7.5dB */ 0x19800066, /* 28, -8.0dB */ 0x18000060, /* 29, -8.5dB */ 0x16c0005b, /* 30, -9.0dB */ 0x15800056, /* 31, -9.5dB */ 0x14400051, /* 32, -10.0dB */ 0x1300004c, /* 33, -10.5dB */ 0x12000048, /* 34, -11.0dB */ 0x11000044, /* 35, -11.5dB */ 0x10000040, /* 36, -12.0dB */ 0x0f00003c, /* 37, -12.5dB */ 0x0e400039, /* 38, -13.0dB */ 0x0d800036, /* 39, -13.5dB */ 0x0cc00033, /* 40, -14.0dB */ 0x0c000030, /* 41, -14.5dB */ 0x0b40002d, /* 42, -15.0dB */ }; static const u8 cck_tbl_ch1_13[CCK_TABLE_SIZE][8] = { {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */ {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */ {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB */ {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB */ {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */ {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB */ {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB */ {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB */ {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */ {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB */ {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */ {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB */ {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 12, -6.0dB */ {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB */ {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */ {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB */ {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */ {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB */ {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */ {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB */ {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB*/ {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB*/ {0x0f, 0x0f, 0x0d, 0x0b, 
0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB*/ {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB*/ {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB*/ {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB*/ {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB*/ {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB*/ {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB*/ {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB*/ {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB*/ {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB*/ {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB*/ }; static const u8 cck_tbl_ch14[CCK_TABLE_SIZE][8] = { {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0dB */ {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 1, -0.5dB */ {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 2, -1.0dB */ {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 3, -1.5dB */ {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 4, -2.0dB */ {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 5, -2.5dB */ {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 6, -3.0dB */ {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 7, -3.5dB */ {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 8, -4.0dB */ {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 9, -4.5dB */ {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 10, -5.0dB */ {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 11, -5.5dB */ {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 12, -6.0dB */ {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 13, -6.5dB */ {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 14, -7.0dB */ {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 15, -7.5dB */ {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */ {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 17, -8.5dB */ {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 
0x00, 0x00}, /* 18, -9.0dB */ {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 19, -9.5dB */ {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 20, -10.0dB*/ {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 21, -10.5dB*/ {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 22, -11.0dB*/ {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 23, -11.5dB*/ {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 24, -12.0dB*/ {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 25, -12.5dB*/ {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 26, -13.0dB*/ {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 27, -13.5dB*/ {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 28, -14.0dB*/ {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 29, -14.5dB*/ {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 30, -15.0dB*/ {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 31, -15.5dB*/ {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB*/ }; #define CAL_SWING_OFF(_off, _dir, _size, _del) \ do { \ for (_off = 0; _off < _size; _off++) { \ if (_del < thermal_threshold[_dir][_off]) { \ if (_off != 0) \ _off--; \ break; \ } \ } \ if (_off >= _size) \ _off = _size - 1; \ } while (0) static void rtl88e_set_iqk_matrix(struct ieee80211_hw *hw, u8 ofdm_index, u8 rfpath, long iqk_result_x, long iqk_result_y) { long ele_a = 0, ele_d, ele_c = 0, value32; ele_d = (ofdmswing_table[ofdm_index] & 0xFFC00000)>>22; if (iqk_result_x != 0) { if ((iqk_result_x & 0x00000200) != 0) iqk_result_x = iqk_result_x | 0xFFFFFC00; ele_a = ((iqk_result_x * ele_d)>>8)&0x000003FF; if ((iqk_result_y & 0x00000200) != 0) iqk_result_y = iqk_result_y | 0xFFFFFC00; ele_c = ((iqk_result_y * ele_d)>>8)&0x000003FF; switch (rfpath) { case RF90_PATH_A: value32 = (ele_d << 22)|((ele_c & 0x3F)<<16) | ele_a; rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD, value32); value32 = (ele_c & 0x000003C0) >> 6; rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, value32); value32 = ((iqk_result_x * ele_d) >> 
7) & 0x01; rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24), value32); break; case RF90_PATH_B: value32 = (ele_d << 22)|((ele_c & 0x3F)<<16) | ele_a; rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, MASKDWORD, value32); value32 = (ele_c & 0x000003C0) >> 6; rtl_set_bbreg(hw, ROFDM0_XDTXAFE, MASKH4BITS, value32); value32 = ((iqk_result_x * ele_d) >> 7) & 0x01; rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(28), value32); break; default: break; } } else { switch (rfpath) { case RF90_PATH_A: rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD, ofdmswing_table[ofdm_index]); rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, 0x00); rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24), 0x00); break; case RF90_PATH_B: rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, MASKDWORD, ofdmswing_table[ofdm_index]); rtl_set_bbreg(hw, ROFDM0_XDTXAFE, MASKH4BITS, 0x00); rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(28), 0x00); break; default: break; } } } void rtl88e_dm_txpower_track_adjust(struct ieee80211_hw *hw, u8 type, u8 *pdirection, u32 *poutwrite_val) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); u8 pwr_val = 0; u8 cck_base = rtldm->swing_idx_cck_base; u8 cck_val = rtldm->swing_idx_cck; u8 ofdm_base = rtldm->swing_idx_ofdm_base[0]; u8 ofdm_val = rtlpriv->dm.swing_idx_ofdm[RF90_PATH_A]; if (type == 0) { if (ofdm_val <= ofdm_base) { *pdirection = 1; pwr_val = ofdm_base - ofdm_val; } else { *pdirection = 2; pwr_val = ofdm_base - ofdm_val; } } else if (type == 1) { if (cck_val <= cck_base) { *pdirection = 1; pwr_val = cck_base - cck_val; } else { *pdirection = 2; pwr_val = cck_val - cck_base; } } if (pwr_val >= TXPWRTRACK_MAX_IDX && (*pdirection == 1)) pwr_val = TXPWRTRACK_MAX_IDX; *poutwrite_val = pwr_val | (pwr_val << 8) | (pwr_val << 16) | (pwr_val << 24); } static void dm_tx_pwr_track_set_pwr(struct ieee80211_hw *hw, enum pwr_track_control_method method, u8 rfpath, u8 channel_mapped_index) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = 
&rtlpriv->phy; struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); if (method == TXAGC) { if (rtldm->swing_flag_ofdm || rtldm->swing_flag_cck) { rtl88e_phy_set_txpower_level(hw, rtlphy->current_channel); rtldm->swing_flag_ofdm = false; rtldm->swing_flag_cck = false; } } else if (method == BBSWING) { if (!rtldm->cck_inch14) { rtl_write_byte(rtlpriv, 0xa22, cck_tbl_ch1_13[rtldm->swing_idx_cck][0]); rtl_write_byte(rtlpriv, 0xa23, cck_tbl_ch1_13[rtldm->swing_idx_cck][1]); rtl_write_byte(rtlpriv, 0xa24, cck_tbl_ch1_13[rtldm->swing_idx_cck][2]); rtl_write_byte(rtlpriv, 0xa25, cck_tbl_ch1_13[rtldm->swing_idx_cck][3]); rtl_write_byte(rtlpriv, 0xa26, cck_tbl_ch1_13[rtldm->swing_idx_cck][4]); rtl_write_byte(rtlpriv, 0xa27, cck_tbl_ch1_13[rtldm->swing_idx_cck][5]); rtl_write_byte(rtlpriv, 0xa28, cck_tbl_ch1_13[rtldm->swing_idx_cck][6]); rtl_write_byte(rtlpriv, 0xa29, cck_tbl_ch1_13[rtldm->swing_idx_cck][7]); } else { rtl_write_byte(rtlpriv, 0xa22, cck_tbl_ch14[rtldm->swing_idx_cck][0]); rtl_write_byte(rtlpriv, 0xa23, cck_tbl_ch14[rtldm->swing_idx_cck][1]); rtl_write_byte(rtlpriv, 0xa24, cck_tbl_ch14[rtldm->swing_idx_cck][2]); rtl_write_byte(rtlpriv, 0xa25, cck_tbl_ch14[rtldm->swing_idx_cck][3]); rtl_write_byte(rtlpriv, 0xa26, cck_tbl_ch14[rtldm->swing_idx_cck][4]); rtl_write_byte(rtlpriv, 0xa27, cck_tbl_ch14[rtldm->swing_idx_cck][5]); rtl_write_byte(rtlpriv, 0xa28, cck_tbl_ch14[rtldm->swing_idx_cck][6]); rtl_write_byte(rtlpriv, 0xa29, cck_tbl_ch14[rtldm->swing_idx_cck][7]); } if (rfpath == RF90_PATH_A) { rtl88e_set_iqk_matrix(hw, rtldm->swing_idx_ofdm[rfpath], rfpath, rtlphy->iqk_matrix [channel_mapped_index]. value[0][0], rtlphy->iqk_matrix [channel_mapped_index]. value[0][1]); } else if (rfpath == RF90_PATH_B) { rtl88e_set_iqk_matrix(hw, rtldm->swing_idx_ofdm[rfpath], rfpath, rtlphy->iqk_matrix [channel_mapped_index]. value[0][4], rtlphy->iqk_matrix [channel_mapped_index]. 
value[0][5]); } } else { return; } } static void rtl88e_dm_diginit(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *dm_dig = &rtlpriv->dm_digtable; dm_dig->dig_enable_flag = true; dm_dig->cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f); dm_dig->pre_igvalue = 0; dm_dig->cur_sta_cstate = DIG_STA_DISCONNECT; dm_dig->presta_cstate = DIG_STA_DISCONNECT; dm_dig->curmultista_cstate = DIG_MULTISTA_DISCONNECT; dm_dig->rssi_lowthresh = DM_DIG_THRESH_LOW; dm_dig->rssi_highthresh = DM_DIG_THRESH_HIGH; dm_dig->fa_lowthresh = DM_FALSEALARM_THRESH_LOW; dm_dig->fa_highthresh = DM_FALSEALARM_THRESH_HIGH; dm_dig->rx_gain_max = DM_DIG_MAX; dm_dig->rx_gain_min = DM_DIG_MIN; dm_dig->back_val = DM_DIG_BACKOFF_DEFAULT; dm_dig->back_range_max = DM_DIG_BACKOFF_MAX; dm_dig->back_range_min = DM_DIG_BACKOFF_MIN; dm_dig->pre_cck_cca_thres = 0xff; dm_dig->cur_cck_cca_thres = 0x83; dm_dig->forbidden_igi = DM_DIG_MIN; dm_dig->large_fa_hit = 0; dm_dig->recover_cnt = 0; dm_dig->dig_min_0 = 0x25; dm_dig->dig_min_1 = 0x25; dm_dig->media_connect_0 = false; dm_dig->media_connect_1 = false; rtlpriv->dm.dm_initialgain_enable = true; } static u8 rtl88e_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *dm_dig = &rtlpriv->dm_digtable; long rssi_val_min = 0; if ((dm_dig->curmultista_cstate == DIG_MULTISTA_CONNECT) && (dm_dig->cur_sta_cstate == DIG_STA_CONNECT)) { if (rtlpriv->dm.entry_min_undec_sm_pwdb != 0) rssi_val_min = (rtlpriv->dm.entry_min_undec_sm_pwdb > rtlpriv->dm.undec_sm_pwdb) ? 
rtlpriv->dm.undec_sm_pwdb : rtlpriv->dm.entry_min_undec_sm_pwdb; else rssi_val_min = rtlpriv->dm.undec_sm_pwdb; } else if (dm_dig->cur_sta_cstate == DIG_STA_CONNECT || dm_dig->cur_sta_cstate == DIG_STA_BEFORE_CONNECT) { rssi_val_min = rtlpriv->dm.undec_sm_pwdb; } else if (dm_dig->curmultista_cstate == DIG_MULTISTA_CONNECT) { rssi_val_min = rtlpriv->dm.entry_min_undec_sm_pwdb; } return (u8)rssi_val_min; } static void rtl88e_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) { u32 ret_value; struct rtl_priv *rtlpriv = rtl_priv(hw); struct false_alarm_statistics *falsealm_cnt = &rtlpriv->falsealm_cnt; rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 1); rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 1); ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD); falsealm_cnt->cnt_fast_fsync_fail = (ret_value&0xffff); falsealm_cnt->cnt_sb_search_fail = ((ret_value&0xffff0000)>>16); ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD); falsealm_cnt->cnt_ofdm_cca = (ret_value&0xffff); falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16); ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD); falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff); falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16); ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD); falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff); falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail + falsealm_cnt->cnt_rate_illegal + falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail + falsealm_cnt->cnt_fast_fsync_fail + falsealm_cnt->cnt_sb_search_fail; ret_value = rtl_get_bbreg(hw, REG_SC_CNT, MASKDWORD); falsealm_cnt->cnt_bw_lsc = (ret_value & 0xffff); falsealm_cnt->cnt_bw_usc = ((ret_value & 0xffff0000) >> 16); rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(12), 1); rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1); ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0); falsealm_cnt->cnt_cck_fail = ret_value; ret_value = rtl_get_bbreg(hw, 
RCCK0_FACOUNTERUPPER, MASKBYTE3); falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8; ret_value = rtl_get_bbreg(hw, RCCK0_CCA_CNT, MASKDWORD); falsealm_cnt->cnt_cck_cca = ((ret_value & 0xff) << 8) | ((ret_value&0xFF00)>>8); falsealm_cnt->cnt_all = (falsealm_cnt->cnt_fast_fsync_fail + falsealm_cnt->cnt_sb_search_fail + falsealm_cnt->cnt_parity_fail + falsealm_cnt->cnt_rate_illegal + falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail + falsealm_cnt->cnt_cck_fail); falsealm_cnt->cnt_cca_all = falsealm_cnt->cnt_ofdm_cca + falsealm_cnt->cnt_cck_cca; rtl_set_bbreg(hw, ROFDM0_TRSWISOLATION, BIT(31), 1); rtl_set_bbreg(hw, ROFDM0_TRSWISOLATION, BIT(31), 0); rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(27), 1); rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(27), 0); rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 0); rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 0); rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(13)|BIT(12), 0); rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(13)|BIT(12), 2); rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(15)|BIT(14), 0); rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(15)|BIT(14), 2); RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n", falsealm_cnt->cnt_parity_fail, falsealm_cnt->cnt_rate_illegal, falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail); RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n", falsealm_cnt->cnt_ofdm_fail, falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all); } static void rtl88e_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *dm_dig = &rtlpriv->dm_digtable; u8 cur_cck_cca_thresh; if (dm_dig->cur_sta_cstate == DIG_STA_CONNECT) { dm_dig->rssi_val_min = rtl88e_dm_initial_gain_min_pwdb(hw); if (dm_dig->rssi_val_min > 25) { cur_cck_cca_thresh = 0xcd; } else if ((dm_dig->rssi_val_min <= 25) && (dm_dig->rssi_val_min > 10)) { cur_cck_cca_thresh = 0x83; } else { if 
(rtlpriv->falsealm_cnt.cnt_cck_fail > 1000) cur_cck_cca_thresh = 0x83; else cur_cck_cca_thresh = 0x40; } } else { if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000) cur_cck_cca_thresh = 0x83; else cur_cck_cca_thresh = 0x40; } if (dm_dig->cur_cck_cca_thres != cur_cck_cca_thresh) rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, cur_cck_cca_thresh); dm_dig->cur_cck_cca_thres = cur_cck_cca_thresh; dm_dig->pre_cck_cca_thres = dm_dig->cur_cck_cca_thres; RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "CCK cca thresh hold =%x\n", dm_dig->cur_cck_cca_thres); } static void rtl88e_dm_dig(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct dig_t *dm_dig = &rtlpriv->dm_digtable; u8 dig_dynamic_min, dig_maxofmin; bool bfirstconnect; u8 dm_dig_max, dm_dig_min; u8 current_igi = dm_dig->cur_igvalue; if (rtlpriv->dm.dm_initialgain_enable == false) return; if (dm_dig->dig_enable_flag == false) return; if (mac->act_scanning == true) return; if (mac->link_state >= MAC80211_LINKED) dm_dig->cur_sta_cstate = DIG_STA_CONNECT; else dm_dig->cur_sta_cstate = DIG_STA_DISCONNECT; if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP || rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC) dm_dig->cur_sta_cstate = DIG_STA_DISCONNECT; dm_dig_max = DM_DIG_MAX; dm_dig_min = DM_DIG_MIN; dig_maxofmin = DM_DIG_MAX_AP; dig_dynamic_min = dm_dig->dig_min_0; bfirstconnect = ((mac->link_state >= MAC80211_LINKED) ? 
true : false) && !dm_dig->media_connect_0; dm_dig->rssi_val_min = rtl88e_dm_initial_gain_min_pwdb(hw); if (mac->link_state >= MAC80211_LINKED) { if ((dm_dig->rssi_val_min + 20) > dm_dig_max) dm_dig->rx_gain_max = dm_dig_max; else if ((dm_dig->rssi_val_min + 20) < dm_dig_min) dm_dig->rx_gain_max = dm_dig_min; else dm_dig->rx_gain_max = dm_dig->rssi_val_min + 20; if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) { dig_dynamic_min = dm_dig->antdiv_rssi_max; } else { if (dm_dig->rssi_val_min < dm_dig_min) dig_dynamic_min = dm_dig_min; else if (dm_dig->rssi_val_min < dig_maxofmin) dig_dynamic_min = dig_maxofmin; else dig_dynamic_min = dm_dig->rssi_val_min; } } else { dm_dig->rx_gain_max = dm_dig_max; dig_dynamic_min = dm_dig_min; RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n"); } if (rtlpriv->falsealm_cnt.cnt_all > 10000) { dm_dig->large_fa_hit++; if (dm_dig->forbidden_igi < current_igi) { dm_dig->forbidden_igi = current_igi; dm_dig->large_fa_hit = 1; } if (dm_dig->large_fa_hit >= 3) { if ((dm_dig->forbidden_igi + 1) > dm_dig->rx_gain_max) dm_dig->rx_gain_min = dm_dig->rx_gain_max; else dm_dig->rx_gain_min = dm_dig->forbidden_igi + 1; dm_dig->recover_cnt = 3600; } } else { if (dm_dig->recover_cnt != 0) { dm_dig->recover_cnt--; } else { if (dm_dig->large_fa_hit == 0) { if ((dm_dig->forbidden_igi - 1) < dig_dynamic_min) { dm_dig->forbidden_igi = dig_dynamic_min; dm_dig->rx_gain_min = dig_dynamic_min; } else { dm_dig->forbidden_igi--; dm_dig->rx_gain_min = dm_dig->forbidden_igi + 1; } } else if (dm_dig->large_fa_hit == 3) { dm_dig->large_fa_hit = 0; } } } if (dm_dig->cur_sta_cstate == DIG_STA_CONNECT) { if (bfirstconnect) { current_igi = dm_dig->rssi_val_min; } else { if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH2) current_igi += 2; else if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH1) current_igi++; else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0) current_igi--; } } else { if (rtlpriv->falsealm_cnt.cnt_all > 10000) current_igi += 2; else if 
(rtlpriv->falsealm_cnt.cnt_all > 8000) current_igi++; else if (rtlpriv->falsealm_cnt.cnt_all < 500) current_igi--; } if (current_igi > DM_DIG_FA_UPPER) current_igi = DM_DIG_FA_UPPER; else if (current_igi < DM_DIG_FA_LOWER) current_igi = DM_DIG_FA_LOWER; if (rtlpriv->falsealm_cnt.cnt_all > 10000) current_igi = DM_DIG_FA_UPPER; dm_dig->cur_igvalue = current_igi; rtl88e_dm_write_dig(hw); dm_dig->media_connect_0 = ((mac->link_state >= MAC80211_LINKED) ? true : false); dm_dig->dig_min_0 = dig_dynamic_min; rtl88e_dm_cck_packet_detection_thresh(hw); } static void rtl88e_dm_init_dynamic_txpower(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); rtlpriv->dm.dynamic_txpower_enable = false; rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL; rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL; } static void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); long undec_sm_pwdb; if (!rtlpriv->dm.dynamic_txpower_enable) return; if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) { rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL; return; } if ((mac->link_state < MAC80211_LINKED) && (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) { RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, "Not connected to any\n"); rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL; rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL; return; } if (mac->link_state >= MAC80211_LINKED) { if (mac->opmode == NL80211_IFTYPE_ADHOC) { undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "AP Client PWDB = 0x%lx\n", undec_sm_pwdb); } else { undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "STA Default Port PWDB = 0x%lx\n", undec_sm_pwdb); } } else { undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "AP Ext Port PWDB = 0x%lx\n", undec_sm_pwdb); } if 
(undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) { rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "TXHIGHPWRLEVEL_LEVEL1 (TxPwr = 0x0)\n"); } else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) && (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) { rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "TXHIGHPWRLEVEL_LEVEL1 (TxPwr = 0x10)\n"); } else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) { rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "TXHIGHPWRLEVEL_NORMAL\n"); } if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) { RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "PHY_SetTxPowerLevel8192S() Channel = %d\n", rtlphy->current_channel); rtl88e_phy_set_txpower_level(hw, rtlphy->current_channel); } rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl; } void rtl88e_dm_write_dig(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *dm_dig = &rtlpriv->dm_digtable; RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n", dm_dig->cur_igvalue, dm_dig->pre_igvalue, dm_dig->back_val); if (dm_dig->cur_igvalue > 0x3f) dm_dig->cur_igvalue = 0x3f; if (dm_dig->pre_igvalue != dm_dig->cur_igvalue) { rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, dm_dig->cur_igvalue); dm_dig->pre_igvalue = dm_dig->cur_igvalue; } } static void rtl88e_dm_pwdb_monitor(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_sta_info *drv_priv; static u64 last_record_txok_cnt; static u64 last_record_rxok_cnt; long tmp_entry_max_pwdb = 0, tmp_entry_min_pwdb = 0xff; if (rtlhal->oem_id == RT_CID_819X_HP) { u64 cur_txok_cnt = 0; u64 cur_rxok_cnt = 0; cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_record_txok_cnt; cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - 
last_record_rxok_cnt; last_record_txok_cnt = cur_txok_cnt; last_record_rxok_cnt = cur_rxok_cnt; if (cur_rxok_cnt > (cur_txok_cnt * 6)) rtl_write_dword(rtlpriv, REG_ARFR0, 0x8f015); else rtl_write_dword(rtlpriv, REG_ARFR0, 0xff015); } /* AP & ADHOC & MESH */ spin_lock_bh(&rtlpriv->locks.entry_list_lock); list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) { if (drv_priv->rssi_stat.undec_sm_pwdb < tmp_entry_min_pwdb) tmp_entry_min_pwdb = drv_priv->rssi_stat.undec_sm_pwdb; if (drv_priv->rssi_stat.undec_sm_pwdb > tmp_entry_max_pwdb) tmp_entry_max_pwdb = drv_priv->rssi_stat.undec_sm_pwdb; } spin_unlock_bh(&rtlpriv->locks.entry_list_lock); /* If associated entry is found */ if (tmp_entry_max_pwdb != 0) { rtlpriv->dm.entry_max_undec_sm_pwdb = tmp_entry_max_pwdb; RTPRINT(rtlpriv, FDM, DM_PWDB, "EntryMaxPWDB = 0x%lx(%ld)\n", tmp_entry_max_pwdb, tmp_entry_max_pwdb); } else { rtlpriv->dm.entry_max_undec_sm_pwdb = 0; } /* If associated entry is found */ if (tmp_entry_min_pwdb != 0xff) { rtlpriv->dm.entry_min_undec_sm_pwdb = tmp_entry_min_pwdb; RTPRINT(rtlpriv, FDM, DM_PWDB, "EntryMinPWDB = 0x%lx(%ld)\n", tmp_entry_min_pwdb, tmp_entry_min_pwdb); } else { rtlpriv->dm.entry_min_undec_sm_pwdb = 0; } /* Indicate Rx signal strength to FW. 
*/ if (rtlpriv->dm.useramask) { u8 h2c_parameter[3] = { 0 }; h2c_parameter[2] = (u8)(rtlpriv->dm.undec_sm_pwdb & 0xFF); h2c_parameter[0] = 0x20; } else { rtl_write_byte(rtlpriv, 0x4fe, rtlpriv->dm.undec_sm_pwdb); } } void rtl88e_dm_init_edca_turbo(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); rtlpriv->dm.current_turbo_edca = false; rtlpriv->dm.is_any_nonbepkts = false; rtlpriv->dm.is_cur_rdlstate = false; } static void rtl88e_dm_check_edca_turbo(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); static u64 last_txok_cnt; static u64 last_rxok_cnt; static u32 last_bt_edca_ul; static u32 last_bt_edca_dl; u64 cur_txok_cnt = 0; u64 cur_rxok_cnt = 0; u32 edca_be_ul = 0x5ea42b; u32 edca_be_dl = 0x5ea42b; bool bt_change_edca = false; if ((last_bt_edca_ul != rtlpriv->btcoexist.bt_edca_ul) || (last_bt_edca_dl != rtlpriv->btcoexist.bt_edca_dl)) { rtlpriv->dm.current_turbo_edca = false; last_bt_edca_ul = rtlpriv->btcoexist.bt_edca_ul; last_bt_edca_dl = rtlpriv->btcoexist.bt_edca_dl; } if (rtlpriv->btcoexist.bt_edca_ul != 0) { edca_be_ul = rtlpriv->btcoexist.bt_edca_ul; bt_change_edca = true; } if (rtlpriv->btcoexist.bt_edca_dl != 0) { edca_be_ul = rtlpriv->btcoexist.bt_edca_dl; bt_change_edca = true; } if (mac->link_state != MAC80211_LINKED) { rtlpriv->dm.current_turbo_edca = false; return; } if ((bt_change_edca) || ((!rtlpriv->dm.is_any_nonbepkts) && (!rtlpriv->dm.disable_framebursting))) { cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt; cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt; if (cur_rxok_cnt > 4 * cur_txok_cnt) { if (!rtlpriv->dm.is_cur_rdlstate || !rtlpriv->dm.current_turbo_edca) { rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, edca_be_dl); rtlpriv->dm.is_cur_rdlstate = true; } } else { if (rtlpriv->dm.is_cur_rdlstate || !rtlpriv->dm.current_turbo_edca) { rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, edca_be_ul); rtlpriv->dm.is_cur_rdlstate = false; } } 
rtlpriv->dm.current_turbo_edca = true; } else { if (rtlpriv->dm.current_turbo_edca) { u8 tmp = AC0_BE; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, &tmp); rtlpriv->dm.current_turbo_edca = false; } } rtlpriv->dm.is_any_nonbepkts = false; last_txok_cnt = rtlpriv->stats.txbytesunicast; last_rxok_cnt = rtlpriv->stats.rxbytesunicast; } static void dm_txpower_track_cb_therm(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u8 thermalvalue = 0, delta, delta_lck, delta_iqk, offset; u8 thermalvalue_avg_count = 0; u32 thermalvalue_avg = 0; long ele_d, temp_cck; char ofdm_index[2], cck_index = 0, ofdm_index_old[2] = {0, 0}, cck_index_old = 0; int i = 0; /*bool is2t = false;*/ u8 ofdm_min_index = 6, rf = 1; /*u8 index_for_channel;*/ enum _power_dec_inc {power_dec, power_inc}; /*0.1 the following TWO tables decide the *final index of OFDM/CCK swing table */ char delta_swing_table_idx[2][15] = { {0, 0, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11}, {0, 0, -1, -2, -3, -4, -4, -4, -4, -5, -7, -8, -9, -9, -10} }; u8 thermal_threshold[2][15] = { {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 27}, {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 25, 25, 25} }; /*Initilization (7 steps in total) */ rtlpriv->dm.txpower_trackinginit = true; RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "dm_txpower_track_cb_therm\n"); thermalvalue = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0xfc00); if (!thermalvalue) return; RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n", thermalvalue, rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter); /*1. 
Query OFDM Default Setting: Path A*/ ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD) & MASKOFDM_D; for (i = 0; i < OFDM_TABLE_LENGTH; i++) { if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) { ofdm_index_old[0] = (u8)i; rtldm->swing_idx_ofdm_base[RF90_PATH_A] = (u8)i; RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index = 0x%x\n", ROFDM0_XATXIQIMBALANCE, ele_d, ofdm_index_old[0]); break; } } /*2.Query CCK default setting From 0xa24*/ temp_cck = rtl_get_bbreg(hw, RCCK0_TXFILTER2, MASKDWORD) & MASKCCK; for (i = 0; i < CCK_TABLE_LENGTH; i++) { if (rtlpriv->dm.cck_inch14) { if (memcmp(&temp_cck, &cck_tbl_ch14[i][2], 4) == 0) { cck_index_old = (u8)i; rtldm->swing_idx_cck_base = (u8)i; RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch 14 %d\n", RCCK0_TXFILTER2, temp_cck, cck_index_old, rtlpriv->dm.cck_inch14); break; } } else { if (memcmp(&temp_cck, &cck_tbl_ch1_13[i][2], 4) == 0) { cck_index_old = (u8)i; rtldm->swing_idx_cck_base = (u8)i; RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n", RCCK0_TXFILTER2, temp_cck, cck_index_old, rtlpriv->dm.cck_inch14); break; } } } /*3 Initialize ThermalValues of RFCalibrateInfo*/ if (!rtldm->thermalvalue) { rtlpriv->dm.thermalvalue = rtlefuse->eeprom_thermalmeter; rtlpriv->dm.thermalvalue_lck = thermalvalue; rtlpriv->dm.thermalvalue_iqk = thermalvalue; for (i = 0; i < rf; i++) rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i]; rtlpriv->dm.cck_index = cck_index_old; } /*4 Calculate average thermal meter*/ rtldm->thermalvalue_avg[rtldm->thermalvalue_avg_index] = thermalvalue; rtldm->thermalvalue_avg_index++; if (rtldm->thermalvalue_avg_index == AVG_THERMAL_NUM_88E) rtldm->thermalvalue_avg_index = 0; for (i = 0; i < AVG_THERMAL_NUM_88E; i++) { if (rtldm->thermalvalue_avg[i]) { thermalvalue_avg += rtldm->thermalvalue_avg[i]; thermalvalue_avg_count++; } } if (thermalvalue_avg_count) 
thermalvalue = (u8)(thermalvalue_avg / thermalvalue_avg_count); /* 5 Calculate delta, delta_LCK, delta_IQK.*/ if (rtlhal->reloadtxpowerindex) { delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ? (thermalvalue - rtlefuse->eeprom_thermalmeter) : (rtlefuse->eeprom_thermalmeter - thermalvalue); rtlhal->reloadtxpowerindex = false; rtlpriv->dm.done_txpower = false; } else if (rtlpriv->dm.done_txpower) { delta = (thermalvalue > rtlpriv->dm.thermalvalue) ? (thermalvalue - rtlpriv->dm.thermalvalue) : (rtlpriv->dm.thermalvalue - thermalvalue); } else { delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ? (thermalvalue - rtlefuse->eeprom_thermalmeter) : (rtlefuse->eeprom_thermalmeter - thermalvalue); } delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ? (thermalvalue - rtlpriv->dm.thermalvalue_lck) : (rtlpriv->dm.thermalvalue_lck - thermalvalue); delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ? (thermalvalue - rtlpriv->dm.thermalvalue_iqk) : (rtlpriv->dm.thermalvalue_iqk - thermalvalue); RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n", thermalvalue, rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter, delta, delta_lck, delta_iqk); /* 6 If necessary, do LCK.*/ if (delta_lck >= 8) { rtlpriv->dm.thermalvalue_lck = thermalvalue; rtl88e_phy_lc_calibrate(hw); } /* 7 If necessary, move the index of * swing table to adjust Tx power. */ if (delta > 0 && rtlpriv->dm.txpower_track_control) { delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ? (thermalvalue - rtlefuse->eeprom_thermalmeter) : (rtlefuse->eeprom_thermalmeter - thermalvalue); /* 7.1 Get the final CCK_index and OFDM_index for each * swing table. 
*/ if (thermalvalue > rtlefuse->eeprom_thermalmeter) { CAL_SWING_OFF(offset, power_inc, INDEX_MAPPING_NUM, delta); for (i = 0; i < rf; i++) ofdm_index[i] = rtldm->ofdm_index[i] + delta_swing_table_idx[power_inc][offset]; cck_index = rtldm->cck_index + delta_swing_table_idx[power_inc][offset]; } else { CAL_SWING_OFF(offset, power_dec, INDEX_MAPPING_NUM, delta); for (i = 0; i < rf; i++) ofdm_index[i] = rtldm->ofdm_index[i] + delta_swing_table_idx[power_dec][offset]; cck_index = rtldm->cck_index + delta_swing_table_idx[power_dec][offset]; } /* 7.2 Handle boundary conditions of index.*/ for (i = 0; i < rf; i++) { if (ofdm_index[i] > OFDM_TABLE_SIZE-1) ofdm_index[i] = OFDM_TABLE_SIZE-1; else if (rtldm->ofdm_index[i] < ofdm_min_index) ofdm_index[i] = ofdm_min_index; } if (cck_index > CCK_TABLE_SIZE-1) cck_index = CCK_TABLE_SIZE-1; else if (cck_index < 0) cck_index = 0; /*7.3Configure the Swing Table to adjust Tx Power.*/ if (rtlpriv->dm.txpower_track_control) { rtldm->done_txpower = true; rtldm->swing_idx_ofdm[RF90_PATH_A] = (u8)ofdm_index[RF90_PATH_A]; rtldm->swing_idx_cck = cck_index; if (rtldm->swing_idx_ofdm_cur != rtldm->swing_idx_ofdm[0]) { rtldm->swing_idx_ofdm_cur = rtldm->swing_idx_ofdm[0]; rtldm->swing_flag_ofdm = true; } if (rtldm->swing_idx_cck_cur != rtldm->swing_idx_cck) { rtldm->swing_idx_cck_cur = rtldm->swing_idx_cck; rtldm->swing_flag_cck = true; } dm_tx_pwr_track_set_pwr(hw, TXAGC, 0, 0); } } if (delta_iqk >= 8) { rtlpriv->dm.thermalvalue_iqk = thermalvalue; rtl88e_phy_iq_calibrate(hw, false); } if (rtldm->txpower_track_control) rtldm->thermalvalue = thermalvalue; rtldm->txpowercount = 0; RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "end\n"); } static void rtl88e_dm_init_txpower_tracking(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); rtlpriv->dm.txpower_tracking = true; rtlpriv->dm.txpower_trackinginit = false; rtlpriv->dm.txpowercount = 0; rtlpriv->dm.txpower_track_control = true; rtlpriv->dm.swing_idx_ofdm[RF90_PATH_A] = 12; 
rtlpriv->dm.swing_idx_ofdm_cur = 12;
	rtlpriv->dm.swing_flag_ofdm = false;
	RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
		 "rtlpriv->dm.txpower_tracking = %d\n",
		 rtlpriv->dm.txpower_tracking);
}

/* Two-phase watchdog hook: on one tick trigger the RF thermal meter, on
 * the next tick read it back and run the tracking callback.  tm_trigger
 * holds the phase across calls (single instance, watchdog context only).
 */
void rtl88e_dm_check_txpower_tracking(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	static u8 tm_trigger;

	if (!rtlpriv->dm.txpower_tracking)
		return;

	if (!tm_trigger) {
		/* Start a thermal-meter measurement; result is read next tick. */
		rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER,
			      BIT(17)|BIT(16), 0x03);
		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
			 "Trigger 88E Thermal Meter!!\n");
		tm_trigger = 1;
		return;
	} else {
		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
			 "Schedule TxPowerTracking !!\n");
		dm_txpower_track_cb_therm(hw);
		tm_trigger = 0;
	}
}

/* Initialise rate-adaptive state; the driver controls the rate mask when
 * dm_type is DM_TYPE_BYDRIVER.
 */
void rtl88e_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rate_adaptive *p_ra = &rtlpriv->ra;

	p_ra->ratr_state = DM_RATR_STA_INIT;
	p_ra->pre_ratr_state = DM_RATR_STA_INIT;

	if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
		rtlpriv->dm.useramask = true;
	else
		rtlpriv->dm.useramask = false;
}

/* Re-evaluate the RSSI level (high/middle/low).  The thresholds depend on
 * the previous state to give hysteresis; when the level changes, push a
 * new rate mask for the associated STA.
 */
static void rtl88e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rate_adaptive *p_ra = &rtlpriv->ra;
	u32 low_rssithresh_for_ra, high_rssithresh_for_ra;
	struct ieee80211_sta *sta = NULL;

	if (is_hal_stop(rtlhal)) {
		RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
			 "driver is going to unload\n");
		return;
	}

	if (!rtlpriv->dm.useramask) {
		RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
			 "driver does not control rate adaptive mask\n");
		return;
	}

	if (mac->link_state == MAC80211_LINKED &&
	    mac->opmode == NL80211_IFTYPE_STATION) {
		/* Hysteresis: thresholds vary with the previous level. */
		switch (p_ra->pre_ratr_state) {
		case DM_RATR_STA_HIGH:
			high_rssithresh_for_ra = 50;
			low_rssithresh_for_ra = 20;
			break;
		case DM_RATR_STA_MIDDLE:
			high_rssithresh_for_ra = 55;
			low_rssithresh_for_ra = 20;
			break;
		case DM_RATR_STA_LOW:
			high_rssithresh_for_ra = 50;
			low_rssithresh_for_ra = 25;
			break;
default: high_rssithresh_for_ra = 50; low_rssithresh_for_ra = 20; break; } if (rtlpriv->dm.undec_sm_pwdb > (long)high_rssithresh_for_ra) p_ra->ratr_state = DM_RATR_STA_HIGH; else if (rtlpriv->dm.undec_sm_pwdb > (long)low_rssithresh_for_ra) p_ra->ratr_state = DM_RATR_STA_MIDDLE; else p_ra->ratr_state = DM_RATR_STA_LOW; if (p_ra->pre_ratr_state != p_ra->ratr_state) { RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, "RSSI = %ld\n", rtlpriv->dm.undec_sm_pwdb); RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, "RSSI_LEVEL = %d\n", p_ra->ratr_state); RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, "PreState = %d, CurState = %d\n", p_ra->pre_ratr_state, p_ra->ratr_state); rcu_read_lock(); sta = rtl_find_sta(hw, mac->bssid); if (sta) rtlpriv->cfg->ops->update_rate_tbl(hw, sta, p_ra->ratr_state); rcu_read_unlock(); p_ra->pre_ratr_state = p_ra->ratr_state; } } } static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct ps_t *dm_pstable = &rtlpriv->dm_pstable; dm_pstable->pre_ccastate = CCA_MAX; dm_pstable->cur_ccasate = CCA_MAX; dm_pstable->pre_rfstate = RF_MAX; dm_pstable->cur_rfstate = RF_MAX; dm_pstable->rssi_val_min = 0; } static void rtl88e_dm_update_rx_idle_ant(struct ieee80211_hw *hw, u8 ant) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); struct fast_ant_training *pfat_table = &rtldm->fat_table; u32 default_ant, optional_ant; if (pfat_table->rx_idle_ant != ant) { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "need to update rx idle ant\n"); if (ant == MAIN_ANT) { default_ant = (pfat_table->rx_idle_ant == CG_TRX_HW_ANTDIV) ? MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX; optional_ant = (pfat_table->rx_idle_ant == CG_TRX_HW_ANTDIV) ? AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX; } else { default_ant = (pfat_table->rx_idle_ant == CG_TRX_HW_ANTDIV) ? AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX; optional_ant = (pfat_table->rx_idle_ant == CG_TRX_HW_ANTDIV) ? 
MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX; } if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) { rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(5) | BIT(4) | BIT(3), default_ant); rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(8) | BIT(7) | BIT(6), optional_ant); rtl_set_bbreg(hw, DM_REG_ANTSEL_CTRL_11N, BIT(14) | BIT(13) | BIT(12), default_ant); rtl_set_bbreg(hw, DM_REG_RESP_TX_11N, BIT(6) | BIT(7), default_ant); } else if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) { rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(5) | BIT(4) | BIT(3), default_ant); rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(8) | BIT(7) | BIT(6), optional_ant); } } pfat_table->rx_idle_ant = ant; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "RxIdleAnt %s\n", (ant == MAIN_ANT) ? ("MAIN_ANT") : ("AUX_ANT")); } static void rtl88e_dm_update_tx_ant(struct ieee80211_hw *hw, u8 ant, u32 mac_id) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); struct fast_ant_training *pfat_table = &rtldm->fat_table; u8 target_ant; if (ant == MAIN_ANT) target_ant = MAIN_ANT_CG_TRX; else target_ant = AUX_ANT_CG_TRX; pfat_table->antsel_a[mac_id] = target_ant & BIT(0); pfat_table->antsel_b[mac_id] = (target_ant & BIT(1)) >> 1; pfat_table->antsel_c[mac_id] = (target_ant & BIT(2)) >> 2; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "txfrominfo target ant %s\n", (ant == MAIN_ANT) ? 
("MAIN_ANT") : ("AUX_ANT"));
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
		 "antsel_tr_mux = 3'b%d%d%d\n",
		 pfat_table->antsel_c[mac_id],
		 pfat_table->antsel_b[mac_id],
		 pfat_table->antsel_a[mac_id]);
}

/* One-time BB setup for CGCS RX-only hardware antenna diversity. */
static void rtl88e_dm_rx_hw_antena_div_init(struct ieee80211_hw *hw)
{
	u32 value32;

	/*MAC Setting*/
	value32 = rtl_get_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD);
	rtl_set_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD,
		      value32 | (BIT(23) | BIT(25)));
	/*Pin Setting*/
	rtl_set_bbreg(hw, DM_REG_PIN_CTRL_11N, BIT(9) | BIT(8), 0);
	rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(10), 0);
	rtl_set_bbreg(hw, DM_REG_LNA_SWITCH_11N, BIT(22), 1);
	rtl_set_bbreg(hw, DM_REG_LNA_SWITCH_11N, BIT(31), 1);
	/*OFDM Setting*/
	rtl_set_bbreg(hw, DM_REG_ANTDIV_PARA1_11N, MASKDWORD, 0x000000a0);
	/*CCK Setting*/
	rtl_set_bbreg(hw, DM_REG_BB_PWR_SAV4_11N, BIT(7), 1);
	rtl_set_bbreg(hw, DM_REG_CCK_ANTDIV_PARA2_11N, BIT(4), 1);
	rtl88e_dm_update_rx_idle_ant(hw, MAIN_ANT);
	rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKLWORD, 0x0201);
}

/* One-time BB setup for CG TRX hardware antenna diversity (TX + RX). */
static void rtl88e_dm_trx_hw_antenna_div_init(struct ieee80211_hw *hw)
{
	u32 value32;

	/*MAC Setting*/
	value32 = rtl_get_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD);
	rtl_set_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD,
		      value32 | (BIT(23) | BIT(25)));
	/*Pin Setting*/
	rtl_set_bbreg(hw, DM_REG_PIN_CTRL_11N, BIT(9) | BIT(8), 0);
	rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(10), 0);
	rtl_set_bbreg(hw, DM_REG_LNA_SWITCH_11N, BIT(22), 0);
	rtl_set_bbreg(hw, DM_REG_LNA_SWITCH_11N, BIT(31), 1);
	/*OFDM Setting*/
	rtl_set_bbreg(hw, DM_REG_ANTDIV_PARA1_11N, MASKDWORD, 0x000000a0);
	/*CCK Setting*/
	rtl_set_bbreg(hw, DM_REG_BB_PWR_SAV4_11N, BIT(7), 1);
	rtl_set_bbreg(hw, DM_REG_CCK_ANTDIV_PARA2_11N, BIT(4), 1);
	/*TX Setting*/
	rtl_set_bbreg(hw, DM_REG_TX_ANT_CTRL_11N, BIT(21), 0);
	rtl88e_dm_update_rx_idle_ant(hw, MAIN_ANT);
	rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKLWORD, 0x0201);
}

/* One-time setup for fast antenna training (smart antenna): clear the
 * training table and program the training/mapping registers.
 */
static void rtl88e_dm_fast_training_init(struct ieee80211_hw *hw)
{
	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
	struct fast_ant_training *pfat_table = &rtldm->fat_table;
	u32 ant_combination = 2;
	u32 value32, i;

	for (i = 0; i < 6; i++) {
		pfat_table->bssid[i] = 0;
		pfat_table->ant_sum[i] = 0;
		pfat_table->ant_cnt[i] = 0;
		pfat_table->ant_ave[i] = 0;
	}
	pfat_table->train_idx = 0;
	pfat_table->fat_state = FAT_NORMAL_STATE;

	/*MAC Setting*/
	value32 = rtl_get_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD);
	rtl_set_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD,
		      value32 | (BIT(23) | BIT(25)));
	value32 = rtl_get_bbreg(hw, DM_REG_ANT_TRAIN_PARA2_11N, MASKDWORD);
	rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_PARA2_11N, MASKDWORD,
		      value32 | (BIT(16) | BIT(17)));
	rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_PARA2_11N, MASKLWORD, 0);
	rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_PARA1_11N, MASKDWORD, 0);

	/*Pin Setting*/
	rtl_set_bbreg(hw, DM_REG_PIN_CTRL_11N, BIT(9) | BIT(8), 0);
	rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(10), 0);
	rtl_set_bbreg(hw, DM_REG_LNA_SWITCH_11N, BIT(22), 0);
	rtl_set_bbreg(hw, DM_REG_LNA_SWITCH_11N, BIT(31), 1);

	/*OFDM Setting*/
	rtl_set_bbreg(hw, DM_REG_ANTDIV_PARA1_11N, MASKDWORD, 0x000000a0);
	/*antenna mapping table*/
	rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKBYTE0, 1);
	rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKBYTE1, 2);

	/*TX Setting*/
	rtl_set_bbreg(hw, DM_REG_TX_ANT_CTRL_11N, BIT(21), 1);
	rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N,
		      BIT(5) | BIT(4) | BIT(3), 0);
	rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N,
		      BIT(8) | BIT(7) | BIT(6), 1);
	rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N,
		      BIT(2) | BIT(1) | BIT(0), (ant_combination - 1));
	rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 1);
}

/* Dispatch antenna-diversity init by the scheme selected in efuse. */
static void rtl88e_dm_antenna_div_init(struct ieee80211_hw *hw)
{
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));

	if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV)
		rtl88e_dm_rx_hw_antena_div_init(hw);
	else if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV)
		rtl88e_dm_trx_hw_antenna_div_init(hw);
	else if (rtlefuse->antenna_div_type == CG_TRX_SMART_ANTDIV)
		rtl88e_dm_fast_training_init(hw);
}

void
rtl88e_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw,
				u8 *pdesc, u32 mac_id)
{
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
	struct fast_ant_training *pfat_table = &rtldm->fat_table;

	/* Stamp the per-STA antenna-selection bits into the TX descriptor
	 * when a TX-capable diversity scheme is active.
	 */
	if ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) ||
	    (rtlefuse->antenna_div_type == CG_TRX_SMART_ANTDIV)) {
		SET_TX_DESC_ANTSEL_A(pdesc, pfat_table->antsel_a[mac_id]);
		SET_TX_DESC_ANTSEL_B(pdesc, pfat_table->antsel_b[mac_id]);
		SET_TX_DESC_ANTSEL_C(pdesc, pfat_table->antsel_c[mac_id]);
	}
}

/* Accumulate per-antenna RSSI sums and counts (per mac_id) used by the
 * hardware antenna-diversity decision in rtl88e_dm_hw_ant_div().
 */
void rtl88e_dm_ant_sel_statistics(struct ieee80211_hw *hw,
				  u8 antsel_tr_mux, u32 mac_id,
				  u32 rx_pwdb_all)
{
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
	struct fast_ant_training *pfat_table = &rtldm->fat_table;

	if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) {
		if (antsel_tr_mux == MAIN_ANT_CG_TRX) {
			pfat_table->main_ant_sum[mac_id] += rx_pwdb_all;
			pfat_table->main_ant_cnt[mac_id]++;
		} else {
			pfat_table->aux_ant_sum[mac_id] += rx_pwdb_all;
			pfat_table->aux_ant_cnt[mac_id]++;
		}
	} else if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) {
		if (antsel_tr_mux == MAIN_ANT_CGCS_RX) {
			pfat_table->main_ant_sum[mac_id] += rx_pwdb_all;
			pfat_table->main_ant_cnt[mac_id]++;
		} else {
			pfat_table->aux_ant_sum[mac_id] += rx_pwdb_all;
			pfat_table->aux_ant_cnt[mac_id]++;
		}
	}
}

/* Hardware antenna diversity: average the collected per-antenna RSSI for
 * the STA itself (entry 0) and, in AP/IBSS mode, each associated peer;
 * pick the better TX antenna per peer and the RX idle antenna from the
 * weakest link, then clear the statistics for the next round.
 */
static void rtl88e_dm_hw_ant_div(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
	struct rtl_sta_info *drv_priv;
	struct fast_ant_training *pfat_table = &rtldm->fat_table;
	struct dig_t *dm_dig = &rtlpriv->dm_digtable;
	u32 i, min_rssi = 0xff, ant_div_max_rssi = 0;
	u32 max_rssi = 0, local_min_rssi, local_max_rssi;
	u32 main_rssi, aux_rssi;
	u8 rx_idle_ant = 0, target_ant = 7;

	/*for sta its self*/
	i = 0;
	main_rssi = (pfat_table->main_ant_cnt[i] != 0) ?
		(pfat_table->main_ant_sum[i] /
		 pfat_table->main_ant_cnt[i]) : 0;
	aux_rssi = (pfat_table->aux_ant_cnt[i] != 0) ?
		(pfat_table->aux_ant_sum[i] /
		 pfat_table->aux_ant_cnt[i]) : 0;
	target_ant = (main_rssi == aux_rssi) ?
		pfat_table->rx_idle_ant : ((main_rssi >= aux_rssi) ?
		MAIN_ANT : AUX_ANT);
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
		 "main_ant_sum %d main_ant_cnt %d\n",
		 pfat_table->main_ant_sum[i],
		 pfat_table->main_ant_cnt[i]);
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
		 "aux_ant_sum %d aux_ant_cnt %d\n",
		 pfat_table->aux_ant_sum[i], pfat_table->aux_ant_cnt[i]);
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
		 "main_rssi %d aux_rssi%d\n", main_rssi, aux_rssi);
	local_max_rssi = (main_rssi > aux_rssi) ? main_rssi : aux_rssi;
	if ((local_max_rssi > ant_div_max_rssi) && (local_max_rssi < 40))
		ant_div_max_rssi = local_max_rssi;
	if (local_max_rssi > max_rssi)
		max_rssi = local_max_rssi;

	/* When only one antenna has samples, mirror it to the other so the
	 * min computation below stays meaningful.
	 */
	if ((pfat_table->rx_idle_ant == MAIN_ANT) && (main_rssi == 0))
		main_rssi = aux_rssi;
	else if ((pfat_table->rx_idle_ant == AUX_ANT) && (aux_rssi == 0))
		aux_rssi = main_rssi;

	local_min_rssi = (main_rssi > aux_rssi) ? aux_rssi : main_rssi;
	if (local_min_rssi < min_rssi) {
		min_rssi = local_min_rssi;
		rx_idle_ant = target_ant;
	}
	if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV)
		rtl88e_dm_update_tx_ant(hw, target_ant, i);

	if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP ||
	    rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC) {
		spin_lock_bh(&rtlpriv->locks.entry_list_lock);
		list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) {
			i++;
			main_rssi = (pfat_table->main_ant_cnt[i] != 0) ?
				(pfat_table->main_ant_sum[i] /
				 pfat_table->main_ant_cnt[i]) : 0;
			aux_rssi = (pfat_table->aux_ant_cnt[i] != 0) ?
				(pfat_table->aux_ant_sum[i] /
				 pfat_table->aux_ant_cnt[i]) : 0;
			target_ant = (main_rssi == aux_rssi) ?
				pfat_table->rx_idle_ant :
				((main_rssi >= aux_rssi) ?
				 MAIN_ANT : AUX_ANT);
			local_max_rssi = (main_rssi > aux_rssi) ?
				main_rssi : aux_rssi;
			if ((local_max_rssi > ant_div_max_rssi) &&
			    (local_max_rssi < 40))
				ant_div_max_rssi = local_max_rssi;
			if (local_max_rssi > max_rssi)
				max_rssi = local_max_rssi;

			if ((pfat_table->rx_idle_ant == MAIN_ANT) &&
			    (main_rssi == 0))
				main_rssi = aux_rssi;
			else if ((pfat_table->rx_idle_ant == AUX_ANT) &&
				 (aux_rssi == 0))
				aux_rssi = main_rssi;

			local_min_rssi = (main_rssi > aux_rssi) ?
				aux_rssi : main_rssi;
			if (local_min_rssi < min_rssi) {
				min_rssi = local_min_rssi;
				rx_idle_ant = target_ant;
			}
			if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV)
				rtl88e_dm_update_tx_ant(hw, target_ant, i);
		}
		spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
	}

	/* Reset the statistics for the next watchdog interval. */
	for (i = 0; i < ASSOCIATE_ENTRY_NUM; i++) {
		pfat_table->main_ant_sum[i] = 0;
		pfat_table->aux_ant_sum[i] = 0;
		pfat_table->main_ant_cnt[i] = 0;
		pfat_table->aux_ant_cnt[i] = 0;
	}

	rtl88e_dm_update_rx_idle_ant(hw, rx_idle_ant);

	dm_dig->antdiv_rssi_max = ant_div_max_rssi;
	dm_dig->rssi_max = max_rssi;
}

/* Advance train_idx round-robin and load the corresponding MAC address
 * (index 0 = self, otherwise the matching peer in entry_list) into the
 * antenna-training target registers.
 */
static void rtl88e_set_next_mac_address_target(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
	struct rtl_sta_info *drv_priv;
	struct fast_ant_training *pfat_table = &rtldm->fat_table;
	u32 value32, i, j = 0;

	if (mac->link_state >= MAC80211_LINKED) {
		for (i = 0; i < ASSOCIATE_ENTRY_NUM; i++) {
			if ((pfat_table->train_idx + 1) ==
			    ASSOCIATE_ENTRY_NUM)
				pfat_table->train_idx = 0;
			else
				pfat_table->train_idx++;

			if (pfat_table->train_idx == 0) {
				/* Entry 0 is this interface's own address. */
				value32 = (mac->mac_addr[5] << 8) |
					  mac->mac_addr[4];
				rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_PARA2_11N,
					      MASKLWORD, value32);
				value32 = (mac->mac_addr[3] << 24) |
					  (mac->mac_addr[2] << 16) |
					  (mac->mac_addr[1] << 8) |
					  mac->mac_addr[0];
				rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_PARA1_11N,
					      MASKDWORD, value32);
				break;
			}

			if (rtlpriv->mac80211.opmode !=
			    NL80211_IFTYPE_STATION) {
				spin_lock_bh(&rtlpriv->locks.entry_list_lock);
				list_for_each_entry(drv_priv,
						    &rtlpriv->entry_list,
						    list) {
					j++;
					if (j !=
pfat_table->train_idx)
						continue;
					value32 =
						(drv_priv->mac_addr[5] << 8) |
						drv_priv->mac_addr[4];
					rtl_set_bbreg(hw,
						DM_REG_ANT_TRAIN_PARA2_11N,
						MASKLWORD, value32);
					value32 =
						(drv_priv->mac_addr[3] << 24) |
						(drv_priv->mac_addr[2] << 16) |
						(drv_priv->mac_addr[1] << 8) |
						drv_priv->mac_addr[0];
					rtl_set_bbreg(hw,
						DM_REG_ANT_TRAIN_PARA1_11N,
						MASKDWORD, value32);
					break;
				}
				spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
				/*find entry, break*/
				if (j == pfat_table->train_idx)
					break;
			}
		}
	}
}

/* Fast antenna training state machine.  In TRAINING state: average the
 * per-candidate-antenna RSSI, pick the best antenna, program it, clear
 * the statistics and fall back to NORMAL.  In NORMAL state: select the
 * next training target and re-arm the training timer.
 */
static void rtl88e_dm_fast_ant_training(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
	struct fast_ant_training *pfat_table = &rtldm->fat_table;
	u32 i, max_rssi = 0;
	u8 target_ant = 2;
	bool bpkt_filter_match = false;

	if (pfat_table->fat_state == FAT_TRAINING_STATE) {
		for (i = 0; i < 7; i++) {
			if (pfat_table->ant_cnt[i] == 0) {
				pfat_table->ant_ave[i] = 0;
			} else {
				pfat_table->ant_ave[i] =
					pfat_table->ant_sum[i] /
					pfat_table->ant_cnt[i];
				bpkt_filter_match = true;
			}
			if (pfat_table->ant_ave[i] > max_rssi) {
				max_rssi = pfat_table->ant_ave[i];
				target_ant = (u8) i;
			}
		}

		if (bpkt_filter_match == false) {
			/* No training frames captured: abort this round. */
			rtl_set_bbreg(hw, DM_REG_TXAGC_A_1_MCS32_11N,
				      BIT(16), 0);
			rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 0);
		} else {
			rtl_set_bbreg(hw, DM_REG_TXAGC_A_1_MCS32_11N,
				      BIT(16), 0);
			rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N,
				      BIT(8) | BIT(7) | BIT(6), target_ant);
			rtl_set_bbreg(hw, DM_REG_TX_ANT_CTRL_11N,
				      BIT(21), 1);
			pfat_table->antsel_a[pfat_table->train_idx] =
				target_ant & BIT(0);
			pfat_table->antsel_b[pfat_table->train_idx] =
				(target_ant & BIT(1)) >> 1;
			pfat_table->antsel_c[pfat_table->train_idx] =
				(target_ant & BIT(2)) >> 2;
			if (target_ant == 0)
				rtl_set_bbreg(hw, DM_REG_IGI_A_11N,
					      BIT(7), 0);
		}

		for (i = 0; i < 7; i++) {
			pfat_table->ant_sum[i] = 0;
			pfat_table->ant_cnt[i] = 0;
		}

		pfat_table->fat_state = FAT_NORMAL_STATE;
		return;
	}

	if (pfat_table->fat_state == FAT_NORMAL_STATE) {
		rtl88e_set_next_mac_address_target(hw);

		pfat_table->fat_state = FAT_TRAINING_STATE;
		rtl_set_bbreg(hw, DM_REG_TXAGC_A_1_MCS32_11N, BIT(16), 1);
		rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 1);

		mod_timer(&rtlpriv->works.fast_antenna_training_timer,
			  jiffies + MSECS(RTL_WATCH_DOG_TIME));
	}
}

/* Timer callback: run one fast-antenna-training step. */
void rtl88e_dm_fast_antenna_training_callback(unsigned long data)
{
	struct ieee80211_hw *hw = (struct ieee80211_hw *)data;

	rtl88e_dm_fast_ant_training(hw);
}

/* Turn HW antenna diversity on/off as the link comes up or goes down,
 * then run the scheme-specific diversity algorithm while linked.
 */
static void rtl88e_dm_antenna_diversity(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
	struct fast_ant_training *pfat_table = &rtldm->fat_table;

	if (mac->link_state < MAC80211_LINKED) {
		RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "No Link\n");
		if (pfat_table->becomelinked) {
			RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
				 "need to turn off HW AntDiv\n");
			rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 0);
			rtl_set_bbreg(hw, DM_REG_CCK_ANTDIV_PARA1_11N,
				      BIT(15), 0);
			if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV)
				rtl_set_bbreg(hw, DM_REG_TX_ANT_CTRL_11N,
					      BIT(21), 0);
			pfat_table->becomelinked =
				(mac->link_state == MAC80211_LINKED) ?
				true : false;
		}
		return;
	} else {
		if (!pfat_table->becomelinked) {
			RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
				 "Need to turn on HW AntDiv\n");
			rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 1);
			rtl_set_bbreg(hw, DM_REG_CCK_ANTDIV_PARA1_11N,
				      BIT(15), 1);
			if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV)
				rtl_set_bbreg(hw, DM_REG_TX_ANT_CTRL_11N,
					      BIT(21), 1);
			pfat_table->becomelinked =
				(mac->link_state >= MAC80211_LINKED) ?
				true : false;
		}
	}

	if ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) ||
	    (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV))
		rtl88e_dm_hw_ant_div(hw);
	else if (rtlefuse->antenna_div_type == CG_TRX_SMART_ANTDIV)
		rtl88e_dm_fast_ant_training(hw);
}

/* One-time init of every dynamic mechanism (DIG, EDCA turbo, RA mask,
 * TX power tracking, BB power saving, antenna diversity).
 */
void rtl88e_dm_init(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
	rtl88e_dm_diginit(hw);
	rtl88e_dm_init_dynamic_txpower(hw);
	rtl88e_dm_init_edca_turbo(hw);
	rtl88e_dm_init_rate_adaptive_mask(hw);
	rtl88e_dm_init_txpower_tracking(hw);
	rtl92c_dm_init_dynamic_bb_powersaving(hw);
	rtl88e_dm_antenna_div_init(hw);
}

/* Periodic watchdog: run the dynamic mechanisms only while the RF is on,
 * the firmware is awake (not in LPS or P2P power save) and no RF state
 * change is in progress.
 */
void rtl88e_dm_watchdog(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	bool fw_current_inpsmode = false;
	bool fw_ps_awake = true;

	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
				      (u8 *)(&fw_current_inpsmode));
	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
				      (u8 *)(&fw_ps_awake));
	if (ppsc->p2p_ps_info.p2p_ps_mode)
		fw_ps_awake = false;

	if ((ppsc->rfpwr_state == ERFON) &&
	    ((!fw_current_inpsmode) && fw_ps_awake) &&
	    (!ppsc->rfchange_inprogress)) {
		rtl88e_dm_pwdb_monitor(hw);
		rtl88e_dm_dig(hw);
		rtl88e_dm_false_alarm_counter_statistics(hw);
		rtl92c_dm_dynamic_txpower(hw);
		rtl88e_dm_check_txpower_tracking(hw);
		rtl88e_dm_refresh_rate_adaptive_mask(hw);
		rtl88e_dm_check_edca_turbo(hw);
		rtl88e_dm_antenna_diversity(hw);
	}
}
/*
 * Dataset metadata for the following chunk (not C source — preserved as a
 * comment so the file remains a valid translation unit):
 * license: gpl-2.0
 * repo: 0x19/whatever
 * path: drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
 * copies: 416
 * size: 21489
 */
/****************************************************************************** * * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. 
 *
 * Larry Finger <Larry.Finger@lwfinger.net>
 *
 *****************************************************************************/

#include "../wifi.h"
#include "../usb.h"
#include "../ps.h"
#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
#include "rf.h"
#include "dm.h"
#include "mac.h"
#include "trx.h"
#include "../rtl8192c/fw_common.h"

/* Test-chip variant: read the SIE option register and derive the bulk-out
 * queue selection; returns -EINVAL when it disagrees with the number of
 * endpoints enumerated on the USB bus.
 */
static int _ConfigVerTOutEP(struct ieee80211_hw *hw)
{
	u8 ep_cfg, txqsele;
	u8 ep_nums = 0;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
	struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);

	rtlusb->out_queue_sel = 0;
	ep_cfg = rtl_read_byte(rtlpriv, REG_TEST_SIE_OPTIONAL);
	ep_cfg = (ep_cfg & USB_TEST_EP_MASK) >> USB_TEST_EP_SHIFT;
	switch (ep_cfg) {
	case 0:	/* 2 bulk OUT, 1 bulk IN */
	case 3:
		rtlusb->out_queue_sel = TX_SELE_HQ | TX_SELE_LQ;
		ep_nums = 2;
		break;
	case 1:	/* 1 bulk IN/OUT => map all endpoint to Low queue */
	case 2:	/* 1 bulk IN, 1 bulk OUT => map all endpoint to High queue */
		txqsele = rtl_read_byte(rtlpriv, REG_TEST_USB_TXQS);
		if (txqsele & 0x0F) /* /map all endpoint to High queue */
			rtlusb->out_queue_sel = TX_SELE_HQ;
		else if (txqsele&0xF0) /* map all endpoint to Low queue */
			rtlusb->out_queue_sel = TX_SELE_LQ;
		ep_nums = 1;
		break;
	default:
		break;
	}
	return (rtlusb->out_ep_nums == ep_nums) ? 0 : -EINVAL;
}

/* Normal-chip variant: read the SIE endpoint config registers and build
 * the high/normal/low queue selection mask; -EINVAL on a mismatch with
 * the enumerated endpoint count.
 */
static int _ConfigVerNOutEP(struct ieee80211_hw *hw)
{
	u8 ep_cfg;
	u8 ep_nums = 0;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
	struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);

	rtlusb->out_queue_sel = 0;
	/* Normal and High queue */
	ep_cfg = rtl_read_byte(rtlpriv, (REG_NORMAL_SIE_EP + 1));
	if (ep_cfg & USB_NORMAL_SIE_EP_MASK) {
		rtlusb->out_queue_sel |= TX_SELE_HQ;
		ep_nums++;
	}
	if ((ep_cfg >> USB_NORMAL_SIE_EP_SHIFT) & USB_NORMAL_SIE_EP_MASK) {
		rtlusb->out_queue_sel |= TX_SELE_NQ;
		ep_nums++;
	}
	/* Low queue */
	ep_cfg = rtl_read_byte(rtlpriv, (REG_NORMAL_SIE_EP + 2));
	if (ep_cfg & USB_NORMAL_SIE_EP_MASK) {
		rtlusb->out_queue_sel |= TX_SELE_LQ;
		ep_nums++;
	}
	return (rtlusb->out_ep_nums == ep_nums) ? 0 : -EINVAL;
}

/* Map the mac80211 TX queues onto 2 bulk-out endpoints. */
static void _TwoOutEpMapping(struct ieee80211_hw *hw, bool bIsChipB,
			     bool bwificfg, struct rtl_ep_map *ep_map)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	if (bwificfg) { /* for WMM */
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "USB Chip-B & WMM Setting.....\n");
		ep_map->ep_mapping[RTL_TXQ_BE] = 2;
		ep_map->ep_mapping[RTL_TXQ_BK] = 3;
		ep_map->ep_mapping[RTL_TXQ_VI] = 3;
		ep_map->ep_mapping[RTL_TXQ_VO] = 2;
		ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
		ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
		ep_map->ep_mapping[RTL_TXQ_HI] = 2;
	} else { /* typical setting */
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "USB typical Setting.....\n");
		ep_map->ep_mapping[RTL_TXQ_BE] = 3;
		ep_map->ep_mapping[RTL_TXQ_BK] = 3;
		ep_map->ep_mapping[RTL_TXQ_VI] = 2;
		ep_map->ep_mapping[RTL_TXQ_VO] = 2;
		ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
		ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
		ep_map->ep_mapping[RTL_TXQ_HI] = 2;
	}
}

/* Map the mac80211 TX queues onto 3 bulk-out endpoints. */
static void _ThreeOutEpMapping(struct ieee80211_hw *hw, bool bwificfg,
			       struct rtl_ep_map *ep_map)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	if (bwificfg) { /* for WMM */
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "USB 3EP Setting for WMM.....\n");
		ep_map->ep_mapping[RTL_TXQ_BE] = 5;
		ep_map->ep_mapping[RTL_TXQ_BK] = 3;
		ep_map->ep_mapping[RTL_TXQ_VI] =
3;
		ep_map->ep_mapping[RTL_TXQ_VO] = 2;
		ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
		ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
		ep_map->ep_mapping[RTL_TXQ_HI] = 2;
	} else { /* typical setting */
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "USB 3EP Setting for typical.....\n");
		ep_map->ep_mapping[RTL_TXQ_BE] = 5;
		ep_map->ep_mapping[RTL_TXQ_BK] = 5;
		ep_map->ep_mapping[RTL_TXQ_VI] = 3;
		ep_map->ep_mapping[RTL_TXQ_VO] = 2;
		ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
		ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
		ep_map->ep_mapping[RTL_TXQ_HI] = 2;
	}
}

/* Single bulk-out endpoint: every queue maps to endpoint 2. */
static void _OneOutEpMapping(struct ieee80211_hw *hw,
			     struct rtl_ep_map *ep_map)
{
	ep_map->ep_mapping[RTL_TXQ_BE] = 2;
	ep_map->ep_mapping[RTL_TXQ_BK] = 2;
	ep_map->ep_mapping[RTL_TXQ_VI] = 2;
	ep_map->ep_mapping[RTL_TXQ_VO] = 2;
	ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
	ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
	ep_map->ep_mapping[RTL_TXQ_HI] = 2;
}

/* Choose the queue->endpoint mapping from the detected endpoint count. */
static int _out_ep_mapping(struct ieee80211_hw *hw)
{
	int err = 0;
	bool bIsChipN, bwificfg = false;
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
	struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
	struct rtl_ep_map *ep_map = &(rtlusb->ep_map);

	bIsChipN = IS_NORMAL_CHIP(rtlhal->version);
	switch (rtlusb->out_ep_nums) {
	case 2:
		_TwoOutEpMapping(hw, bIsChipN, bwificfg, ep_map);
		break;
	case 3:
		/* Test chip doesn't support three out EPs. */
		if (!bIsChipN) {
			err = -EINVAL;
			goto err_out;
		}
		_ThreeOutEpMapping(hw, bIsChipN, ep_map);
		break;
	case 1:
		_OneOutEpMapping(hw, ep_map);
		break;
	default:
		err = -EINVAL;
		break;
	}
err_out:
	return err;
}

/* endpoint mapping */
int rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw)
{
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	int error = 0;

	if (likely(IS_NORMAL_CHIP(rtlhal->version)))
		error = _ConfigVerNOutEP(hw);
	else
		error = _ConfigVerTOutEP(hw);
	if (error)
		goto err_out;
	error = _out_ep_mapping(hw);
	if (error)
		goto err_out;
err_out:
	return error;
}

/* Map a mac80211 queue index (plus frame type) to the HW TX queue. */
u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index)
{
	u16 hw_queue_index;

	if (unlikely(ieee80211_is_beacon(fc))) {
		hw_queue_index = RTL_TXQ_BCN;
		goto out;
	}
	if (ieee80211_is_mgmt(fc)) {
		hw_queue_index = RTL_TXQ_MGT;
		goto out;
	}
	switch (mac80211_queue_index) {
	case 0:
		hw_queue_index = RTL_TXQ_VO;
		break;
	case 1:
		hw_queue_index = RTL_TXQ_VI;
		break;
	case 2:
		hw_queue_index = RTL_TXQ_BE;
		break;
	case 3:
		hw_queue_index = RTL_TXQ_BK;
		break;
	default:
		hw_queue_index = RTL_TXQ_BE;
		RT_ASSERT(false, "QSLT_BE queue, skb_queue:%d\n",
			  mac80211_queue_index);
		break;
	}
out:
	return hw_queue_index;
}

/* Map a mac80211 queue index (plus frame type) to the TX descriptor QSEL. */
static enum rtl_desc_qsel _rtl8192cu_mq_to_descq(struct ieee80211_hw *hw,
					__le16 fc, u16 mac80211_queue_index)
{
	enum rtl_desc_qsel qsel;
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	if (unlikely(ieee80211_is_beacon(fc))) {
		qsel = QSLT_BEACON;
		goto out;
	}
	if (ieee80211_is_mgmt(fc)) {
		qsel = QSLT_MGNT;
		goto out;
	}
	switch (mac80211_queue_index) {
	case 0:	/* VO */
		qsel = QSLT_VO;
		RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
			 "VO queue, set qsel = 0x%x\n", QSLT_VO);
		break;
	case 1:	/* VI */
		qsel = QSLT_VI;
		RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
			 "VI queue, set qsel = 0x%x\n", QSLT_VI);
		break;
	case 3:	/* BK */
		qsel = QSLT_BK;
		RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
			 "BK queue, set qsel = 0x%x\n", QSLT_BK);
		break;
	case 2:	/* BE */
	default:
		qsel = QSLT_BE;
		RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
			 "BE queue, set qsel = 0x%x\n", QSLT_BE);
		break;
	}
out:
	return
qsel; } /* =============================================================== */ /*---------------------------------------------------------------------- * * Rx handler * *---------------------------------------------------------------------- */ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, struct ieee80211_rx_status *rx_status, u8 *pdesc, struct sk_buff *skb) { struct rx_fwinfo_92c *p_drvinfo; struct rx_desc_92c *p_desc = (struct rx_desc_92c *)pdesc; u32 phystatus = GET_RX_DESC_PHY_STATUS(pdesc); stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc); stats->rx_drvinfo_size = (u8)GET_RX_DESC_DRVINFO_SIZE(pdesc) * RX_DRV_INFO_SIZE_UNIT; stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03); stats->icv = (u16) GET_RX_DESC_ICV(pdesc); stats->crc = (u16) GET_RX_DESC_CRC32(pdesc); stats->hwerror = (stats->crc | stats->icv); stats->decrypted = !GET_RX_DESC_SWDEC(pdesc); stats->rate = (u8) GET_RX_DESC_RX_MCS(pdesc); stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc); stats->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1); stats->isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1) && (GET_RX_DESC_FAGGR(pdesc) == 1)); stats->timestamp_low = GET_RX_DESC_TSFL(pdesc); stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc); rx_status->freq = hw->conf.chandef.chan->center_freq; rx_status->band = hw->conf.chandef.chan->band; if (GET_RX_DESC_CRC32(pdesc)) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; if (!GET_RX_DESC_SWDEC(pdesc)) rx_status->flag |= RX_FLAG_DECRYPTED; if (GET_RX_DESC_BW(pdesc)) rx_status->flag |= RX_FLAG_40MHZ; if (GET_RX_DESC_RX_HT(pdesc)) rx_status->flag |= RX_FLAG_HT; rx_status->flag |= RX_FLAG_MACTIME_START; if (stats->decrypted) rx_status->flag |= RX_FLAG_DECRYPTED; rx_status->rate_idx = rtlwifi_rate_mapping(hw, (bool)GET_RX_DESC_RX_HT(pdesc), (u8)GET_RX_DESC_RX_MCS(pdesc), (bool)GET_RX_DESC_PAGGR(pdesc)); rx_status->mactime = GET_RX_DESC_TSFL(pdesc); if (phystatus) { p_drvinfo = (struct rx_fwinfo_92c *)(skb->data + 
stats->rx_bufshift); rtl92c_translate_rx_signal_stuff(hw, skb, stats, p_desc, p_drvinfo); } /*rx_status->qual = stats->signal; */ rx_status->signal = stats->recvsignalpower + 10; return true; } #define RTL_RX_DRV_INFO_UNIT 8 static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb) { struct ieee80211_rx_status *rx_status = (struct ieee80211_rx_status *)IEEE80211_SKB_RXCB(skb); u32 skb_len, pkt_len, drvinfo_len; struct rtl_priv *rtlpriv = rtl_priv(hw); u8 *rxdesc; struct rtl_stats stats = { .signal = 0, .rate = 0, }; struct rx_fwinfo_92c *p_drvinfo; bool bv; __le16 fc; struct ieee80211_hdr *hdr; memset(rx_status, 0, sizeof(*rx_status)); rxdesc = skb->data; skb_len = skb->len; drvinfo_len = (GET_RX_DESC_DRVINFO_SIZE(rxdesc) * RTL_RX_DRV_INFO_UNIT); pkt_len = GET_RX_DESC_PKT_LEN(rxdesc); /* TODO: Error recovery. drop this skb or something. */ WARN_ON(skb_len < (pkt_len + RTL_RX_DESC_SIZE + drvinfo_len)); stats.length = (u16) GET_RX_DESC_PKT_LEN(rxdesc); stats.rx_drvinfo_size = (u8)GET_RX_DESC_DRVINFO_SIZE(rxdesc) * RX_DRV_INFO_SIZE_UNIT; stats.rx_bufshift = (u8) (GET_RX_DESC_SHIFT(rxdesc) & 0x03); stats.icv = (u16) GET_RX_DESC_ICV(rxdesc); stats.crc = (u16) GET_RX_DESC_CRC32(rxdesc); stats.hwerror = (stats.crc | stats.icv); stats.decrypted = !GET_RX_DESC_SWDEC(rxdesc); stats.rate = (u8) GET_RX_DESC_RX_MCS(rxdesc); stats.shortpreamble = (u16) GET_RX_DESC_SPLCP(rxdesc); stats.isampdu = (bool) ((GET_RX_DESC_PAGGR(rxdesc) == 1) && (GET_RX_DESC_FAGGR(rxdesc) == 1)); stats.timestamp_low = GET_RX_DESC_TSFL(rxdesc); stats.rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(rxdesc); /* TODO: is center_freq changed when doing scan? */ /* TODO: Shall we add protection or just skip those two step? 
*/ rx_status->freq = hw->conf.chandef.chan->center_freq; rx_status->band = hw->conf.chandef.chan->band; if (GET_RX_DESC_CRC32(rxdesc)) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; if (!GET_RX_DESC_SWDEC(rxdesc)) rx_status->flag |= RX_FLAG_DECRYPTED; if (GET_RX_DESC_BW(rxdesc)) rx_status->flag |= RX_FLAG_40MHZ; if (GET_RX_DESC_RX_HT(rxdesc)) rx_status->flag |= RX_FLAG_HT; /* Data rate */ rx_status->rate_idx = rtlwifi_rate_mapping(hw, (bool)GET_RX_DESC_RX_HT(rxdesc), (u8)GET_RX_DESC_RX_MCS(rxdesc), (bool)GET_RX_DESC_PAGGR(rxdesc)); /* There is a phy status after this rx descriptor. */ if (GET_RX_DESC_PHY_STATUS(rxdesc)) { p_drvinfo = (struct rx_fwinfo_92c *)(rxdesc + RTL_RX_DESC_SIZE); rtl92c_translate_rx_signal_stuff(hw, skb, &stats, (struct rx_desc_92c *)rxdesc, p_drvinfo); } skb_pull(skb, (drvinfo_len + RTL_RX_DESC_SIZE)); hdr = (struct ieee80211_hdr *)(skb->data); fc = hdr->frame_control; bv = ieee80211_is_probe_resp(fc); if (bv) RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Got probe response frame\n"); if (ieee80211_is_beacon(fc)) RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Got beacon frame\n"); if (ieee80211_is_data(fc)) RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Got data frame\n"); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Fram: fc = 0x%X addr1 = 0x%02X:0x%02X:0x%02X:0x%02X:0x%02X:0x%02X\n", fc, (u32)hdr->addr1[0], (u32)hdr->addr1[1], (u32)hdr->addr1[2], (u32)hdr->addr1[3], (u32)hdr->addr1[4], (u32)hdr->addr1[5]); memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status)); ieee80211_rx(hw, skb); } void rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff * skb) { _rtl_rx_process(hw, skb); } void rtl8192c_rx_segregate_hdl( struct ieee80211_hw *hw, struct sk_buff *skb, struct sk_buff_head *skb_list) { } /*---------------------------------------------------------------------- * * Tx handler * *---------------------------------------------------------------------- */ void rtl8192c_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff *skb) { } int 
rtl8192c_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb, struct sk_buff *skb) { return 0; } struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *hw, struct sk_buff_head *list) { return skb_dequeue(list); } /*======================================== trx ===============================*/ static void _rtl_fill_usb_tx_desc(u8 *txdesc) { SET_TX_DESC_OWN(txdesc, 1); SET_TX_DESC_LAST_SEG(txdesc, 1); SET_TX_DESC_FIRST_SEG(txdesc, 1); } /** * For HW recovery information */ static void _rtl_tx_desc_checksum(u8 *txdesc) { u16 *ptr = (u16 *)txdesc; u16 checksum = 0; u32 index; /* Clear first */ SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0); for (index = 0; index < 16; index++) checksum = checksum ^ (*(ptr + index)); SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum); } void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, u8 *pdesc_tx, u8 *pbd_desc_tx, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, struct sk_buff *skb, u8 queue_index, struct rtl_tcb_desc *tcb_desc) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); bool defaultadapter = true; u8 *qc = ieee80211_get_qos_ctl(hdr); u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; u16 seq_number; __le16 fc = hdr->frame_control; u8 rate_flag = info->control.rates[0].flags; u16 pktlen = skb->len; enum rtl_desc_qsel fw_qsel = _rtl8192cu_mq_to_descq(hw, fc, skb_get_queue_mapping(skb)); u8 *txdesc; seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; rtl_get_tcb_desc(hw, info, sta, skb, tcb_desc); txdesc = (u8 *)skb_push(skb, RTL_TX_HEADER_SIZE); memset(txdesc, 0, RTL_TX_HEADER_SIZE); SET_TX_DESC_PKT_SIZE(txdesc, pktlen); SET_TX_DESC_LINIP(txdesc, 0); SET_TX_DESC_PKT_OFFSET(txdesc, RTL_DUMMY_OFFSET); SET_TX_DESC_OFFSET(txdesc, RTL_TX_HEADER_SIZE); SET_TX_DESC_TX_RATE(txdesc, tcb_desc->hw_rate); if (tcb_desc->use_shortgi || tcb_desc->use_shortpreamble) SET_TX_DESC_DATA_SHORTGI(txdesc, 1); if 
(mac->tids[tid].agg.agg_state == RTL_AGG_ON && info->flags & IEEE80211_TX_CTL_AMPDU) { SET_TX_DESC_AGG_ENABLE(txdesc, 1); SET_TX_DESC_MAX_AGG_NUM(txdesc, 0x14); } else { SET_TX_DESC_AGG_BREAK(txdesc, 1); } SET_TX_DESC_SEQ(txdesc, seq_number); SET_TX_DESC_RTS_ENABLE(txdesc, ((tcb_desc->rts_enable && !tcb_desc->cts_enable) ? 1 : 0)); SET_TX_DESC_HW_RTS_ENABLE(txdesc, ((tcb_desc->rts_enable || tcb_desc->cts_enable) ? 1 : 0)); SET_TX_DESC_CTS2SELF(txdesc, ((tcb_desc->cts_enable) ? 1 : 0)); SET_TX_DESC_RTS_STBC(txdesc, ((tcb_desc->rts_stbc) ? 1 : 0)); SET_TX_DESC_RTS_RATE(txdesc, tcb_desc->rts_rate); SET_TX_DESC_RTS_BW(txdesc, 0); SET_TX_DESC_RTS_SC(txdesc, tcb_desc->rts_sc); SET_TX_DESC_RTS_SHORT(txdesc, ((tcb_desc->rts_rate <= DESC92_RATE54M) ? (tcb_desc->rts_use_shortpreamble ? 1 : 0) : (tcb_desc->rts_use_shortgi ? 1 : 0))); if (mac->bw_40) { if (rate_flag & IEEE80211_TX_RC_DUP_DATA) { SET_TX_DESC_DATA_BW(txdesc, 1); SET_TX_DESC_DATA_SC(txdesc, 3); } else if(rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH){ SET_TX_DESC_DATA_BW(txdesc, 1); SET_TX_DESC_DATA_SC(txdesc, mac->cur_40_prime_sc); } else { SET_TX_DESC_DATA_BW(txdesc, 0); SET_TX_DESC_DATA_SC(txdesc, 0); } } else { SET_TX_DESC_DATA_BW(txdesc, 0); SET_TX_DESC_DATA_SC(txdesc, 0); } rcu_read_lock(); sta = ieee80211_find_sta(mac->vif, mac->bssid); if (sta) { u8 ampdu_density = sta->ht_cap.ampdu_density; SET_TX_DESC_AMPDU_DENSITY(txdesc, ampdu_density); } rcu_read_unlock(); if (info->control.hw_key) { struct ieee80211_key_conf *keyconf = info->control.hw_key; switch (keyconf->cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: case WLAN_CIPHER_SUITE_TKIP: SET_TX_DESC_SEC_TYPE(txdesc, 0x1); break; case WLAN_CIPHER_SUITE_CCMP: SET_TX_DESC_SEC_TYPE(txdesc, 0x3); break; default: SET_TX_DESC_SEC_TYPE(txdesc, 0x0); break; } } SET_TX_DESC_PKT_ID(txdesc, 0); SET_TX_DESC_QUEUE_SEL(txdesc, fw_qsel); SET_TX_DESC_DATA_RATE_FB_LIMIT(txdesc, 0x1F); SET_TX_DESC_RTS_RATE_FB_LIMIT(txdesc, 0xF); 
SET_TX_DESC_DISABLE_FB(txdesc, 0); SET_TX_DESC_USE_RATE(txdesc, tcb_desc->use_driver_rate ? 1 : 0); if (ieee80211_is_data_qos(fc)) { if (mac->rdg_en) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "Enable RDG function\n"); SET_TX_DESC_RDG_ENABLE(txdesc, 1); SET_TX_DESC_HTC(txdesc, 1); } } if (rtlpriv->dm.useramask) { SET_TX_DESC_RATE_ID(txdesc, tcb_desc->ratr_index); SET_TX_DESC_MACID(txdesc, tcb_desc->mac_id); } else { SET_TX_DESC_RATE_ID(txdesc, 0xC + tcb_desc->ratr_index); SET_TX_DESC_MACID(txdesc, tcb_desc->ratr_index); } if ((!ieee80211_is_data_qos(fc)) && ppsc->leisure_ps && ppsc->fwctrl_lps) { SET_TX_DESC_HWSEQ_EN(txdesc, 1); SET_TX_DESC_PKT_ID(txdesc, 8); if (!defaultadapter) SET_TX_DESC_QOS(txdesc, 1); } if (ieee80211_has_morefrags(fc)) SET_TX_DESC_MORE_FRAG(txdesc, 1); if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) || is_broadcast_ether_addr(ieee80211_get_DA(hdr))) SET_TX_DESC_BMC(txdesc, 1); _rtl_fill_usb_tx_desc(txdesc); _rtl_tx_desc_checksum(txdesc); RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "==>\n"); } void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc, u32 buffer_len, bool bIsPsPoll) { /* Clear all status */ memset(pDesc, 0, RTL_TX_HEADER_SIZE); SET_TX_DESC_FIRST_SEG(pDesc, 1); /* bFirstSeg; */ SET_TX_DESC_LAST_SEG(pDesc, 1); /* bLastSeg; */ SET_TX_DESC_OFFSET(pDesc, RTL_TX_HEADER_SIZE); /* Offset = 32 */ SET_TX_DESC_PKT_SIZE(pDesc, buffer_len); /* Buffer size + command hdr */ SET_TX_DESC_QUEUE_SEL(pDesc, QSLT_MGNT); /* Fixed queue of Mgnt queue */ /* Set NAVUSEHDR to prevent Ps-poll AId filed to be changed to error * vlaue by Hw. */ if (bIsPsPoll) { SET_TX_DESC_NAV_USE_HDR(pDesc, 1); } else { SET_TX_DESC_HWSEQ_EN(pDesc, 1); /* Hw set sequence number */ SET_TX_DESC_PKT_ID(pDesc, 0x100); /* set bit3 to 1. 
*/ } SET_TX_DESC_USE_RATE(pDesc, 1); /* use data rate which is set by Sw */ SET_TX_DESC_OWN(pDesc, 1); SET_TX_DESC_TX_RATE(pDesc, DESC92_RATE1M); _rtl_tx_desc_checksum(pDesc); } void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg, bool lastseg, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 fw_queue = QSLT_BEACON; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); __le16 fc = hdr->frame_control; memset((void *)pdesc, 0, RTL_TX_HEADER_SIZE); if (firstseg) SET_TX_DESC_OFFSET(pdesc, RTL_TX_HEADER_SIZE); SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE1M); SET_TX_DESC_SEQ(pdesc, 0); SET_TX_DESC_LINIP(pdesc, 0); SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue); SET_TX_DESC_FIRST_SEG(pdesc, 1); SET_TX_DESC_LAST_SEG(pdesc, 1); SET_TX_DESC_RATE_ID(pdesc, 7); SET_TX_DESC_MACID(pdesc, 0); SET_TX_DESC_OWN(pdesc, 1); SET_TX_DESC_PKT_SIZE(pdesc, (u16)skb->len); SET_TX_DESC_FIRST_SEG(pdesc, 1); SET_TX_DESC_LAST_SEG(pdesc, 1); SET_TX_DESC_OFFSET(pdesc, 0x20); SET_TX_DESC_USE_RATE(pdesc, 1); if (!ieee80211_is_data_qos(fc)) { SET_TX_DESC_HWSEQ_EN(pdesc, 1); SET_TX_DESC_PKT_ID(pdesc, 8); } RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content", pdesc, RTL_TX_DESC_SIZE); } bool rtl92cu_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb) { return true; }
gpl-2.0
nicolaerosia/linux-bn-omap4
drivers/atm/he.c
416
77907
/* he.c ForeRunnerHE ATM Adapter driver for ATM on Linux Copyright (C) 1999-2001 Naval Research Laboratory This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* he.c ForeRunnerHE ATM Adapter driver for ATM on Linux Copyright (C) 1999-2001 Naval Research Laboratory Permission to use, copy, modify and distribute this software and its documentation is hereby granted, provided that both the copyright notice and this permission notice appear in all copies of the software, derivative works or modified versions, and any portions thereof, and that both notices appear in supporting documentation. NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. This driver was written using the "Programmer's Reference Manual for ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98. 
AUTHORS: chas williams <chas@cmf.nrl.navy.mil> eric kinzie <ekinzie@cmf.nrl.navy.mil> NOTES: 4096 supported 'connections' group 0 is used for all traffic interrupt queue 0 is used for all interrupts aal0 support (based on work from ulrich.u.muller@nokia.com) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/pci.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/bitmap.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/byteorder.h> #include <asm/uaccess.h> #include <linux/atmdev.h> #include <linux/atm.h> #include <linux/sonet.h> #undef USE_SCATTERGATHER #undef USE_CHECKSUM_HW /* still confused about this */ /* #undef HE_DEBUG */ #include "he.h" #include "suni.h" #include <linux/atm_he.h> #define hprintk(fmt,args...) printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args) #ifdef HE_DEBUG #define HPRINTK(fmt,args...) printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args) #else /* !HE_DEBUG */ #define HPRINTK(fmt,args...) 
do { } while (0) #endif /* HE_DEBUG */ /* declarations */ static int he_open(struct atm_vcc *vcc); static void he_close(struct atm_vcc *vcc); static int he_send(struct atm_vcc *vcc, struct sk_buff *skb); static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg); static irqreturn_t he_irq_handler(int irq, void *dev_id); static void he_tasklet(unsigned long data); static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page); static int he_start(struct atm_dev *dev); static void he_stop(struct he_dev *dev); static void he_phy_put(struct atm_dev *, unsigned char, unsigned long); static unsigned char he_phy_get(struct atm_dev *, unsigned long); static u8 read_prom_byte(struct he_dev *he_dev, int addr); /* globals */ static struct he_dev *he_devs; static bool disable64; static short nvpibits = -1; static short nvcibits = -1; static short rx_skb_reserve = 16; static bool irq_coalesce = true; static bool sdh; /* Read from EEPROM = 0000 0011b */ static unsigned int readtab[] = { CS_HIGH | CLK_HIGH, CS_LOW | CLK_LOW, CLK_HIGH, /* 0 */ CLK_LOW, CLK_HIGH, /* 0 */ CLK_LOW, CLK_HIGH, /* 0 */ CLK_LOW, CLK_HIGH, /* 0 */ CLK_LOW, CLK_HIGH, /* 0 */ CLK_LOW, CLK_HIGH, /* 0 */ CLK_LOW | SI_HIGH, CLK_HIGH | SI_HIGH, /* 1 */ CLK_LOW | SI_HIGH, CLK_HIGH | SI_HIGH /* 1 */ }; /* Clock to read from/write to the EEPROM */ static unsigned int clocktab[] = { CLK_LOW, CLK_HIGH, CLK_LOW, CLK_HIGH, CLK_LOW, CLK_HIGH, CLK_LOW, CLK_HIGH, CLK_LOW, CLK_HIGH, CLK_LOW, CLK_HIGH, CLK_LOW, CLK_HIGH, CLK_LOW, CLK_HIGH, CLK_LOW }; static struct atmdev_ops he_ops = { .open = he_open, .close = he_close, .ioctl = he_ioctl, .send = he_send, .phy_put = he_phy_put, .phy_get = he_phy_get, .proc_read = he_proc_read, .owner = THIS_MODULE }; #define he_writel(dev, val, reg) do { writel(val, (dev)->membase + (reg)); wmb(); } while (0) #define he_readl(dev, reg) readl((dev)->membase + (reg)) /* section 2.12 connection memory access */ static __inline__ void he_writel_internal(struct he_dev 
*he_dev, unsigned val, unsigned addr, unsigned flags) { he_writel(he_dev, val, CON_DAT); (void) he_readl(he_dev, CON_DAT); /* flush posted writes */ he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL); while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY); } #define he_writel_rcm(dev, val, reg) \ he_writel_internal(dev, val, reg, CON_CTL_RCM) #define he_writel_tcm(dev, val, reg) \ he_writel_internal(dev, val, reg, CON_CTL_TCM) #define he_writel_mbox(dev, val, reg) \ he_writel_internal(dev, val, reg, CON_CTL_MBOX) static unsigned he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags) { he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL); while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY); return he_readl(he_dev, CON_DAT); } #define he_readl_rcm(dev, reg) \ he_readl_internal(dev, reg, CON_CTL_RCM) #define he_readl_tcm(dev, reg) \ he_readl_internal(dev, reg, CON_CTL_TCM) #define he_readl_mbox(dev, reg) \ he_readl_internal(dev, reg, CON_CTL_MBOX) /* figure 2.2 connection id */ #define he_mkcid(dev, vpi, vci) (((vpi << (dev)->vcibits) | vci) & 0x1fff) /* 2.5.1 per connection transmit state registers */ #define he_writel_tsr0(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0) #define he_readl_tsr0(dev, cid) \ he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0) #define he_writel_tsr1(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1) #define he_writel_tsr2(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2) #define he_writel_tsr3(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3) #define he_writel_tsr4(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4) /* from page 2-20 * * NOTE While the transmit connection is active, bits 23 through 0 * of this register must not be written by the host. Byte * enables should be used during normal operation when writing * the most significant byte. 
*/ #define he_writel_tsr4_upper(dev, val, cid) \ he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \ CON_CTL_TCM \ | CON_BYTE_DISABLE_2 \ | CON_BYTE_DISABLE_1 \ | CON_BYTE_DISABLE_0) #define he_readl_tsr4(dev, cid) \ he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4) #define he_writel_tsr5(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5) #define he_writel_tsr6(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6) #define he_writel_tsr7(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7) #define he_writel_tsr8(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0) #define he_writel_tsr9(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1) #define he_writel_tsr10(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2) #define he_writel_tsr11(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3) #define he_writel_tsr12(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0) #define he_writel_tsr13(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1) #define he_writel_tsr14(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRD | cid) #define he_writel_tsr14_upper(dev, val, cid) \ he_writel_internal(dev, val, CONFIG_TSRD | cid, \ CON_CTL_TCM \ | CON_BYTE_DISABLE_2 \ | CON_BYTE_DISABLE_1 \ | CON_BYTE_DISABLE_0) /* 2.7.1 per connection receive state registers */ #define he_writel_rsr0(dev, val, cid) \ he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0) #define he_readl_rsr0(dev, cid) \ he_readl_rcm(dev, 0x00000 | (cid << 3) | 0) #define he_writel_rsr1(dev, val, cid) \ he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1) #define he_writel_rsr2(dev, val, cid) \ he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2) #define he_writel_rsr3(dev, val, cid) \ he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3) #define he_writel_rsr4(dev, val, cid) \ he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4) #define he_writel_rsr5(dev, val, 
cid) \ he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5) #define he_writel_rsr6(dev, val, cid) \ he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6) #define he_writel_rsr7(dev, val, cid) \ he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7) static __inline__ struct atm_vcc* __find_vcc(struct he_dev *he_dev, unsigned cid) { struct hlist_head *head; struct atm_vcc *vcc; struct sock *s; short vpi; int vci; vpi = cid >> he_dev->vcibits; vci = cid & ((1 << he_dev->vcibits) - 1); head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)]; sk_for_each(s, head) { vcc = atm_sk(s); if (vcc->dev == he_dev->atm_dev && vcc->vci == vci && vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE) { return vcc; } } return NULL; } static int he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent) { struct atm_dev *atm_dev = NULL; struct he_dev *he_dev = NULL; int err = 0; printk(KERN_INFO "ATM he driver\n"); if (pci_enable_device(pci_dev)) return -EIO; if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)) != 0) { printk(KERN_WARNING "he: no suitable dma available\n"); err = -EIO; goto init_one_failure; } atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL); if (!atm_dev) { err = -ENODEV; goto init_one_failure; } pci_set_drvdata(pci_dev, atm_dev); he_dev = kzalloc(sizeof(struct he_dev), GFP_KERNEL); if (!he_dev) { err = -ENOMEM; goto init_one_failure; } he_dev->pci_dev = pci_dev; he_dev->atm_dev = atm_dev; he_dev->atm_dev->dev_data = he_dev; atm_dev->dev_data = he_dev; he_dev->number = atm_dev->number; tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev); spin_lock_init(&he_dev->global_lock); if (he_start(atm_dev)) { he_stop(he_dev); err = -ENODEV; goto init_one_failure; } he_dev->next = NULL; if (he_devs) he_dev->next = he_devs; he_devs = he_dev; return 0; init_one_failure: if (atm_dev) atm_dev_deregister(atm_dev); kfree(he_dev); pci_disable_device(pci_dev); return err; } static void he_remove_one(struct pci_dev *pci_dev) { struct atm_dev 
*atm_dev; struct he_dev *he_dev; atm_dev = pci_get_drvdata(pci_dev); he_dev = HE_DEV(atm_dev); /* need to remove from he_devs */ he_stop(he_dev); atm_dev_deregister(atm_dev); kfree(he_dev); pci_disable_device(pci_dev); } static unsigned rate_to_atmf(unsigned rate) /* cps to atm forum format */ { #define NONZERO (1 << 14) unsigned exp = 0; if (rate == 0) return 0; rate <<= 9; while (rate > 0x3ff) { ++exp; rate >>= 1; } return (NONZERO | (exp << 9) | (rate & 0x1ff)); } static void he_init_rx_lbfp0(struct he_dev *he_dev) { unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count; unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf; unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD; unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row; lbufd_index = 0; lbm_offset = he_readl(he_dev, RCMLBM_BA); he_writel(he_dev, lbufd_index, RLBF0_H); for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) { lbufd_index += 2; lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32; he_writel_rcm(he_dev, lbuf_addr, lbm_offset); he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1); if (++lbuf_count == lbufs_per_row) { lbuf_count = 0; row_offset += he_dev->bytes_per_row; } lbm_offset += 4; } he_writel(he_dev, lbufd_index - 2, RLBF0_T); he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C); } static void he_init_rx_lbfp1(struct he_dev *he_dev) { unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count; unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf; unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD; unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row; lbufd_index = 1; lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index); he_writel(he_dev, lbufd_index, RLBF1_H); for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) { lbufd_index += 2; lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32; he_writel_rcm(he_dev, lbuf_addr, lbm_offset); he_writel_rcm(he_dev, lbufd_index, 
lbm_offset + 1); if (++lbuf_count == lbufs_per_row) { lbuf_count = 0; row_offset += he_dev->bytes_per_row; } lbm_offset += 4; } he_writel(he_dev, lbufd_index - 2, RLBF1_T); he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C); } static void he_init_tx_lbfp(struct he_dev *he_dev) { unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count; unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf; unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD; unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row; lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs; lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index); he_writel(he_dev, lbufd_index, TLBF_H); for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) { lbufd_index += 1; lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32; he_writel_rcm(he_dev, lbuf_addr, lbm_offset); he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1); if (++lbuf_count == lbufs_per_row) { lbuf_count = 0; row_offset += he_dev->bytes_per_row; } lbm_offset += 2; } he_writel(he_dev, lbufd_index - 1, TLBF_T); } static int he_init_tpdrq(struct he_dev *he_dev) { he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys, GFP_KERNEL); if (he_dev->tpdrq_base == NULL) { hprintk("failed to alloc tpdrq\n"); return -ENOMEM; } he_dev->tpdrq_tail = he_dev->tpdrq_base; he_dev->tpdrq_head = he_dev->tpdrq_base; he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H); he_writel(he_dev, 0, TPDRQ_T); he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S); return 0; } static void he_init_cs_block(struct he_dev *he_dev) { unsigned clock, rate, delta; int reg; /* 5.1.7 cs block initialization */ for (reg = 0; reg < 0x20; ++reg) he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg); /* rate grid timer reload values */ clock = he_is622(he_dev) ? 
66667000 : 50000000; rate = he_dev->atm_dev->link_rate; delta = rate / 16 / 2; for (reg = 0; reg < 0x10; ++reg) { /* 2.4 internal transmit function * * we initialize the first row in the rate grid. * values are period (in clock cycles) of timer */ unsigned period = clock / rate; he_writel_mbox(he_dev, period, CS_TGRLD0 + reg); rate -= delta; } if (he_is622(he_dev)) { /* table 5.2 (4 cells per lbuf) */ he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0); he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1); he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2); he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3); he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4); /* table 5.3, 5.4, 5.5, 5.6, 5.7 */ he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0); he_writel_mbox(he_dev, 0x1801, CS_ERCTL1); he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2); he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0); he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1); he_writel_mbox(he_dev, 0x14585, CS_RTFWR); he_writel_mbox(he_dev, 0x4680, CS_RTATR); /* table 5.8 */ he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET); he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX); he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN); he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC); he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC); he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL); /* table 5.9 */ he_writel_mbox(he_dev, 0x5, CS_OTPPER); he_writel_mbox(he_dev, 0x14, CS_OTWPER); } else { /* table 5.1 (4 cells per lbuf) */ he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0); he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1); he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2); he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3); he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4); /* table 5.3, 5.4, 5.5, 5.6, 5.7 */ he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0); he_writel_mbox(he_dev, 0x4701, CS_ERCTL1); he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2); he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0); he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1); he_writel_mbox(he_dev, 0xf424, CS_RTFWR); he_writel_mbox(he_dev, 0x4680, CS_RTATR); /* table 5.8 */ 
he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET); he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX); he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN); he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC); he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC); he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL); /* table 5.9 */ he_writel_mbox(he_dev, 0x6, CS_OTPPER); he_writel_mbox(he_dev, 0x1e, CS_OTWPER); } he_writel_mbox(he_dev, 0x8, CS_OTTLIM); for (reg = 0; reg < 0x8; ++reg) he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg); } static int he_init_cs_block_rcm(struct he_dev *he_dev) { unsigned (*rategrid)[16][16]; unsigned rate, delta; int i, j, reg; unsigned rate_atmf, exp, man; unsigned long long rate_cps; int mult, buf, buf_limit = 4; rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL); if (!rategrid) return -ENOMEM; /* initialize rate grid group table */ for (reg = 0x0; reg < 0xff; ++reg) he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg); /* initialize rate controller groups */ for (reg = 0x100; reg < 0x1ff; ++reg) he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg); /* initialize tNrm lookup table */ /* the manual makes reference to a routine in a sample driver for proper configuration; fortunately, we only need this in order to support abr connection */ /* initialize rate to group table */ rate = he_dev->atm_dev->link_rate; delta = rate / 32; /* * 2.4 transmit internal functions * * we construct a copy of the rate grid used by the scheduler * in order to construct the rate to group table below */ for (j = 0; j < 16; j++) { (*rategrid)[0][j] = rate; rate -= delta; } for (i = 1; i < 16; i++) for (j = 0; j < 16; j++) if (i > 14) (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4; else (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2; /* * 2.4 transmit internal function * * this table maps the upper 5 bits of exponent and mantissa * of the atm forum representation of the rate into an index * on rate grid */ rate_atmf = 0; while (rate_atmf < 0x400) { man = (rate_atmf & 0x1f) << 4; exp = rate_atmf >> 5; /* instead of '/ 
512', use '>> 9' to prevent a call to divdu3 on x86 platforms */ rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9; if (rate_cps < 10) rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */ for (i = 255; i > 0; i--) if ((*rategrid)[i/16][i%16] >= rate_cps) break; /* pick nearest rate instead? */ /* * each table entry is 16 bits: (rate grid index (8 bits) * and a buffer limit (8 bits) * there are two table entries in each 32-bit register */ #ifdef notdef buf = rate_cps * he_dev->tx_numbuffs / (he_dev->atm_dev->link_rate * 2); #else /* this is pretty, but avoids _divdu3 and is mostly correct */ mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR; if (rate_cps > (272 * mult)) buf = 4; else if (rate_cps > (204 * mult)) buf = 3; else if (rate_cps > (136 * mult)) buf = 2; else if (rate_cps > (68 * mult)) buf = 1; else buf = 0; #endif if (buf > buf_limit) buf = buf_limit; reg = (reg << 16) | ((i << 8) | buf); #define RTGTBL_OFFSET 0x400 if (rate_atmf & 0x1) he_writel_rcm(he_dev, reg, CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1)); ++rate_atmf; } kfree(rategrid); return 0; } static int he_init_group(struct he_dev *he_dev, int group) { struct he_buff *heb, *next; dma_addr_t mapping; int i; he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32)); he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32)); he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32)); he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0), G0_RBPS_BS + (group * 32)); /* bitmap table */ he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE) * sizeof(unsigned long), GFP_KERNEL); if (!he_dev->rbpl_table) { hprintk("unable to allocate rbpl bitmap table\n"); return -ENOMEM; } bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE); /* rbpl_virt 64-bit pointers */ he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE * sizeof(struct he_buff *), GFP_KERNEL); if (!he_dev->rbpl_virt) { hprintk("unable to allocate rbpl virt table\n"); goto out_free_rbpl_table; } /* large buffer pool */ he_dev->rbpl_pool = dma_pool_create("rbpl", 
&he_dev->pci_dev->dev, CONFIG_RBPL_BUFSIZE, 64, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		goto out_free_rbpl_virt;
	}

	/* large receive buffer pool queue: one he_rbp descriptor per buffer */
	he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
						CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
						&he_dev->rbpl_phys, GFP_KERNEL);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl_base\n");
		goto out_destroy_rbpl_pool;
	}

	INIT_LIST_HEAD(&he_dev->rbpl_outstanding);

	/*
	 * Pre-populate every rbpl slot with a dma-pool buffer.  Bit i of
	 * rbpl_table marks slot i in use, and rbpl_virt[i] maps the index
	 * the hardware hands back (via RBRQ_ADDR) to the host buffer.
	 */
	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {

		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL, &mapping);
		if (!heb)
			goto out_free_rbpl;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);

		set_bit(i, he_dev->rbpl_table);
		he_dev->rbpl_virt[i] = heb;
		he_dev->rbpl_hint = i + 1;
		/* hardware sees the slot index in the idx field and the dma
		 * address of the payload (past the he_buff header) */
		he_dev->rbpl_base[i].idx = i << RBP_IDX_OFFSET;
		he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	/* program the group's rbpl queue registers (32-byte register stride) */
	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T + (group * 32));
	he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
				RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
				RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
						CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
						&he_dev->rbrq_phys, GFP_KERNEL);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		goto out_free_rbpl;
	}

	he_dev->rbrq_head = he_dev->rbrq_base;

	/* rbrq/tbrq register blocks use a 16-byte stride per group */
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
						G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
						CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
						&he_dev->tbrq_phys, GFP_KERNEL);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		goto out_free_rbpq_base;
	}

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;

	/* unwind in reverse order of the allocations above; the labels fall
	 * through so each entry point also releases everything below it */
out_free_rbpq_base:
	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE *
			  sizeof(struct he_rbrq), he_dev->rbrq_base,
			  he_dev->rbrq_phys);
out_free_rbpl:
	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
		dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE *
			  sizeof(struct he_rbp), he_dev->rbpl_base,
			  he_dev->rbpl_phys);
out_destroy_rbpl_pool:
	dma_pool_destroy(he_dev->rbpl_pool);
out_free_rbpl_virt:
	kfree(he_dev->rbpl_virt);
out_free_rbpl_table:
	kfree(he_dev->rbpl_table);

	return -ENOMEM;
}

/*
 * Allocate and program interrupt queue 0 and register the irq handler.
 *
 * 2.9.3.5: the tail offset word for a queue lives immediately after the
 * end of the queue itself, hence the "+ 1" slot in the allocation below.
 * Returns 0 on success, -ENOMEM/-EINVAL on failure.
 */
static int he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5 tail offset for each interrupt queue is located after the
	 *	   end of the interrupt queue */

	he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
					       (CONFIG_IRQ_SIZE + 1) * sizeof(struct he_irq),
					       &he_dev->irq_phys, GFP_KERNEL);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	/* mark every slot invalid so stale entries are never serviced */
	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
he_writel(he_dev, 0x0, IRQ0_DATA);

	/* interrupt queues 1-3 are unused; zero their registers */
	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers -- map everything to q0 */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq,
			he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}

/*
 * Bring the adapter up: PCI bus-controller configuration, card reset,
 * PROM read, SDRAM/connection-memory initialization, host queue setup
 * and finally enabling of the tx/rx paths.  Section numbers in the
 * comments refer to the ForeRunnerHE documentation.
 *
 * Returns 0 on success or a negative errno.
 */
static int he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	/* bus mastering and memory-write-invalidate are needed for dma */
	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	/* minimum usable cache line size is 16; failure here is non-fatal */
	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
	 */
#define LAT_TIMER 209
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	msleep(16);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	/* module parameter override for 64-bit transfers */
	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents: product id, media type, mac address */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %pM\n", he_dev->prod_id,
		he_dev->media & 0x40 ? "SM" : "MM", dev->esi);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianess */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 *		local (cell) buffer memory map
	 *
	 *             HE155                          HE622
	 *
	 *        0 ____________1023 bytes  0 _______________________2047 bytes
	 *         |            |            |                   |   |
	 *         |  utility   |            |        rx0        |   |
	 *        5|____________|         255|___________________| u |
	 *        6|            |         256|                   | t |
	 *         |            |            |                   | i |
	 *         |    rx0     |     row    |        tx         | l |
	 *         |            |            |                   | i |
	 *         |            |         767|___________________| t |
	 *      517|____________|         768|                   | y |
	 *  row 518|            |            |        rx1        |   |
	 *         |            |        1023|___________________|___|
	 *         |            |
	 *         |    tx      |
	 *         |            |
	 *         |            |
	 *     1535|____________|
	 *     1536|            |
	 *         |    rx1     |
	 *     2047|____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	/* module parameters may override the vpi/vci split, but the total
	 * number of cid bits is fixed by the hardware */
	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}

	/* local buffer memory row layout; values follow the map above */
	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;

	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
								LBARB);

	he_writel(he_dev, BANK_ON |
		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
								SDRAMCON);

	he_writel(he_dev,
		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
						RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
						TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		RX_VALVP(he_dev->vpibits) |
		RX_VALVC(he_dev->vcibits), RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		(he_is622(he_dev) ?
TX_UT_MODE(0) : TX_UT_MODE(1)) |
		TX_VCI_MASK(he_dev->vcibits) |
		LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
								RH_CONFIG);

	/* 5.1.3 initialize connection memory -- zero both banks */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *                  tx memory
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       TSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |       TSRb        |
	 *       0xc000|___________________|
	 *             |                   |
	 *             |       TSRc        |
	 *       0xe000|___________________|
	 *             |       TSRd        |
	 *       0xf000|___________________|
	 *             |       tmABR       |
	 *      0x10000|___________________|
	 *             |                   |
	 *             |       tmTPD       |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *      0x1ffff|___________________|
	 *
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);

	/*
	 *	receive connection memory map
	 *
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       RSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |      rx0/1        |
	 *             |       LBM         |   link lists of local
	 *             |       tx          |   buffer memory
	 *             |                   |
	 *       0xd000|___________________|
	 *             |                   |
	 *             |      rmABR        |
	 *       0xe000|___________________|
	 *             |                   |
	 *             |       RSRb        |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *       0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH); 	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

	he_dev->tpd_pool = dma_pool_create("tpd", &he_dev->pci_dev->dev,
		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd dma_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);

	if (he_init_group(he_dev, 0) !=
0)
		return -ENOMEM;

	/* only group 0 is used; park the queues of groups 1..n with zeroed
	 * base/tail registers and minimal thresholds */
	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
						G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
						G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page -- the adapter dma's queue tail offsets here */

	he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev,
					  sizeof(struct he_hsp),
					  &he_dev->hsp_phys, GFP_KERNEL);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_isMM(he_dev))
		suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
		he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	/* cbr rate registers start out unused (pcr == -1 means free) */
	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;


	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}

/*
 * Tear down the adapter: quiesce interrupts and the tx/rx paths, stop
 * the framer, then release the irq, dma memory, pools and the register
 * mapping.  Safe to call on a partially initialized he_dev -- every
 * resource is checked before being freed.
 */
static void
he_stop(struct he_dev *he_dev)
{
	struct he_buff *heb, *next;
	struct pci_dev *pci_dev;
	u32 gen_cntl_0, reg;
	u16 command;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

		tasklet_disable(&he_dev->tasklet);

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		dma_free_coherent(&he_dev->pci_dev->dev, (CONFIG_IRQ_SIZE + 1)
				  * sizeof(struct he_irq), he_dev->irq_base,
				  he_dev->irq_phys);

	if (he_dev->hsp)
		dma_free_coherent(&he_dev->pci_dev->dev, sizeof(struct he_hsp),
				  he_dev->hsp,
he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
		/* return any buffers still owned by the hardware queue */
		list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
			dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE
				  * sizeof(struct he_rbp), he_dev->rbpl_base,
				  he_dev->rbpl_phys);
	}
	kfree(he_dev->rbpl_virt);
	kfree(he_dev->rbpl_table);
	dma_pool_destroy(he_dev->rbpl_pool);

	if (he_dev->rbrq_base)
		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
				  he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
				  he_dev->tbrq_base, he_dev->tbrq_phys);

	/* NOTE(review): this frees the tpdrq with the TBRQ element count and
	 * size; the tpdrq allocation is not visible in this chunk -- verify
	 * the size matches what he_init_tpdrq() allocated (it looks like it
	 * should be CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq)). */
	if (he_dev->tpdrq_base)
		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
				  he_dev->tpdrq_base, he_dev->tpdrq_phys);

	dma_pool_destroy(he_dev->tpd_pool);

	if (he_dev->pci_dev) {
		/* drop memory decode and bus mastering */
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}

/*
 * Allocate a transmit packet descriptor from the tpd dma pool and reset
 * its fields.  Called from atomic context; returns NULL on failure.
 * TPD_ADDR() keeps the dma handle encoded in the status word so the
 * descriptor can later be freed back to the pool.
 */
static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
	struct he_tpd *tpd;
	dma_addr_t mapping;

	tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping);
	if (tpd == NULL)
		return NULL;

	tpd->status = TPD_ADDR(mapping);
	tpd->reserved = 0;
	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
}

/* extract the aal5 trailer length field from the last cell of a pdu */
#define AAL5_LEN(buf,len) 						\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))

/* 2.10.1.2 receive
 *
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

#define TCP_CKSUM(buf,len) 						\
			((((unsigned char *)(buf))[(len)-2] << 8) |	\
				(((unsigned char *)(buf))[(len-1)]))

/*
 * Drain the rx buffer ready queue for the given group: reassemble pdus
 * from returned host buffers and push complete aal0/aal5 packets up to
 * the atm layer.  Returns the number of pdus assembled, which the
 * caller uses to decide whether to replenish the rbpl.
 */
static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	unsigned cid, lastcid = -1;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_buff *heb, *next;
	int i;
	int pdus_assembled = 0;
	int updated = 0;

	read_lock(&vcc_sklist_lock);
	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

		/* map the hardware buffer index back to the host buffer */
		i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
		heb = he_dev->rbpl_virt[i];

		cid = RBRQ_CID(he_dev->rbrq_head);
		/* cache the vcc lookup across consecutive entries for one cid */
		if (cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
			hprintk("vcc/he_vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
				clear_bit(i, he_dev->rbpl_table);
				list_del(&heb->entry);
				dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
			}

			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
			atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
		clear_bit(i, he_dev->rbpl_table);
		list_move_tail(&heb->entry, &he_vcc->buffers);
		he_vcc->pdu_len += heb->len;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			lastcid = -1;
			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

		/* keep accumulating buffers until the end-of-pdu flag */
		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
							? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
							?
"LEN_ERR" : "", vcc->vpi, vcc->vci); atomic_inc(&vcc->stats->rx_err); goto return_host_buffers; } skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve, GFP_ATOMIC); if (!skb) { HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci); goto return_host_buffers; } if (rx_skb_reserve > 0) skb_reserve(skb, rx_skb_reserve); __net_timestamp(skb); list_for_each_entry(heb, &he_vcc->buffers, entry) memcpy(skb_put(skb, heb->len), &heb->data, heb->len); switch (vcc->qos.aal) { case ATM_AAL0: /* 2.10.1.5 raw cell receive */ skb->len = ATM_AAL0_SDU; skb_set_tail_pointer(skb, skb->len); break; case ATM_AAL5: /* 2.10.1.2 aal5 receive */ skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len); skb_set_tail_pointer(skb, skb->len); #ifdef USE_CHECKSUM_HW if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) { skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = TCP_CKSUM(skb->data, he_vcc->pdu_len); } #endif break; } #ifdef should_never_happen if (skb->len > vcc->qos.rxtp.max_sdu) hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)! 
cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid); #endif #ifdef notdef ATM_SKB(skb)->vcc = vcc; #endif spin_unlock(&he_dev->global_lock); vcc->push(vcc, skb); spin_lock(&he_dev->global_lock); atomic_inc(&vcc->stats->rx); return_host_buffers: ++pdus_assembled; list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry) dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping); INIT_LIST_HEAD(&he_vcc->buffers); he_vcc->pdu_len = 0; next_rbrq_entry: he_dev->rbrq_head = (struct he_rbrq *) ((unsigned long) he_dev->rbrq_base | RBRQ_MASK(he_dev->rbrq_head + 1)); } read_unlock(&vcc_sklist_lock); if (updated) { if (updated > he_dev->rbrq_peak) he_dev->rbrq_peak = updated; he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head), G0_RBRQ_H + (group * 16)); } return pdus_assembled; } static void he_service_tbrq(struct he_dev *he_dev, int group) { struct he_tbrq *tbrq_tail = (struct he_tbrq *) ((unsigned long)he_dev->tbrq_base | he_dev->hsp->group[group].tbrq_tail); struct he_tpd *tpd; int slot, updated = 0; struct he_tpd *__tpd; /* 2.1.6 transmit buffer return queue */ while (he_dev->tbrq_head != tbrq_tail) { ++updated; HPRINTK("tbrq%d 0x%x%s%s\n", group, TBRQ_TPD(he_dev->tbrq_head), TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "", TBRQ_MULTIPLE(he_dev->tbrq_head) ? 
" MULTIPLE" : ""); tpd = NULL; list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) { if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) { tpd = __tpd; list_del(&__tpd->entry); break; } } if (tpd == NULL) { hprintk("unable to locate tpd for dma buffer %x\n", TBRQ_TPD(he_dev->tbrq_head)); goto next_tbrq_entry; } if (TBRQ_EOS(he_dev->tbrq_head)) { HPRINTK("wake_up(tx_waitq) cid 0x%x\n", he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci)); if (tpd->vcc) wake_up(&HE_VCC(tpd->vcc)->tx_waitq); goto next_tbrq_entry; } for (slot = 0; slot < TPD_MAXIOV; ++slot) { if (tpd->iovec[slot].addr) dma_unmap_single(&he_dev->pci_dev->dev, tpd->iovec[slot].addr, tpd->iovec[slot].len & TPD_LEN_MASK, DMA_TO_DEVICE); if (tpd->iovec[slot].len & TPD_LST) break; } if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */ if (tpd->vcc && tpd->vcc->pop) tpd->vcc->pop(tpd->vcc, tpd->skb); else dev_kfree_skb_any(tpd->skb); } next_tbrq_entry: if (tpd) dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status)); he_dev->tbrq_head = (struct he_tbrq *) ((unsigned long) he_dev->tbrq_base | TBRQ_MASK(he_dev->tbrq_head + 1)); } if (updated) { if (updated > he_dev->tbrq_peak) he_dev->tbrq_peak = updated; he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head), G0_TBRQ_H + (group * 16)); } } static void he_service_rbpl(struct he_dev *he_dev, int group) { struct he_rbp *new_tail; struct he_rbp *rbpl_head; struct he_buff *heb; dma_addr_t mapping; int i; int moved = 0; rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base | RBPL_MASK(he_readl(he_dev, G0_RBPL_S))); for (;;) { new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base | RBPL_MASK(he_dev->rbpl_tail+1)); /* table 3.42 -- rbpl_tail should never be set to rbpl_head */ if (new_tail == rbpl_head) break; i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint); if (i > (RBPL_TABLE_SIZE - 1)) { i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE); if (i > (RBPL_TABLE_SIZE - 1)) break; } 
he_dev->rbpl_hint = i + 1; heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC, &mapping); if (!heb) break; heb->mapping = mapping; list_add(&heb->entry, &he_dev->rbpl_outstanding); he_dev->rbpl_virt[i] = heb; set_bit(i, he_dev->rbpl_table); new_tail->idx = i << RBP_IDX_OFFSET; new_tail->phys = mapping + offsetof(struct he_buff, data); he_dev->rbpl_tail = new_tail; ++moved; } if (moved) he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T); } static void he_tasklet(unsigned long data) { unsigned long flags; struct he_dev *he_dev = (struct he_dev *) data; int group, type; int updated = 0; HPRINTK("tasklet (0x%lx)\n", data); spin_lock_irqsave(&he_dev->global_lock, flags); while (he_dev->irq_head != he_dev->irq_tail) { ++updated; type = ITYPE_TYPE(he_dev->irq_head->isw); group = ITYPE_GROUP(he_dev->irq_head->isw); switch (type) { case ITYPE_RBRQ_THRESH: HPRINTK("rbrq%d threshold\n", group); /* fall through */ case ITYPE_RBRQ_TIMER: if (he_service_rbrq(he_dev, group)) he_service_rbpl(he_dev, group); break; case ITYPE_TBRQ_THRESH: HPRINTK("tbrq%d threshold\n", group); /* fall through */ case ITYPE_TPD_COMPLETE: he_service_tbrq(he_dev, group); break; case ITYPE_RBPL_THRESH: he_service_rbpl(he_dev, group); break; case ITYPE_RBPS_THRESH: /* shouldn't happen unless small buffers enabled */ break; case ITYPE_PHY: HPRINTK("phy interrupt\n"); #ifdef CONFIG_ATM_HE_USE_SUNI spin_unlock_irqrestore(&he_dev->global_lock, flags); if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt) he_dev->atm_dev->phy->interrupt(he_dev->atm_dev); spin_lock_irqsave(&he_dev->global_lock, flags); #endif break; case ITYPE_OTHER: switch (type|group) { case ITYPE_PARITY: hprintk("parity error\n"); break; case ITYPE_ABORT: hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR)); break; } break; case ITYPE_TYPE(ITYPE_INVALID): /* see 8.1.1 -- check all queues */ HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw); he_service_rbrq(he_dev, 0); he_service_rbpl(he_dev, 0); 
he_service_tbrq(he_dev, 0);
				break;
			default:
				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
		}

		/* consume the entry and advance around the ring */
		he_dev->irq_head->isw = ITYPE_INVALID;

		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
	}

	if (updated) {
		if (updated > he_dev->irq_peak)
			he_dev->irq_peak = updated;

		he_writel(he_dev,
			IRQ_SIZE(CONFIG_IRQ_SIZE) |
			IRQ_THRESH(CONFIG_IRQ_THRESH) |
			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
}

/*
 * Hard irq handler (shared line).  Reads the adapter-updated tail
 * offset, and if new interrupt queue entries are pending, acknowledges
 * the interrupt and defers the actual servicing to he_tasklet().
 */
static irqreturn_t
he_irq_handler(int irq, void *dev_id)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev * )dev_id;
	int handled = 0;

	if (he_dev == NULL)
		return IRQ_NONE;

	spin_lock_irqsave(&he_dev->global_lock, flags);

	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
						(*he_dev->irq_tailoffset << 2));

	if (he_dev->irq_tail == he_dev->irq_head) {
		HPRINTK("tailoffset not updated?\n");
		/* fall back to reading the tail straight from the adapter */
		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
	}

#ifdef DEBUG
	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
		hprintk("spurious (or shared) interrupt?\n");
#endif

	if (he_dev->irq_head != he_dev->irq_tail) {
		handled = 1;
		tasklet_schedule(&he_dev->tasklet);
		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
	return IRQ_RETVAL(handled);

}

/*
 * Append a tpd to the transmit packet descriptor ready queue for the
 * given cid.  If the ring is full (tail would meet head even after
 * re-reading the head from the adapter) the pdu is dropped.
 */
static __inline__ void
__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
{
	struct he_tpdrq *new_tail;

	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
					tpd, cid, he_dev->tpdrq_tail);

	/* new_tail = he_dev->tpdrq_tail; */
	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
					TPDRQ_MASK(he_dev->tpdrq_tail+1));

	/*
	 * check to see if we are about to set the tail == head
	 * if true, update the head pointer from the adapter
	 * to see if this is really the case (reading the queue
	 * head for every enqueue would be unnecessarily slow)
	 */

	if (new_tail == he_dev->tpdrq_head) {
		he_dev->tpdrq_head = (struct he_tpdrq *)
			(((unsigned long)he_dev->tpdrq_base) |
				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));

		if (new_tail == he_dev->tpdrq_head) {
			int slot;

			hprintk("tpdrq full (cid 0x%x)\n", cid);
			/*
			 * FIXME
			 * push tpd onto a transmit backlog queue
			 * after service_tbrq, service the backlog
			 * for now, we just drop the pdu
			 */
			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
				if (tpd->iovec[slot].addr)
					dma_unmap_single(&he_dev->pci_dev->dev,
						tpd->iovec[slot].addr,
						tpd->iovec[slot].len & TPD_LEN_MASK,
								DMA_TO_DEVICE);
			}
			if (tpd->skb) {
				/* NOTE(review): unlike he_service_tbrq(),
				 * tpd->vcc is dereferenced here without a
				 * NULL check -- confirm every tpd with an
				 * skb always carries a vcc. */
				if (tpd->vcc->pop)
					tpd->vcc->pop(tpd->vcc, tpd->skb);
				else
					dev_kfree_skb_any(tpd->skb);
				atomic_inc(&tpd->vcc->stats->tx_err);
			}
			dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
			return;
		}
	}

	/* 2.1.5 transmit packet descriptor ready queue */
	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
	he_dev->tpdrq_tail->cid = cid;
	wmb();	/* descriptor must be visible before the tail moves */

	he_dev->tpdrq_tail = new_tail;

	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
}

/*
 * atmdev_ops open: allocate the per-vcc state and program the transmit
 * (tsr) and/or receive (rsr) connection state for the requested
 * traffic class.  Returns 0 on success or a negative errno.
 */
static int
he_open(struct atm_vcc *vcc)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_vcc *he_vcc;
	int err = 0;
	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
	short vpi = vcc->vpi;
	int vci = vcc->vci;

	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
		return 0;

	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);

	set_bit(ATM_VF_ADDR, &vcc->flags);

	cid = he_mkcid(he_dev, vpi, vci);

	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
	if (he_vcc == NULL) {
		hprintk("unable to allocate he_vcc during open\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_vcc->buffers);
	he_vcc->pdu_len = 0;
	he_vcc->rc_index = -1;
init_waitqueue_head(&he_vcc->rx_waitq);
	init_waitqueue_head(&he_vcc->tx_waitq);

	vcc->dev_data = he_vcc;

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		int pcr_goal;

		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
		if (pcr_goal == 0)
			pcr_goal = he_dev->atm_dev->link_rate;
		if (pcr_goal < 0)	/* means round down, technically */
			pcr_goal = -pcr_goal;

		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				tsr0_aal = TSR0_AAL5;
				tsr4 = TSR4_AAL5;
				break;
			case ATM_AAL0:
				tsr0_aal = TSR0_AAL0_SDU;
				tsr4 = TSR4_AAL0_SDU;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);
		tsr0 = he_readl_tsr0(he_dev, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		if (TSR0_CONN_STATE(tsr0) != 0) {
			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
			err = -EBUSY;
			goto open_failed;
		}

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				/* 2.3.3.1 open connection ubr */

				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
					TSR0_USE_WMIN | TSR0_UPDATE_GER;
				break;

			case ATM_CBR:
				/* 2.3.3.2 open connection cbr */

				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
				if ((he_dev->total_bw + pcr_goal)
					> (he_dev->atm_dev->link_rate * 9 / 10))
				{
					err = -EBUSY;
					goto open_failed;
				}

				spin_lock_irqsave(&he_dev->global_lock, flags);			/* also protects he_dev->cs_stper[] */

				/* find an unused cs_stper register */
				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
					if (he_dev->cs_stper[reg].inuse == 0 ||
					    he_dev->cs_stper[reg].pcr == pcr_goal)
							break;

				if (reg == HE_NUM_CS_STPER) {
					err = -EBUSY;
					spin_unlock_irqrestore(&he_dev->global_lock, flags);
					goto open_failed;
				}

				he_dev->total_bw += pcr_goal;

				he_vcc->rc_index = reg;
				++he_dev->cs_stper[reg].inuse;
				he_dev->cs_stper[reg].pcr = pcr_goal;

				/* rate register period derived from the core clock */
				clock = he_is622(he_dev) ? 66667000 : 50000000;
				period = clock / pcr_goal;

				HPRINTK("rc_index = %d period = %d\n",
								reg, period);

				he_writel_mbox(he_dev, rate_to_atmf(period/2),
							CS_STPER0 + reg);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);

				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
							TSR0_RC_INDEX(reg);

				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		he_writel_tsr0(he_dev, tsr0, cid);
		he_writel_tsr4(he_dev, tsr4 | 1, cid);
		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);

		he_writel_tsr3(he_dev, 0x0, cid);
		he_writel_tsr5(he_dev, 0x0, cid);
		he_writel_tsr6(he_dev, 0x0, cid);
		he_writel_tsr7(he_dev, 0x0, cid);
		he_writel_tsr8(he_dev, 0x0, cid);
		he_writel_tsr10(he_dev, 0x0, cid);
		he_writel_tsr11(he_dev, 0x0, cid);
		he_writel_tsr12(he_dev, 0x0, cid);
		he_writel_tsr13(he_dev, 0x0, cid);
		he_writel_tsr14(he_dev, 0x0, cid);
		(void) he_readl_tsr0(he_dev, cid);		/* flush posted writes */
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		unsigned aal;

		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
		 				&HE_VCC(vcc)->rx_waitq);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				aal = RSR0_AAL5;
				break;
			case ATM_AAL0:
				aal = RSR0_RAWCELL;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		rsr0 = he_readl_rsr0(he_dev, cid);
		if (rsr0 & RSR0_OPEN_CONN) {
			spin_unlock_irqrestore(&he_dev->global_lock, flags);

			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
			err = -EBUSY;
			goto open_failed;
		}

		rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
		rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;

#ifdef USE_CHECKSUM_HW
		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
			rsr0 |= RSR0_TCP_CKSUM;
#endif

		he_writel_rsr4(he_dev, rsr4, cid);
		he_writel_rsr1(he_dev, rsr1, cid);
		/* 5.1.11 last parameter initialized should be
			  the open/closed indication in rsr0 */
		he_writel_rsr0(he_dev,
			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */

		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

open_failed:

	if (err) {
		kfree(he_vcc);
		clear_bit(ATM_VF_ADDR, &vcc->flags);
	}
	else
		set_bit(ATM_VF_READY, &vcc->flags);

	return err;
}

/*
 * atmdev_ops close: shut down the receive and/or transmit side of the
 * connection and wait for the adapter to confirm.  (Continues past the
 * end of this chunk.)
 */
static void
he_close(struct atm_vcc *vcc)
{
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_tpd *tpd;
	unsigned cid;
	struct he_vcc *he_vcc = HE_VCC(vcc);
#define MAX_RETRY 30
	int retry = 0, sleep = 1, tx_inuse;

	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);

	clear_bit(ATM_VF_READY, &vcc->flags);
	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		int timeout;

		HPRINTK("close rx cid 0x%x\n", cid);

		/* 2.7.2.2 close receive operation */

		/* wait for previous close (if any) to finish */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
			udelay(250);
		}

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->rx_waitq, &wait);

		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->rx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		if (timeout == 0)
			hprintk("close rx timeout cid 0x%x\n", cid);

		HPRINTK("close rx cid 0x%x complete\n", cid);

	}

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		volatile unsigned tsr4, tsr0;
		int timeout;

		HPRINTK("close tx cid 0x%x\n", cid);

		/*
2.1.2 * * ... the host must first stop queueing packets to the TPDRQ * on the connection to be closed, then wait for all outstanding * packets to be transmitted and their buffers returned to the * TBRQ. When the last packet on the connection arrives in the * TBRQ, the host issues the close command to the adapter. */ while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) && (retry < MAX_RETRY)) { msleep(sleep); if (sleep < 250) sleep = sleep * 2; ++retry; } if (tx_inuse > 1) hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse); /* 2.3.1.1 generic close operations with flush */ spin_lock_irqsave(&he_dev->global_lock, flags); he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid); /* also clears TSR4_SESSION_ENDED */ switch (vcc->qos.txtp.traffic_class) { case ATM_UBR: he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(200000)) | TSR1_PCR(0), cid); break; case ATM_CBR: he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid); break; } (void) he_readl_tsr4(he_dev, cid); /* flush posted writes */ tpd = __alloc_tpd(he_dev); if (tpd == NULL) { hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid); goto close_tx_incomplete; } tpd->status |= TPD_EOS | TPD_INT; tpd->skb = NULL; tpd->vcc = vcc; wmb(); set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&he_vcc->tx_waitq, &wait); __enqueue_tpd(he_dev, tpd, cid); spin_unlock_irqrestore(&he_dev->global_lock, flags); timeout = schedule_timeout(30*HZ); remove_wait_queue(&he_vcc->tx_waitq, &wait); set_current_state(TASK_RUNNING); spin_lock_irqsave(&he_dev->global_lock, flags); if (timeout == 0) { hprintk("close tx timeout cid 0x%x\n", cid); goto close_tx_incomplete; } while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) { HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4); udelay(250); } while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) { HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0); udelay(250); } close_tx_incomplete: if 
(vcc->qos.txtp.traffic_class == ATM_CBR) { int reg = he_vcc->rc_index; HPRINTK("cs_stper reg = %d\n", reg); if (he_dev->cs_stper[reg].inuse == 0) hprintk("cs_stper[%d].inuse = 0!\n", reg); else --he_dev->cs_stper[reg].inuse; he_dev->total_bw -= he_dev->cs_stper[reg].pcr; } spin_unlock_irqrestore(&he_dev->global_lock, flags); HPRINTK("close tx cid 0x%x complete\n", cid); } kfree(he_vcc); clear_bit(ATM_VF_ADDR, &vcc->flags); } static int he_send(struct atm_vcc *vcc, struct sk_buff *skb) { unsigned long flags; struct he_dev *he_dev = HE_DEV(vcc->dev); unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci); struct he_tpd *tpd; #ifdef USE_SCATTERGATHER int i, slot = 0; #endif #define HE_TPD_BUFSIZE 0xffff HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci); if ((skb->len > HE_TPD_BUFSIZE) || ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) { hprintk("buffer too large (or small) -- %d bytes\n", skb->len ); if (vcc->pop) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); atomic_inc(&vcc->stats->tx_err); return -EINVAL; } #ifndef USE_SCATTERGATHER if (skb_shinfo(skb)->nr_frags) { hprintk("no scatter/gather support\n"); if (vcc->pop) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); atomic_inc(&vcc->stats->tx_err); return -EINVAL; } #endif spin_lock_irqsave(&he_dev->global_lock, flags); tpd = __alloc_tpd(he_dev); if (tpd == NULL) { if (vcc->pop) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); atomic_inc(&vcc->stats->tx_err); spin_unlock_irqrestore(&he_dev->global_lock, flags); return -ENOMEM; } if (vcc->qos.aal == ATM_AAL5) tpd->status |= TPD_CELLTYPE(TPD_USERCELL); else { char *pti_clp = (void *) (skb->data + 3); int clp, pti; pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT; clp = (*pti_clp & ATM_HDR_CLP); tpd->status |= TPD_CELLTYPE(pti); if (clp) tpd->status |= TPD_CLP; skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD); } #ifdef USE_SCATTERGATHER tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); 
tpd->iovec[slot].len = skb_headlen(skb); ++slot; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; if (slot == TPD_MAXIOV) { /* queue tpd; start new tpd */ tpd->vcc = vcc; tpd->skb = NULL; /* not the last fragment so dont ->push() yet */ wmb(); __enqueue_tpd(he_dev, tpd, cid); tpd = __alloc_tpd(he_dev); if (tpd == NULL) { if (vcc->pop) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); atomic_inc(&vcc->stats->tx_err); spin_unlock_irqrestore(&he_dev->global_lock, flags); return -ENOMEM; } tpd->status |= TPD_USERCELL; slot = 0; } tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, (void *) page_address(frag->page) + frag->page_offset, frag->size, DMA_TO_DEVICE); tpd->iovec[slot].len = frag->size; ++slot; } tpd->iovec[slot - 1].len |= TPD_LST; #else tpd->address0 = dma_map_single(&he_dev->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE); tpd->length0 = skb->len | TPD_LST; #endif tpd->status |= TPD_INT; tpd->vcc = vcc; tpd->skb = skb; wmb(); ATM_SKB(skb)->vcc = vcc; __enqueue_tpd(he_dev, tpd, cid); spin_unlock_irqrestore(&he_dev->global_lock, flags); atomic_inc(&vcc->stats->tx); return 0; } static int he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg) { unsigned long flags; struct he_dev *he_dev = HE_DEV(atm_dev); struct he_ioctl_reg reg; int err = 0; switch (cmd) { case HE_GET_REG: if (!capable(CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(&reg, arg, sizeof(struct he_ioctl_reg))) return -EFAULT; spin_lock_irqsave(&he_dev->global_lock, flags); switch (reg.type) { case HE_REGTYPE_PCI: if (reg.addr >= HE_REGMAP_SIZE) { err = -EINVAL; break; } reg.val = he_readl(he_dev, reg.addr); break; case HE_REGTYPE_RCM: reg.val = he_readl_rcm(he_dev, reg.addr); break; case HE_REGTYPE_TCM: reg.val = he_readl_tcm(he_dev, reg.addr); break; case HE_REGTYPE_MBOX: reg.val = he_readl_mbox(he_dev, reg.addr); break; default: err = -EINVAL; break; } spin_unlock_irqrestore(&he_dev->global_lock, flags); if (err == 0) 
if (copy_to_user(arg, &reg, sizeof(struct he_ioctl_reg))) return -EFAULT; break; default: #ifdef CONFIG_ATM_HE_USE_SUNI if (atm_dev->phy && atm_dev->phy->ioctl) err = atm_dev->phy->ioctl(atm_dev, cmd, arg); #else /* CONFIG_ATM_HE_USE_SUNI */ err = -EINVAL; #endif /* CONFIG_ATM_HE_USE_SUNI */ break; } return err; } static void he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr) { unsigned long flags; struct he_dev *he_dev = HE_DEV(atm_dev); HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr); spin_lock_irqsave(&he_dev->global_lock, flags); he_writel(he_dev, val, FRAMER + (addr*4)); (void) he_readl(he_dev, FRAMER + (addr*4)); /* flush posted writes */ spin_unlock_irqrestore(&he_dev->global_lock, flags); } static unsigned char he_phy_get(struct atm_dev *atm_dev, unsigned long addr) { unsigned long flags; struct he_dev *he_dev = HE_DEV(atm_dev); unsigned reg; spin_lock_irqsave(&he_dev->global_lock, flags); reg = he_readl(he_dev, FRAMER + (addr*4)); spin_unlock_irqrestore(&he_dev->global_lock, flags); HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg); return reg; } static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page) { unsigned long flags; struct he_dev *he_dev = HE_DEV(dev); int left, i; #ifdef notdef struct he_rbrq *rbrq_tail; struct he_tpdrq *tpdrq_head; int rbpl_head, rbpl_tail; #endif static long mcc = 0, oec = 0, dcc = 0, cec = 0; left = *pos; if (!left--) return sprintf(page, "ATM he driver\n"); if (!left--) return sprintf(page, "%s%s\n\n", he_dev->prod_id, he_dev->media & 0x40 ? 
"SM" : "MM"); if (!left--) return sprintf(page, "Mismatched Cells VPI/VCI Not Open Dropped Cells RCM Dropped Cells\n"); spin_lock_irqsave(&he_dev->global_lock, flags); mcc += he_readl(he_dev, MCC); oec += he_readl(he_dev, OEC); dcc += he_readl(he_dev, DCC); cec += he_readl(he_dev, CEC); spin_unlock_irqrestore(&he_dev->global_lock, flags); if (!left--) return sprintf(page, "%16ld %16ld %13ld %17ld\n\n", mcc, oec, dcc, cec); if (!left--) return sprintf(page, "irq_size = %d inuse = ? peak = %d\n", CONFIG_IRQ_SIZE, he_dev->irq_peak); if (!left--) return sprintf(page, "tpdrq_size = %d inuse = ?\n", CONFIG_TPDRQ_SIZE); if (!left--) return sprintf(page, "rbrq_size = %d inuse = ? peak = %d\n", CONFIG_RBRQ_SIZE, he_dev->rbrq_peak); if (!left--) return sprintf(page, "tbrq_size = %d peak = %d\n", CONFIG_TBRQ_SIZE, he_dev->tbrq_peak); #ifdef notdef rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S)); rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T)); inuse = rbpl_head - rbpl_tail; if (inuse < 0) inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp); inuse /= sizeof(struct he_rbp); if (!left--) return sprintf(page, "rbpl_size = %d inuse = %d\n\n", CONFIG_RBPL_SIZE, inuse); #endif if (!left--) return sprintf(page, "rate controller periods (cbr)\n pcr #vc\n"); for (i = 0; i < HE_NUM_CS_STPER; ++i) if (!left--) return sprintf(page, "cs_stper%-2d %8ld %3d\n", i, he_dev->cs_stper[i].pcr, he_dev->cs_stper[i].inuse); if (!left--) return sprintf(page, "total bw (cbr): %d (limit %d)\n", he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9); return 0; } /* eeprom routines -- see 4.7 */ static u8 read_prom_byte(struct he_dev *he_dev, int addr) { u32 val = 0, tmp_read = 0; int i, j = 0; u8 byte_read = 0; val = readl(he_dev->membase + HOST_CNTL); val &= 0xFFFFE0FF; /* Turn on write enable */ val |= 0x800; he_writel(he_dev, val, HOST_CNTL); /* Send READ instruction */ for (i = 0; i < ARRAY_SIZE(readtab); i++) { he_writel(he_dev, val | readtab[i], HOST_CNTL); udelay(EEPROM_DELAY); } /* Next, we 
need to send the byte address to read from */ for (i = 7; i >= 0; i--) { he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL); udelay(EEPROM_DELAY); he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL); udelay(EEPROM_DELAY); } j = 0; val &= 0xFFFFF7FF; /* Turn off write enable */ he_writel(he_dev, val, HOST_CNTL); /* Now, we can read data from the EEPROM by clocking it in */ for (i = 7; i >= 0; i--) { he_writel(he_dev, val | clocktab[j++], HOST_CNTL); udelay(EEPROM_DELAY); tmp_read = he_readl(he_dev, HOST_CNTL); byte_read |= (unsigned char) ((tmp_read & ID_DOUT) >> ID_DOFFSET << i); he_writel(he_dev, val | clocktab[j++], HOST_CNTL); udelay(EEPROM_DELAY); } he_writel(he_dev, val | ID_CS, HOST_CNTL); udelay(EEPROM_DELAY); return byte_read; } MODULE_LICENSE("GPL"); MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>"); MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver"); module_param(disable64, bool, 0); MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers"); module_param(nvpibits, short, 0); MODULE_PARM_DESC(nvpibits, "numbers of bits for vpi (default 0)"); module_param(nvcibits, short, 0); MODULE_PARM_DESC(nvcibits, "numbers of bits for vci (default 12)"); module_param(rx_skb_reserve, short, 0); MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)"); module_param(irq_coalesce, bool, 0); MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)"); module_param(sdh, bool, 0); MODULE_PARM_DESC(sdh, "use SDH framing (default 0)"); static struct pci_device_id he_pci_tbl[] = { { PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 }, { 0, } }; MODULE_DEVICE_TABLE(pci, he_pci_tbl); static struct pci_driver he_driver = { .name = "he", .probe = he_init_one, .remove = he_remove_one, .id_table = he_pci_tbl, }; module_pci_driver(he_driver);
gpl-2.0
clemsyn/Grouper
drivers/net/wireless/iwlegacy/iwl-rx.c
416
9419
/****************************************************************************** * * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #include <linux/etherdevice.h> #include <linux/slab.h> #include <net/mac80211.h> #include <asm/unaligned.h> #include "iwl-eeprom.h" #include "iwl-dev.h" #include "iwl-core.h" #include "iwl-sta.h" #include "iwl-io.h" #include "iwl-helpers.h" /************************** RX-FUNCTIONS ****************************/ /* * Rx theory of operation * * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), * each of which point to Receive Buffers to be filled by the NIC. These get * used not only for Rx frames, but for any command response or notification * from the NIC. The driver and NIC manage the Rx buffers by means * of indexes into the circular buffer. 
* * Rx Queue Indexes * The host/firmware share two index registers for managing the Rx buffers. * * The READ index maps to the first position that the firmware may be writing * to -- the driver can read up to (but not including) this position and get * good data. * The READ index is managed by the firmware once the card is enabled. * * The WRITE index maps to the last position the driver has read from -- the * position preceding WRITE is the last slot the firmware can place a packet. * * The queue is empty (no good data) if WRITE = READ - 1, and is full if * WRITE = READ. * * During initialization, the host sets up the READ queue position to the first * INDEX position, and WRITE to the last (READ - 1 wrapped) * * When the firmware places a packet in a buffer, it will advance the READ index * and fire the RX interrupt. The driver can then query the READ index and * process as many packets as possible, moving the WRITE index forward as it * resets the Rx queue buffers with new memory. * * The management in the driver is as follows: * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled * to replenish the iwl->rxq->rx_free. * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the * iwl->rxq is replenished and the READ INDEX is updated (updating the * 'processed' and 'read' driver indexes as well) * + A received packet is processed and handed to the kernel network stack, * detached from the iwl->rxq. The driver 'processed' index is updated. * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there * were enough free buffers and RX_STALLED is set it is cleared. 
* * * Driver sequence: * * iwl_legacy_rx_queue_alloc() Allocates rx_free * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls * iwl_rx_queue_restock * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx * queue, updates firmware pointers, and updates * the WRITE index. If insufficient rx_free buffers * are available, schedules iwl_rx_replenish * * -- enable interrupts -- * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the * READ INDEX, detaching the SKB from the pool. * Moves the packet buffer from queue to rx_used. * Calls iwl_rx_queue_restock to refill any empty * slots. * ... * */ /** * iwl_legacy_rx_queue_space - Return number of free slots available in queue. */ int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q) { int s = q->read - q->write; if (s <= 0) s += RX_QUEUE_SIZE; /* keep some buffer to not confuse full and empty queue */ s -= 2; if (s < 0) s = 0; return s; } EXPORT_SYMBOL(iwl_legacy_rx_queue_space); /** * iwl_legacy_rx_queue_update_write_ptr - Update the write pointer for the RX queue */ void iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q) { unsigned long flags; u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg; u32 reg; spin_lock_irqsave(&q->lock, flags); if (q->need_update == 0) goto exit_unlock; /* If power-saving is in use, make sure device is awake */ if (test_bit(STATUS_POWER_PMI, &priv->status)) { reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { IWL_DEBUG_INFO(priv, "Rx queue requesting wakeup," " GP1 = 0x%x\n", reg); iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); goto exit_unlock; } q->write_actual = (q->write & ~0x7); iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual); /* Else device is assumed to be awake */ } else { /* Device expects a multiple of 8 */ q->write_actual = (q->write & ~0x7); iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual); } 
q->need_update = 0; exit_unlock: spin_unlock_irqrestore(&q->lock, flags); } EXPORT_SYMBOL(iwl_legacy_rx_queue_update_write_ptr); int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv) { struct iwl_rx_queue *rxq = &priv->rxq; struct device *dev = &priv->pci_dev->dev; int i; spin_lock_init(&rxq->lock); INIT_LIST_HEAD(&rxq->rx_free); INIT_LIST_HEAD(&rxq->rx_used); /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma, GFP_KERNEL); if (!rxq->bd) goto err_bd; rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status), &rxq->rb_stts_dma, GFP_KERNEL); if (!rxq->rb_stts) goto err_rb; /* Fill the rx_used queue with _all_ of the Rx buffers */ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) list_add_tail(&rxq->pool[i].list, &rxq->rx_used); /* Set us so that we have processed and used all buffers, but have * not restocked the Rx queue with fresh buffers */ rxq->read = rxq->write = 0; rxq->write_actual = 0; rxq->free_count = 0; rxq->need_update = 0; return 0; err_rb: dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, rxq->bd_dma); err_bd: return -ENOMEM; } EXPORT_SYMBOL(iwl_legacy_rx_queue_alloc); void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif); if (!report->state) { IWL_DEBUG_11H(priv, "Spectrum Measure Notification: Start\n"); return; } memcpy(&priv->measure_report, report, sizeof(*report)); priv->measurement_status |= MEASUREMENT_READY; } EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif); /* * returns non-zero if packet should be dropped */ int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv, struct ieee80211_hdr *hdr, u32 decrypt_res, struct ieee80211_rx_status *stats) { u16 fc = le16_to_cpu(hdr->frame_control); /* * All contexts have the same setting here due to it being * a module parameter, so OK to 
check any context. */ if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK) return 0; if (!(fc & IEEE80211_FCTL_PROTECTED)) return 0; IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res); switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { case RX_RES_STATUS_SEC_TYPE_TKIP: /* The uCode has got a bad phase 1 Key, pushes the packet. * Decryption will be done in SW. */ if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == RX_RES_STATUS_BAD_KEY_TTAK) break; case RX_RES_STATUS_SEC_TYPE_WEP: if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == RX_RES_STATUS_BAD_ICV_MIC) { /* bad ICV, the packet is destroyed since the * decryption is inplace, drop it */ IWL_DEBUG_RX(priv, "Packet destroyed\n"); return -1; } case RX_RES_STATUS_SEC_TYPE_CCMP: if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == RX_RES_STATUS_DECRYPT_OK) { IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n"); stats->flag |= RX_FLAG_DECRYPTED; } break; default: break; } return 0; } EXPORT_SYMBOL(iwl_legacy_set_decrypted_flag);
gpl-2.0
aseering/linux
arch/mips/pmcs-msp71xx/msp_irq_cic.c
928
5045
/* * Copyright 2010 PMC-Sierra, Inc, derived from irq_cpu.c * * This file define the irq handler for MSP CIC subsystem interrupts. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/bitops.h> #include <linux/irq.h> #include <asm/mipsregs.h> #include <msp_cic_int.h> #include <msp_regs.h> /* * External API */ extern void msp_per_irq_init(void); extern void msp_per_irq_dispatch(void); /* * Convenience Macro. Should be somewhere generic. */ #define get_current_vpe() \ ((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE) #ifdef CONFIG_SMP #define LOCK_VPE(flags, mtflags) \ do { \ local_irq_save(flags); \ mtflags = dmt(); \ } while (0) #define UNLOCK_VPE(flags, mtflags) \ do { \ emt(mtflags); \ local_irq_restore(flags);\ } while (0) #define LOCK_CORE(flags, mtflags) \ do { \ local_irq_save(flags); \ mtflags = dvpe(); \ } while (0) #define UNLOCK_CORE(flags, mtflags) \ do { \ evpe(mtflags); \ local_irq_restore(flags);\ } while (0) #else #define LOCK_VPE(flags, mtflags) #define UNLOCK_VPE(flags, mtflags) #endif /* ensure writes to cic are completed */ static inline void cic_wmb(void) { const volatile void __iomem *cic_mem = CIC_VPE0_MSK_REG; volatile u32 dummy_read; wmb(); dummy_read = __raw_readl(cic_mem); dummy_read++; } static void unmask_cic_irq(struct irq_data *d) { volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG; int vpe; #ifdef CONFIG_SMP unsigned int mtflags; unsigned long flags; /* * Make sure we have IRQ affinity. It may have changed while * we were processing the IRQ. 
*/ if (!cpumask_test_cpu(smp_processor_id(), irq_data_get_affinity_mask(d))) return; #endif vpe = get_current_vpe(); LOCK_VPE(flags, mtflags); cic_msk_reg[vpe] |= (1 << (d->irq - MSP_CIC_INTBASE)); UNLOCK_VPE(flags, mtflags); cic_wmb(); } static void mask_cic_irq(struct irq_data *d) { volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG; int vpe = get_current_vpe(); #ifdef CONFIG_SMP unsigned long flags, mtflags; #endif LOCK_VPE(flags, mtflags); cic_msk_reg[vpe] &= ~(1 << (d->irq - MSP_CIC_INTBASE)); UNLOCK_VPE(flags, mtflags); cic_wmb(); } static void msp_cic_irq_ack(struct irq_data *d) { mask_cic_irq(d); /* * Only really necessary for 18, 16-14 and sometimes 3:0 * (since these can be edge sensitive) but it doesn't * hurt for the others */ *CIC_STS_REG = (1 << (d->irq - MSP_CIC_INTBASE)); } /* Note: Limiting to VSMP. */ #ifdef CONFIG_MIPS_MT_SMP static int msp_cic_irq_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) { int cpu; unsigned long flags; unsigned int mtflags; unsigned long imask = (1 << (d->irq - MSP_CIC_INTBASE)); volatile u32 *cic_mask = (volatile u32 *)CIC_VPE0_MSK_REG; /* timer balancing should be disabled in kernel code */ BUG_ON(d->irq == MSP_INT_VPE0_TIMER || d->irq == MSP_INT_VPE1_TIMER); LOCK_CORE(flags, mtflags); /* enable if any of each VPE's TCs require this IRQ */ for_each_online_cpu(cpu) { if (cpumask_test_cpu(cpu, cpumask)) cic_mask[cpu] |= imask; else cic_mask[cpu] &= ~imask; } UNLOCK_CORE(flags, mtflags); return 0; } #endif static struct irq_chip msp_cic_irq_controller = { .name = "MSP_CIC", .irq_mask = mask_cic_irq, .irq_mask_ack = msp_cic_irq_ack, .irq_unmask = unmask_cic_irq, .irq_ack = msp_cic_irq_ack, #ifdef CONFIG_MIPS_MT_SMP .irq_set_affinity = msp_cic_irq_set_affinity, #endif }; void __init msp_cic_irq_init(void) { int i; /* Mask/clear interrupts. */ *CIC_VPE0_MSK_REG = 0x00000000; *CIC_VPE1_MSK_REG = 0x00000000; *CIC_STS_REG = 0xFFFFFFFF; /* * The MSP7120 RG and EVBD boards use IRQ[6:4] for PCI. 
* These inputs map to EXT_INT_POL[6:4] inside the CIC. * They are to be active low, level sensitive. */ *CIC_EXT_CFG_REG &= 0xFFFF8F8F; /* initialize all the IRQ descriptors */ for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) { irq_set_chip_and_handler(i, &msp_cic_irq_controller, handle_level_irq); } /* Initialize the PER interrupt sub-system */ msp_per_irq_init(); } /* CIC masked by CIC vector processing before dispatch called */ void msp_cic_irq_dispatch(void) { volatile u32 *cic_msk_reg = (volatile u32 *)CIC_VPE0_MSK_REG; u32 cic_mask; u32 pending; int cic_status = *CIC_STS_REG; cic_mask = cic_msk_reg[get_current_vpe()]; pending = cic_status & cic_mask; if (pending & (1 << (MSP_INT_VPE0_TIMER - MSP_CIC_INTBASE))) { do_IRQ(MSP_INT_VPE0_TIMER); } else if (pending & (1 << (MSP_INT_VPE1_TIMER - MSP_CIC_INTBASE))) { do_IRQ(MSP_INT_VPE1_TIMER); } else if (pending & (1 << (MSP_INT_PER - MSP_CIC_INTBASE))) { msp_per_irq_dispatch(); } else if (pending) { do_IRQ(ffs(pending) + MSP_CIC_INTBASE - 1); } else{ spurious_interrupt(); } }
gpl-2.0
shutt1e/lge_kernel_msm7x30
sound/soc/s6000/s6105-ipcam.c
1440
6874
/* * ASoC driver for Stretch s6105 IP camera platform * * Author: Daniel Gloeckner, <dg@emlix.com> * Copyright: (C) 2009 emlix GmbH <info@emlix.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/i2c.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <variant/dmac.h> #include "../codecs/tlv320aic3x.h" #include "s6000-pcm.h" #include "s6000-i2s.h" #define S6105_CAM_CODEC_CLOCK 12288000 static int s6105_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->dai->codec_dai; struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; int ret = 0; /* set codec DAI configuration */ ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM); if (ret < 0) return ret; /* set cpu DAI configuration */ ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_NB_NF); if (ret < 0) return ret; /* set the codec system clock */ ret = snd_soc_dai_set_sysclk(codec_dai, 0, S6105_CAM_CODEC_CLOCK, SND_SOC_CLOCK_OUT); if (ret < 0) return ret; return 0; } static struct snd_soc_ops s6105_ops = { .hw_params = s6105_hw_params, }; /* s6105 machine dapm widgets */ static const struct snd_soc_dapm_widget aic3x_dapm_widgets[] = { SND_SOC_DAPM_LINE("Audio Out Differential", NULL), SND_SOC_DAPM_LINE("Audio Out Stereo", NULL), SND_SOC_DAPM_LINE("Audio In", NULL), }; /* s6105 machine audio_mapnections to the codec pins */ static const struct snd_soc_dapm_route audio_map[] = { /* Audio Out connected to HPLOUT, HPLCOM, HPROUT */ {"Audio Out Differential", NULL, "HPLOUT"}, {"Audio Out 
Differential", NULL, "HPLCOM"}, {"Audio Out Stereo", NULL, "HPLOUT"}, {"Audio Out Stereo", NULL, "HPROUT"}, /* Audio In connected to LINE1L, LINE1R */ {"LINE1L", NULL, "Audio In"}, {"LINE1R", NULL, "Audio In"}, }; static int output_type_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = 2; if (uinfo->value.enumerated.item) { uinfo->value.enumerated.item = 1; strcpy(uinfo->value.enumerated.name, "HPLOUT/HPROUT"); } else { strcpy(uinfo->value.enumerated.name, "HPLOUT/HPLCOM"); } return 0; } static int output_type_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.enumerated.item[0] = kcontrol->private_value; return 0; } static int output_type_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = kcontrol->private_data; unsigned int val = (ucontrol->value.enumerated.item[0] != 0); char *differential = "Audio Out Differential"; char *stereo = "Audio Out Stereo"; if (kcontrol->private_value == val) return 0; kcontrol->private_value = val; snd_soc_dapm_disable_pin(codec, val ? differential : stereo); snd_soc_dapm_sync(codec); snd_soc_dapm_enable_pin(codec, val ? 
stereo : differential); snd_soc_dapm_sync(codec); return 1; } static const struct snd_kcontrol_new audio_out_mux = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Master Output Mux", .index = 0, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .info = output_type_info, .get = output_type_get, .put = output_type_put, .private_value = 1 /* default to stereo */ }; /* Logic for a aic3x as connected on the s6105 ip camera ref design */ static int s6105_aic3x_init(struct snd_soc_codec *codec) { /* Add s6105 specific widgets */ snd_soc_dapm_new_controls(codec, aic3x_dapm_widgets, ARRAY_SIZE(aic3x_dapm_widgets)); /* Set up s6105 specific audio path audio_map */ snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); /* not present */ snd_soc_dapm_nc_pin(codec, "MONO_LOUT"); snd_soc_dapm_nc_pin(codec, "LINE2L"); snd_soc_dapm_nc_pin(codec, "LINE2R"); /* not connected */ snd_soc_dapm_nc_pin(codec, "MIC3L"); /* LINE2L on this chip */ snd_soc_dapm_nc_pin(codec, "MIC3R"); /* LINE2R on this chip */ snd_soc_dapm_nc_pin(codec, "LLOUT"); snd_soc_dapm_nc_pin(codec, "RLOUT"); snd_soc_dapm_nc_pin(codec, "HPRCOM"); /* always connected */ snd_soc_dapm_enable_pin(codec, "Audio In"); /* must correspond to audio_out_mux.private_value initializer */ snd_soc_dapm_disable_pin(codec, "Audio Out Differential"); snd_soc_dapm_sync(codec); snd_soc_dapm_enable_pin(codec, "Audio Out Stereo"); snd_soc_dapm_sync(codec); snd_ctl_add(codec->card, snd_ctl_new1(&audio_out_mux, codec)); return 0; } /* s6105 digital audio interface glue - connects codec <--> CPU */ static struct snd_soc_dai_link s6105_dai = { .name = "TLV320AIC31", .stream_name = "AIC31", .cpu_dai = &s6000_i2s_dai, .codec_dai = &aic3x_dai, .init = s6105_aic3x_init, .ops = &s6105_ops, }; /* s6105 audio machine driver */ static struct snd_soc_card snd_soc_card_s6105 = { .name = "Stretch IP Camera", .platform = &s6000_soc_platform, .dai_link = &s6105_dai, .num_links = 1, }; /* s6105 audio private data */ static struct aic3x_setup_data 
s6105_aic3x_setup = { }; /* s6105 audio subsystem */ static struct snd_soc_device s6105_snd_devdata = { .card = &snd_soc_card_s6105, .codec_dev = &soc_codec_dev_aic3x, .codec_data = &s6105_aic3x_setup, }; static struct s6000_snd_platform_data __initdata s6105_snd_data = { .wide = 0, .channel_in = 0, .channel_out = 1, .lines_in = 1, .lines_out = 1, .same_rate = 1, }; static struct platform_device *s6105_snd_device; /* temporary i2c device creation until this can be moved into the machine * support file. */ static struct i2c_board_info i2c_device[] = { { I2C_BOARD_INFO("tlv320aic33", 0x18), } }; static int __init s6105_init(void) { int ret; i2c_register_board_info(0, i2c_device, ARRAY_SIZE(i2c_device)); s6105_snd_device = platform_device_alloc("soc-audio", -1); if (!s6105_snd_device) return -ENOMEM; platform_set_drvdata(s6105_snd_device, &s6105_snd_devdata); s6105_snd_devdata.dev = &s6105_snd_device->dev; platform_device_add_data(s6105_snd_device, &s6105_snd_data, sizeof(s6105_snd_data)); ret = platform_device_add(s6105_snd_device); if (ret) platform_device_put(s6105_snd_device); return ret; } static void __exit s6105_exit(void) { platform_device_unregister(s6105_snd_device); } module_init(s6105_init); module_exit(s6105_exit); MODULE_AUTHOR("Daniel Gloeckner"); MODULE_DESCRIPTION("Stretch s6105 IP camera ASoC driver"); MODULE_LICENSE("GPL");
gpl-2.0
Galaxy-J5/android_kernel_samsung_j5nlte
arch/mips/alchemy/board-gpr.c
2208
6527
/* * GPR board platform device registration (Au1550) * * Copyright (C) 2010 Wolfgang Grandegger <wg@denx.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/leds.h> #include <linux/gpio.h> #include <linux/i2c.h> #include <linux/i2c-gpio.h> #include <asm/bootinfo.h> #include <asm/idle.h> #include <asm/reboot.h> #include <asm/mach-au1x00/au1000.h> #include <prom.h> const char *get_system_type(void) { return "GPR"; } void __init prom_init(void) { unsigned char *memsize_str; unsigned long memsize; prom_argc = fw_arg0; prom_argv = (char **)fw_arg1; prom_envp = (char **)fw_arg2; prom_init_cmdline(); memsize_str = prom_getenv("memsize"); if (!memsize_str) memsize = 0x04000000; else strict_strtoul(memsize_str, 0, &memsize); add_memory_region(0, memsize, BOOT_MEM_RAM); } void prom_putchar(unsigned char c) { alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); } static void gpr_reset(char *c) { /* switch System-LED to orange (red# and green# on) */ alchemy_gpio_direction_output(4, 0); alchemy_gpio_direction_output(5, 0); /* trigger watchdog to reset board in 200ms */ printk(KERN_EMERG "Triggering 
watchdog soft reset...\n"); raw_local_irq_disable(); alchemy_gpio_direction_output(1, 0); udelay(1); alchemy_gpio_set_value(1, 1); while (1) cpu_wait(); } static void gpr_power_off(void) { while (1) cpu_wait(); } void __init board_setup(void) { printk(KERN_INFO "Trapeze ITS GPR board\n"); pm_power_off = gpr_power_off; _machine_halt = gpr_power_off; _machine_restart = gpr_reset; /* Enable UART1/3 */ alchemy_uart_enable(AU1000_UART3_PHYS_ADDR); alchemy_uart_enable(AU1000_UART1_PHYS_ADDR); /* Take away Reset of UMTS-card */ alchemy_gpio_direction_output(215, 1); } /* * Watchdog */ static struct resource gpr_wdt_resource[] = { [0] = { .start = 1, .end = 1, .name = "gpr-adm6320-wdt", .flags = IORESOURCE_IRQ, } }; static struct platform_device gpr_wdt_device = { .name = "adm6320-wdt", .id = 0, .num_resources = ARRAY_SIZE(gpr_wdt_resource), .resource = gpr_wdt_resource, }; /* * FLASH * * 0x00000000-0x00200000 : "kernel" * 0x00200000-0x00a00000 : "rootfs" * 0x01d00000-0x01f00000 : "config" * 0x01c00000-0x01d00000 : "yamon" * 0x01d00000-0x01d40000 : "yamon env vars" * 0x00000000-0x00a00000 : "kernel+rootfs" */ static struct mtd_partition gpr_mtd_partitions[] = { { .name = "kernel", .size = 0x00200000, .offset = 0, }, { .name = "rootfs", .size = 0x00800000, .offset = MTDPART_OFS_APPEND, .mask_flags = MTD_WRITEABLE, }, { .name = "config", .size = 0x00200000, .offset = 0x01d00000, }, { .name = "yamon", .size = 0x00100000, .offset = 0x01c00000, }, { .name = "yamon env vars", .size = 0x00040000, .offset = MTDPART_OFS_APPEND, }, { .name = "kernel+rootfs", .size = 0x00a00000, .offset = 0, }, }; static struct physmap_flash_data gpr_flash_data = { .width = 4, .nr_parts = ARRAY_SIZE(gpr_mtd_partitions), .parts = gpr_mtd_partitions, }; static struct resource gpr_mtd_resource = { .start = 0x1e000000, .end = 0x1fffffff, .flags = IORESOURCE_MEM, }; static struct platform_device gpr_mtd_device = { .name = "physmap-flash", .dev = { .platform_data = &gpr_flash_data, }, .num_resources = 1, 
.resource = &gpr_mtd_resource, }; /* * LEDs */ static struct gpio_led gpr_gpio_leds[] = { { /* green */ .name = "gpr:green", .gpio = 4, .active_low = 1, }, { /* red */ .name = "gpr:red", .gpio = 5, .active_low = 1, } }; static struct gpio_led_platform_data gpr_led_data = { .num_leds = ARRAY_SIZE(gpr_gpio_leds), .leds = gpr_gpio_leds, }; static struct platform_device gpr_led_devices = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &gpr_led_data, } }; /* * I2C */ static struct i2c_gpio_platform_data gpr_i2c_data = { .sda_pin = 209, .sda_is_open_drain = 1, .scl_pin = 210, .scl_is_open_drain = 1, .udelay = 2, /* ~100 kHz */ .timeout = HZ, }; static struct platform_device gpr_i2c_device = { .name = "i2c-gpio", .id = -1, .dev.platform_data = &gpr_i2c_data, }; static struct i2c_board_info gpr_i2c_info[] __initdata = { { I2C_BOARD_INFO("lm83", 0x18), .type = "lm83" } }; static struct resource alchemy_pci_host_res[] = { [0] = { .start = AU1500_PCI_PHYS_ADDR, .end = AU1500_PCI_PHYS_ADDR + 0xfff, .flags = IORESOURCE_MEM, }, }; static int gpr_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin) { if ((slot == 0) && (pin == 1)) return AU1550_PCI_INTA; else if ((slot == 0) && (pin == 2)) return AU1550_PCI_INTB; return 0xff; } static struct alchemy_pci_platdata gpr_pci_pd = { .board_map_irq = gpr_map_pci_irq, .pci_cfg_set = PCI_CONFIG_AEN | PCI_CONFIG_R2H | PCI_CONFIG_R1H | PCI_CONFIG_CH | #if defined(__MIPSEB__) PCI_CONFIG_SIC_HWA_DAT | PCI_CONFIG_SM, #else 0, #endif }; static struct platform_device gpr_pci_host_dev = { .dev.platform_data = &gpr_pci_pd, .name = "alchemy-pci", .id = 0, .num_resources = ARRAY_SIZE(alchemy_pci_host_res), .resource = alchemy_pci_host_res, }; static struct platform_device *gpr_devices[] __initdata = { &gpr_wdt_device, &gpr_mtd_device, &gpr_i2c_device, &gpr_led_devices, }; static int __init gpr_pci_init(void) { return platform_device_register(&gpr_pci_host_dev); } /* must be arch_initcall; MIPS PCI scans busses in a subsys_initcall */ 
arch_initcall(gpr_pci_init); static int __init gpr_dev_init(void) { i2c_register_board_info(0, gpr_i2c_info, ARRAY_SIZE(gpr_i2c_info)); return platform_add_devices(gpr_devices, ARRAY_SIZE(gpr_devices)); } device_initcall(gpr_dev_init);
gpl-2.0
ED300/android_kernel_wingtech_msm8916
arch/arm/mach-omap1/ocpi.c
2976
2806
/* * linux/arch/arm/plat-omap/ocpi.c * * Minimal OCP bus support for omap16xx * * Copyright (C) 2003 - 2005 Nokia Corporation * Copyright (C) 2012 Texas Instruments, Inc. * Written by Tony Lindgren <tony@atomide.com> * * Modified for clock framework by Paul Mundt <paul.mundt@nokia.com>. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> #include <mach/hardware.h> #include "common.h" #define OCPI_BASE 0xfffec320 #define OCPI_FAULT (OCPI_BASE + 0x00) #define OCPI_CMD_FAULT (OCPI_BASE + 0x04) #define OCPI_SINT0 (OCPI_BASE + 0x08) #define OCPI_TABORT (OCPI_BASE + 0x0c) #define OCPI_SINT1 (OCPI_BASE + 0x10) #define OCPI_PROT (OCPI_BASE + 0x14) #define OCPI_SEC (OCPI_BASE + 0x18) /* USB OHCI OCPI access error registers */ #define HOSTUEADDR 0xfffba0e0 #define HOSTUESTATUS 0xfffba0e4 static struct clk *ocpi_ck; /* * Enables device access to OMAP buses via the OCPI bridge * FIXME: Add locking */ int ocpi_enable(void) { unsigned int val; if (!cpu_is_omap16xx()) return -ENODEV; /* Enable access for OHCI in OCPI */ val = omap_readl(OCPI_PROT); val &= ~0xff; /* val &= (1 << 0); Allow access only to EMIFS */ 
omap_writel(val, OCPI_PROT); val = omap_readl(OCPI_SEC); val &= ~0xff; omap_writel(val, OCPI_SEC); return 0; } EXPORT_SYMBOL(ocpi_enable); static int __init omap_ocpi_init(void) { if (!cpu_is_omap16xx()) return -ENODEV; ocpi_ck = clk_get(NULL, "l3_ocpi_ck"); if (IS_ERR(ocpi_ck)) return PTR_ERR(ocpi_ck); clk_enable(ocpi_ck); ocpi_enable(); pr_info("OMAP OCPI interconnect driver loaded\n"); return 0; } static void __exit omap_ocpi_exit(void) { /* REVISIT: Disable OCPI */ if (!cpu_is_omap16xx()) return; clk_disable(ocpi_ck); clk_put(ocpi_ck); } MODULE_AUTHOR("Tony Lindgren <tony@atomide.com>"); MODULE_DESCRIPTION("OMAP OCPI bus controller module"); MODULE_LICENSE("GPL"); module_init(omap_ocpi_init); module_exit(omap_ocpi_exit);
gpl-2.0
Hellybean/android_kernel_amazon_otter-common
net/wimax/op-msg.c
4000
12490
/* * Linux WiMAX * Generic messaging interface between userspace and driver/device * * * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com> * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * This implements a direct communication channel between user space and * the driver/device, by which free form messages can be sent back and * forth. * * This is intended for device-specific features, vendor quirks, etc. * * See include/net/wimax.h * * GENERIC NETLINK ENCODING AND CAPACITY * * A destination "pipe name" is added to each message; it is up to the * drivers to assign or use those names (if using them at all). * * Messages are encoded as a binary netlink attribute using nla_put() * using type NLA_UNSPEC (as some versions of libnl still in * deployment don't yet understand NLA_BINARY). * * The maximum capacity of this transport is PAGESIZE per message (so * the actual payload will be bit smaller depending on the * netlink/generic netlink attributes and headers). * * RECEPTION OF MESSAGES * * When a message is received from user space, it is passed verbatim * to the driver calling wimax_dev->op_msg_from_user(). The return * value from this function is passed back to user space as an ack * over the generic netlink protocol. * * The stack doesn't do any processing or interpretation of these * messages. 
* * SENDING MESSAGES * * Messages can be sent with wimax_msg(). * * If the message delivery needs to happen on a different context to * that of its creation, wimax_msg_alloc() can be used to get a * pointer to the message that can be delivered later on with * wimax_msg_send(). * * ROADMAP * * wimax_gnl_doit_msg_from_user() Process a message from user space * wimax_dev_get_by_genl_info() * wimax_dev->op_msg_from_user() Delivery of message to the driver * * wimax_msg() Send a message to user space * wimax_msg_alloc() * wimax_msg_send() */ #include <linux/device.h> #include <linux/slab.h> #include <net/genetlink.h> #include <linux/netdevice.h> #include <linux/wimax.h> #include <linux/security.h> #include "wimax-internal.h" #define D_SUBMODULE op_msg #include "debug-levels.h" /** * wimax_msg_alloc - Create a new skb for sending a message to userspace * * @wimax_dev: WiMAX device descriptor * @pipe_name: "named pipe" the message will be sent to * @msg: pointer to the message data to send * @size: size of the message to send (in bytes), including the header. * @gfp_flags: flags for memory allocation. * * Returns: %0 if ok, negative errno code on error * * Description: * * Allocates an skb that will contain the message to send to user * space over the messaging pipe and initializes it, copying the * payload. * * Once this call is done, you can deliver it with * wimax_msg_send(). * * IMPORTANT: * * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as * wimax_msg_send() depends on skb->data being placed at the * beginning of the user message. * * Unlike other WiMAX stack calls, this call can be used way early, * even before wimax_dev_add() is called, as long as the * wimax_dev->net_dev pointer is set to point to a proper * net_dev. This is so that drivers can use it early in case they need * to send stuff around or communicate with user space. 
*/ struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev, const char *pipe_name, const void *msg, size_t size, gfp_t gfp_flags) { int result; struct device *dev = wimax_dev_to_dev(wimax_dev); size_t msg_size; void *genl_msg; struct sk_buff *skb; msg_size = nla_total_size(size) + nla_total_size(sizeof(u32)) + (pipe_name ? nla_total_size(strlen(pipe_name)) : 0); result = -ENOMEM; skb = genlmsg_new(msg_size, gfp_flags); if (skb == NULL) goto error_new; genl_msg = genlmsg_put(skb, 0, 0, &wimax_gnl_family, 0, WIMAX_GNL_OP_MSG_TO_USER); if (genl_msg == NULL) { dev_err(dev, "no memory to create generic netlink message\n"); goto error_genlmsg_put; } result = nla_put_u32(skb, WIMAX_GNL_MSG_IFIDX, wimax_dev->net_dev->ifindex); if (result < 0) { dev_err(dev, "no memory to add ifindex attribute\n"); goto error_nla_put; } if (pipe_name) { result = nla_put_string(skb, WIMAX_GNL_MSG_PIPE_NAME, pipe_name); if (result < 0) { dev_err(dev, "no memory to add pipe_name attribute\n"); goto error_nla_put; } } result = nla_put(skb, WIMAX_GNL_MSG_DATA, size, msg); if (result < 0) { dev_err(dev, "no memory to add payload (msg %p size %zu) in " "attribute: %d\n", msg, size, result); goto error_nla_put; } genlmsg_end(skb, genl_msg); return skb; error_nla_put: error_genlmsg_put: error_new: nlmsg_free(skb); return ERR_PTR(result); } EXPORT_SYMBOL_GPL(wimax_msg_alloc); /** * wimax_msg_data_len - Return a pointer and size of a message's payload * * @msg: Pointer to a message created with wimax_msg_alloc() * @size: Pointer to where to store the message's size * * Returns the pointer to the message data. 
*/ const void *wimax_msg_data_len(struct sk_buff *msg, size_t *size) { struct nlmsghdr *nlh = (void *) msg->head; struct nlattr *nla; nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr), WIMAX_GNL_MSG_DATA); if (nla == NULL) { printk(KERN_ERR "Cannot find attribute WIMAX_GNL_MSG_DATA\n"); return NULL; } *size = nla_len(nla); return nla_data(nla); } EXPORT_SYMBOL_GPL(wimax_msg_data_len); /** * wimax_msg_data - Return a pointer to a message's payload * * @msg: Pointer to a message created with wimax_msg_alloc() */ const void *wimax_msg_data(struct sk_buff *msg) { struct nlmsghdr *nlh = (void *) msg->head; struct nlattr *nla; nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr), WIMAX_GNL_MSG_DATA); if (nla == NULL) { printk(KERN_ERR "Cannot find attribute WIMAX_GNL_MSG_DATA\n"); return NULL; } return nla_data(nla); } EXPORT_SYMBOL_GPL(wimax_msg_data); /** * wimax_msg_len - Return a message's payload length * * @msg: Pointer to a message created with wimax_msg_alloc() */ ssize_t wimax_msg_len(struct sk_buff *msg) { struct nlmsghdr *nlh = (void *) msg->head; struct nlattr *nla; nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr), WIMAX_GNL_MSG_DATA); if (nla == NULL) { printk(KERN_ERR "Cannot find attribute WIMAX_GNL_MSG_DATA\n"); return -EINVAL; } return nla_len(nla); } EXPORT_SYMBOL_GPL(wimax_msg_len); /** * wimax_msg_send - Send a pre-allocated message to user space * * @wimax_dev: WiMAX device descriptor * * @skb: &struct sk_buff returned by wimax_msg_alloc(). Note the * ownership of @skb is transferred to this function. * * Returns: 0 if ok, < 0 errno code on error * * Description: * * Sends a free-form message that was preallocated with * wimax_msg_alloc() and filled up. * * Assumes that once you pass an skb to this function for sending, it * owns it and will release it when done (on success). 
* * IMPORTANT: * * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as * wimax_msg_send() depends on skb->data being placed at the * beginning of the user message. * * Unlike other WiMAX stack calls, this call can be used way early, * even before wimax_dev_add() is called, as long as the * wimax_dev->net_dev pointer is set to point to a proper * net_dev. This is so that drivers can use it early in case they need * to send stuff around or communicate with user space. */ int wimax_msg_send(struct wimax_dev *wimax_dev, struct sk_buff *skb) { struct device *dev = wimax_dev_to_dev(wimax_dev); void *msg = skb->data; size_t size = skb->len; might_sleep(); d_printf(1, dev, "CTX: wimax msg, %zu bytes\n", size); d_dump(2, dev, msg, size); genlmsg_multicast(skb, 0, wimax_gnl_mcg.id, GFP_KERNEL); d_printf(1, dev, "CTX: genl multicast done\n"); return 0; } EXPORT_SYMBOL_GPL(wimax_msg_send); /** * wimax_msg - Send a message to user space * * @wimax_dev: WiMAX device descriptor (properly referenced) * @pipe_name: "named pipe" the message will be sent to * @buf: pointer to the message to send. * @size: size of the buffer pointed to by @buf (in bytes). * @gfp_flags: flags for memory allocation. * * Returns: %0 if ok, negative errno code on error. * * Description: * * Sends a free-form message to user space on the device @wimax_dev. * * NOTES: * * Once the @skb is given to this function, who will own it and will * release it when done (unless it returns error). 
*/ int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name, const void *buf, size_t size, gfp_t gfp_flags) { int result = -ENOMEM; struct sk_buff *skb; skb = wimax_msg_alloc(wimax_dev, pipe_name, buf, size, gfp_flags); if (IS_ERR(skb)) result = PTR_ERR(skb); else result = wimax_msg_send(wimax_dev, skb); return result; } EXPORT_SYMBOL_GPL(wimax_msg); static const struct nla_policy wimax_gnl_msg_policy[WIMAX_GNL_ATTR_MAX + 1] = { [WIMAX_GNL_MSG_IFIDX] = { .type = NLA_U32, }, [WIMAX_GNL_MSG_DATA] = { .type = NLA_UNSPEC, /* libnl doesn't grok BINARY yet */ }, }; /* * Relays a message from user space to the driver * * The skb is passed to the driver-specific function with the netlink * and generic netlink headers already stripped. * * This call will block while handling/relaying the message. */ static int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info) { int result, ifindex; struct wimax_dev *wimax_dev; struct device *dev; struct nlmsghdr *nlh = info->nlhdr; char *pipe_name; void *msg_buf; size_t msg_len; might_sleep(); d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); result = -ENODEV; if (info->attrs[WIMAX_GNL_MSG_IFIDX] == NULL) { printk(KERN_ERR "WIMAX_GNL_MSG_FROM_USER: can't find IFIDX " "attribute\n"); goto error_no_wimax_dev; } ifindex = nla_get_u32(info->attrs[WIMAX_GNL_MSG_IFIDX]); wimax_dev = wimax_dev_get_by_genl_info(info, ifindex); if (wimax_dev == NULL) goto error_no_wimax_dev; dev = wimax_dev_to_dev(wimax_dev); /* Unpack arguments */ result = -EINVAL; if (info->attrs[WIMAX_GNL_MSG_DATA] == NULL) { dev_err(dev, "WIMAX_GNL_MSG_FROM_USER: can't find MSG_DATA " "attribute\n"); goto error_no_data; } msg_buf = nla_data(info->attrs[WIMAX_GNL_MSG_DATA]); msg_len = nla_len(info->attrs[WIMAX_GNL_MSG_DATA]); if (info->attrs[WIMAX_GNL_MSG_PIPE_NAME] == NULL) pipe_name = NULL; else { struct nlattr *attr = info->attrs[WIMAX_GNL_MSG_PIPE_NAME]; size_t attr_len = nla_len(attr); /* libnl-1.1 does not yet support NLA_NUL_STRING */ 
result = -ENOMEM; pipe_name = kstrndup(nla_data(attr), attr_len + 1, GFP_KERNEL); if (pipe_name == NULL) goto error_alloc; pipe_name[attr_len] = 0; } mutex_lock(&wimax_dev->mutex); result = wimax_dev_is_ready(wimax_dev); if (result == -ENOMEDIUM) result = 0; if (result < 0) goto error_not_ready; result = -ENOSYS; if (wimax_dev->op_msg_from_user == NULL) goto error_noop; d_printf(1, dev, "CRX: nlmsghdr len %u type %u flags 0x%04x seq 0x%x pid %u\n", nlh->nlmsg_len, nlh->nlmsg_type, nlh->nlmsg_flags, nlh->nlmsg_seq, nlh->nlmsg_pid); d_printf(1, dev, "CRX: wimax message %zu bytes\n", msg_len); d_dump(2, dev, msg_buf, msg_len); result = wimax_dev->op_msg_from_user(wimax_dev, pipe_name, msg_buf, msg_len, info); error_noop: error_not_ready: mutex_unlock(&wimax_dev->mutex); error_alloc: kfree(pipe_name); error_no_data: dev_put(wimax_dev->net_dev); error_no_wimax_dev: d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result); return result; } /* * Generic Netlink glue */ struct genl_ops wimax_gnl_msg_from_user = { .cmd = WIMAX_GNL_OP_MSG_FROM_USER, .flags = GENL_ADMIN_PERM, .policy = wimax_gnl_msg_policy, .doit = wimax_gnl_doit_msg_from_user, .dumpit = NULL, };
gpl-2.0
ULL-ETSII-SistemasEmpotrados/Mirasberry
arch/um/os-Linux/user_syms.c
4512
3262
#include <linux/types.h> #include <linux/module.h> /* Some of this are builtin function (some are not but could in the future), * so I *must* declare good prototypes for them and then EXPORT them. * The kernel code uses the macro defined by include/linux/string.h, * so I undef macros; the userspace code does not include that and I * add an EXPORT for the glibc one. */ #undef strlen #undef strstr #undef memcpy #undef memset extern size_t strlen(const char *); extern void *memmove(void *, const void *, size_t); extern void *memset(void *, int, size_t); extern int printf(const char *, ...); /* If it's not defined, the export is included in lib/string.c.*/ #ifdef __HAVE_ARCH_STRSTR EXPORT_SYMBOL(strstr); #endif #ifndef __x86_64__ extern void *memcpy(void *, const void *, size_t); EXPORT_SYMBOL(memcpy); #endif EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(printf); /* Here, instead, I can provide a fake prototype. Yes, someone cares: genksyms. * However, the modules will use the CRC defined *here*, no matter if it is * good; so the versions of these symbols will always match */ #define EXPORT_SYMBOL_PROTO(sym) \ int sym(void); \ EXPORT_SYMBOL(sym); extern void readdir64(void) __attribute__((weak)); EXPORT_SYMBOL(readdir64); extern void truncate64(void) __attribute__((weak)); EXPORT_SYMBOL(truncate64); #ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA EXPORT_SYMBOL(vsyscall_ehdr); EXPORT_SYMBOL(vsyscall_end); #endif EXPORT_SYMBOL_PROTO(__errno_location); EXPORT_SYMBOL_PROTO(access); EXPORT_SYMBOL_PROTO(open); EXPORT_SYMBOL_PROTO(open64); EXPORT_SYMBOL_PROTO(close); EXPORT_SYMBOL_PROTO(read); EXPORT_SYMBOL_PROTO(write); EXPORT_SYMBOL_PROTO(dup2); EXPORT_SYMBOL_PROTO(__xstat); EXPORT_SYMBOL_PROTO(__lxstat); EXPORT_SYMBOL_PROTO(__lxstat64); EXPORT_SYMBOL_PROTO(__fxstat64); EXPORT_SYMBOL_PROTO(lseek); EXPORT_SYMBOL_PROTO(lseek64); EXPORT_SYMBOL_PROTO(chown); EXPORT_SYMBOL_PROTO(fchown); EXPORT_SYMBOL_PROTO(truncate); EXPORT_SYMBOL_PROTO(ftruncate64); 
EXPORT_SYMBOL_PROTO(utime); EXPORT_SYMBOL_PROTO(utimes); EXPORT_SYMBOL_PROTO(futimes); EXPORT_SYMBOL_PROTO(chmod); EXPORT_SYMBOL_PROTO(fchmod); EXPORT_SYMBOL_PROTO(rename); EXPORT_SYMBOL_PROTO(__xmknod); EXPORT_SYMBOL_PROTO(symlink); EXPORT_SYMBOL_PROTO(link); EXPORT_SYMBOL_PROTO(unlink); EXPORT_SYMBOL_PROTO(readlink); EXPORT_SYMBOL_PROTO(mkdir); EXPORT_SYMBOL_PROTO(rmdir); EXPORT_SYMBOL_PROTO(opendir); EXPORT_SYMBOL_PROTO(readdir); EXPORT_SYMBOL_PROTO(closedir); EXPORT_SYMBOL_PROTO(seekdir); EXPORT_SYMBOL_PROTO(telldir); EXPORT_SYMBOL_PROTO(ioctl); EXPORT_SYMBOL_PROTO(pread64); EXPORT_SYMBOL_PROTO(pwrite64); EXPORT_SYMBOL_PROTO(statfs); EXPORT_SYMBOL_PROTO(statfs64); EXPORT_SYMBOL_PROTO(getuid); EXPORT_SYMBOL_PROTO(fsync); EXPORT_SYMBOL_PROTO(fdatasync); EXPORT_SYMBOL_PROTO(lstat64); EXPORT_SYMBOL_PROTO(fstat64); EXPORT_SYMBOL_PROTO(mknod); /* Export symbols used by GCC for the stack protector. */ extern void __stack_smash_handler(void *) __attribute__((weak)); EXPORT_SYMBOL(__stack_smash_handler); extern long __guard __attribute__((weak)); EXPORT_SYMBOL(__guard); #ifdef _FORTIFY_SOURCE extern int __sprintf_chk(char *str, int flag, size_t strlen, const char *format); EXPORT_SYMBOL(__sprintf_chk); #endif
gpl-2.0
Motorhead1991/android_kernel_blu_studio5qcom
drivers/net/wireless/ath/ath9k/debug.c
4768
48911
/* * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/export.h> #include <asm/unaligned.h> #include "ath9k.h" #define REG_WRITE_D(_ah, _reg, _val) \ ath9k_hw_common(_ah)->ops->write((_ah), (_val), (_reg)) #define REG_READ_D(_ah, _reg) \ ath9k_hw_common(_ah)->ops->read((_ah), (_reg)) static ssize_t ath9k_debugfs_read_buf(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { u8 *buf = file->private_data; return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); } static int ath9k_debugfs_release_buf(struct inode *inode, struct file *file) { vfree(file->private_data); return 0; } #ifdef CONFIG_ATH_DEBUG static ssize_t read_file_debug(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; struct ath_common *common = ath9k_hw_common(sc->sc_ah); char buf[32]; unsigned int len; len = sprintf(buf, "0x%08x\n", common->debug_mask); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t write_file_debug(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; struct ath_common *common = ath9k_hw_common(sc->sc_ah); 
unsigned long mask; char buf[32]; ssize_t len; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; if (strict_strtoul(buf, 0, &mask)) return -EINVAL; common->debug_mask = mask; return count; } static const struct file_operations fops_debug = { .read = read_file_debug, .write = write_file_debug, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; #endif #define DMA_BUF_LEN 1024 static ssize_t read_file_tx_chainmask(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; struct ath_hw *ah = sc->sc_ah; char buf[32]; unsigned int len; len = sprintf(buf, "0x%08x\n", ah->txchainmask); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t write_file_tx_chainmask(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; struct ath_hw *ah = sc->sc_ah; unsigned long mask; char buf[32]; ssize_t len; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; if (strict_strtoul(buf, 0, &mask)) return -EINVAL; ah->txchainmask = mask; ah->caps.tx_chainmask = mask; return count; } static const struct file_operations fops_tx_chainmask = { .read = read_file_tx_chainmask, .write = write_file_tx_chainmask, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t read_file_rx_chainmask(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; struct ath_hw *ah = sc->sc_ah; char buf[32]; unsigned int len; len = sprintf(buf, "0x%08x\n", ah->rxchainmask); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t write_file_rx_chainmask(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; struct ath_hw *ah = sc->sc_ah; unsigned long mask; char 
buf[32]; ssize_t len; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; if (strict_strtoul(buf, 0, &mask)) return -EINVAL; ah->rxchainmask = mask; ah->caps.rx_chainmask = mask; return count; } static const struct file_operations fops_rx_chainmask = { .read = read_file_rx_chainmask, .write = write_file_rx_chainmask, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t read_file_disable_ani(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; struct ath_common *common = ath9k_hw_common(sc->sc_ah); char buf[32]; unsigned int len; len = sprintf(buf, "%d\n", common->disable_ani); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t write_file_disable_ani(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; struct ath_common *common = ath9k_hw_common(sc->sc_ah); unsigned long disable_ani; char buf[32]; ssize_t len; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; if (strict_strtoul(buf, 0, &disable_ani)) return -EINVAL; common->disable_ani = !!disable_ani; if (disable_ani) { sc->sc_flags &= ~SC_OP_ANI_RUN; del_timer_sync(&common->ani.timer); } else { sc->sc_flags |= SC_OP_ANI_RUN; ath_start_ani(common); } return count; } static const struct file_operations fops_disable_ani = { .read = read_file_disable_ani, .write = write_file_disable_ani, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t read_file_dma(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; struct ath_hw *ah = sc->sc_ah; char *buf; int retval; unsigned int len = 0; u32 val[ATH9K_NUM_DMA_DEBUG_REGS]; int i, qcuOffset = 0, dcuOffset = 0; u32 *qcuBase = &val[0], *dcuBase = &val[4]; buf = kmalloc(DMA_BUF_LEN, 
GFP_KERNEL); if (!buf) return -ENOMEM; ath9k_ps_wakeup(sc); REG_WRITE_D(ah, AR_MACMISC, ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) | (AR_MACMISC_MISC_OBS_BUS_1 << AR_MACMISC_MISC_OBS_BUS_MSB_S))); len += snprintf(buf + len, DMA_BUF_LEN - len, "Raw DMA Debug values:\n"); for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) { if (i % 4 == 0) len += snprintf(buf + len, DMA_BUF_LEN - len, "\n"); val[i] = REG_READ_D(ah, AR_DMADBG_0 + (i * sizeof(u32))); len += snprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ", i, val[i]); } len += snprintf(buf + len, DMA_BUF_LEN - len, "\n\n"); len += snprintf(buf + len, DMA_BUF_LEN - len, "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n"); for (i = 0; i < ATH9K_NUM_QUEUES; i++, qcuOffset += 4, dcuOffset += 5) { if (i == 8) { qcuOffset = 0; qcuBase++; } if (i == 6) { dcuOffset = 0; dcuBase++; } len += snprintf(buf + len, DMA_BUF_LEN - len, "%2d %2x %1x %2x %2x\n", i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset, (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3), val[2] & (0x7 << (i * 3)) >> (i * 3), (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset); } len += snprintf(buf + len, DMA_BUF_LEN - len, "\n"); len += snprintf(buf + len, DMA_BUF_LEN - len, "qcu_stitch state: %2x qcu_fetch state: %2x\n", (val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22); len += snprintf(buf + len, DMA_BUF_LEN - len, "qcu_complete state: %2x dcu_complete state: %2x\n", (val[3] & 0x1c000000) >> 26, (val[6] & 0x3)); len += snprintf(buf + len, DMA_BUF_LEN - len, "dcu_arb state: %2x dcu_fp state: %2x\n", (val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27); len += snprintf(buf + len, DMA_BUF_LEN - len, "chan_idle_dur: %3d chan_idle_dur_valid: %1d\n", (val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10); len += snprintf(buf + len, DMA_BUF_LEN - len, "txfifo_valid_0: %1d txfifo_valid_1: %1d\n", (val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12); len += snprintf(buf + len, DMA_BUF_LEN - len, "txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n", 
(val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17); len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n", REG_READ_D(ah, AR_OBS_BUS_1)); len += snprintf(buf + len, DMA_BUF_LEN - len, "AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR)); ath9k_ps_restore(sc); if (len > DMA_BUF_LEN) len = DMA_BUF_LEN; retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); return retval; } static const struct file_operations fops_dma = { .read = read_file_dma, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status) { if (status) sc->debug.stats.istats.total++; if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { if (status & ATH9K_INT_RXLP) sc->debug.stats.istats.rxlp++; if (status & ATH9K_INT_RXHP) sc->debug.stats.istats.rxhp++; if (status & ATH9K_INT_BB_WATCHDOG) sc->debug.stats.istats.bb_watchdog++; } else { if (status & ATH9K_INT_RX) sc->debug.stats.istats.rxok++; } if (status & ATH9K_INT_RXEOL) sc->debug.stats.istats.rxeol++; if (status & ATH9K_INT_RXORN) sc->debug.stats.istats.rxorn++; if (status & ATH9K_INT_TX) sc->debug.stats.istats.txok++; if (status & ATH9K_INT_TXURN) sc->debug.stats.istats.txurn++; if (status & ATH9K_INT_MIB) sc->debug.stats.istats.mib++; if (status & ATH9K_INT_RXPHY) sc->debug.stats.istats.rxphyerr++; if (status & ATH9K_INT_RXKCM) sc->debug.stats.istats.rx_keycache_miss++; if (status & ATH9K_INT_SWBA) sc->debug.stats.istats.swba++; if (status & ATH9K_INT_BMISS) sc->debug.stats.istats.bmiss++; if (status & ATH9K_INT_BNR) sc->debug.stats.istats.bnr++; if (status & ATH9K_INT_CST) sc->debug.stats.istats.cst++; if (status & ATH9K_INT_GTT) sc->debug.stats.istats.gtt++; if (status & ATH9K_INT_TIM) sc->debug.stats.istats.tim++; if (status & ATH9K_INT_CABEND) sc->debug.stats.istats.cabend++; if (status & ATH9K_INT_DTIMSYNC) sc->debug.stats.istats.dtimsync++; if (status & ATH9K_INT_DTIM) sc->debug.stats.istats.dtim++; if (status & 
ATH9K_INT_TSFOOR) sc->debug.stats.istats.tsfoor++; } static ssize_t read_file_interrupt(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; char buf[512]; unsigned int len = 0; if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp); len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "RXHP", sc->debug.stats.istats.rxhp); len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "WATCHDOG", sc->debug.stats.istats.bb_watchdog); } else { len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok); } len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "RXEOL", sc->debug.stats.istats.rxeol); len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "RXORN", sc->debug.stats.istats.rxorn); len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "TX", sc->debug.stats.istats.txok); len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "TXURN", sc->debug.stats.istats.txurn); len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "MIB", sc->debug.stats.istats.mib); len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "RXPHY", sc->debug.stats.istats.rxphyerr); len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "RXKCM", sc->debug.stats.istats.rx_keycache_miss); len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "SWBA", sc->debug.stats.istats.swba); len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "BMISS", sc->debug.stats.istats.bmiss); len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "BNR", sc->debug.stats.istats.bnr); len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "CST", sc->debug.stats.istats.cst); len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "GTT", sc->debug.stats.istats.gtt); len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "TIM", 
sc->debug.stats.istats.tim); len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "CABEND", sc->debug.stats.istats.cabend); len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "DTIMSYNC", sc->debug.stats.istats.dtimsync); len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "DTIM", sc->debug.stats.istats.dtim); len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "TSFOOR", sc->debug.stats.istats.tsfoor); len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "TOTAL", sc->debug.stats.istats.total); if (len > sizeof(buf)) len = sizeof(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_interrupt = { .read = read_file_interrupt, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; #define PR_QNUM(_n) sc->tx.txq_map[_n]->axq_qnum #define PR(str, elem) \ do { \ len += snprintf(buf + len, size - len, \ "%s%13u%11u%10u%10u\n", str, \ sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].elem, \ sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].elem, \ sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].elem, \ sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].elem); \ if (len >= size) \ goto done; \ } while(0) #define PRX(str, elem) \ do { \ len += snprintf(buf + len, size - len, \ "%s%13u%11u%10u%10u\n", str, \ (unsigned int)(sc->tx.txq_map[WME_AC_BE]->elem), \ (unsigned int)(sc->tx.txq_map[WME_AC_BK]->elem), \ (unsigned int)(sc->tx.txq_map[WME_AC_VI]->elem), \ (unsigned int)(sc->tx.txq_map[WME_AC_VO]->elem)); \ if (len >= size) \ goto done; \ } while(0) #define PRQLE(str, elem) \ do { \ len += snprintf(buf + len, size - len, \ "%s%13i%11i%10i%10i\n", str, \ list_empty(&sc->tx.txq_map[WME_AC_BE]->elem), \ list_empty(&sc->tx.txq_map[WME_AC_BK]->elem), \ list_empty(&sc->tx.txq_map[WME_AC_VI]->elem), \ list_empty(&sc->tx.txq_map[WME_AC_VO]->elem)); \ if (len >= size) \ goto done; \ } while (0) static ssize_t read_file_xmit(struct file *file, char __user *user_buf, size_t count, loff_t 
*ppos)
{
	struct ath_softc *sc = file->private_data;
	char *buf;
	unsigned int len = 0, size = 8000;
	int i;
	ssize_t retval = 0;
	char tmp[32];

	buf = kzalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	len += sprintf(buf, "Num-Tx-Queues: %i tx-queues-setup: 0x%x"
		       " poll-work-seen: %u\n"
		       "%30s %10s%10s%10s\n\n",
		       ATH9K_NUM_TX_QUEUES, sc->tx.txqsetup,
		       sc->tx_complete_poll_work_seen,
		       "BE", "BK", "VI", "VO");

	/* Per-AC MPDU/AMPDU counters (PR bails out via 'done' on overflow). */
	PR("MPDUs Queued: ", queued);
	PR("MPDUs Completed: ", completed);
	PR("MPDUs XRetried: ", xretries);
	PR("Aggregates: ", a_aggr);
	PR("AMPDUs Queued HW:", a_queued_hw);
	PR("AMPDUs Queued SW:", a_queued_sw);
	PR("AMPDUs Completed:", a_completed);
	PR("AMPDUs Retried: ", a_retries);
	PR("AMPDUs XRetried: ", a_xretries);
	PR("FIFO Underrun: ", fifo_underrun);
	PR("TXOP Exceeded: ", xtxop);
	PR("TXTIMER Expiry: ", timer_exp);
	PR("DESC CFG Error: ", desc_cfg_err);
	PR("DATA Underrun: ", data_underrun);
	PR("DELIM Underrun: ", delim_underrun);
	PR("TX-Pkts-All: ", tx_pkts_all);
	PR("TX-Bytes-All: ", tx_bytes_all);
	PR("hw-put-tx-buf: ", puttxbuf);
	PR("hw-tx-start: ", txstart);
	PR("hw-tx-proc-desc: ", txprocdesc);

	len += snprintf(buf + len, size - len,
			"%s%11p%11p%10p%10p\n", "txq-memory-address:",
			sc->tx.txq_map[WME_AC_BE],
			sc->tx.txq_map[WME_AC_BK],
			sc->tx.txq_map[WME_AC_VI],
			sc->tx.txq_map[WME_AC_VO]);
	if (len >= size)
		goto done;

	/* Live per-queue software state. */
	PRX("axq-qnum: ", axq_qnum);
	PRX("axq-depth: ", axq_depth);
	PRX("axq-ampdu_depth: ", axq_ampdu_depth);
	PRX("axq-stopped ", stopped);
	PRX("tx-in-progress ", axq_tx_inprogress);
	PRX("pending-frames ", pending_frames);
	PRX("txq_headidx: ", txq_headidx);
	/* Fix copy-paste bug: the tailidx row was printing txq_headidx. */
	PRX("txq_tailidx: ", txq_tailidx);

	PRQLE("axq_q empty: ", axq_q);
	PRQLE("axq_acq empty: ", axq_acq);
	for (i = 0; i < ATH_TXFIFO_DEPTH; i++) {
		/* snprintf already NUL-terminates; no need for sizeof - 1 */
		snprintf(tmp, sizeof(tmp), "txq_fifo[%i] empty: ", i);
		PRQLE(tmp, txq_fifo[i]);
	}

	/* Print out more detailed queue-info */
	for (i = 0; i <= WME_AC_BK; i++) {
		struct ath_txq *txq = &(sc->tx.txq[i]);
		struct ath_atx_ac *ac;
		struct ath_atx_tid *tid;
		if (len >= size)
goto done; spin_lock_bh(&txq->axq_lock); if (!list_empty(&txq->axq_acq)) { ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); len += snprintf(buf + len, size - len, "txq[%i] first-ac: %p sched: %i\n", i, ac, ac->sched); if (list_empty(&ac->tid_q) || (len >= size)) goto done_for; tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list); len += snprintf(buf + len, size - len, " first-tid: %p sched: %i paused: %i\n", tid, tid->sched, tid->paused); } done_for: spin_unlock_bh(&txq->axq_lock); } done: if (len > size) len = size; retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); return retval; } static ssize_t read_file_stations(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; char *buf; unsigned int len = 0, size = 64000; struct ath_node *an = NULL; ssize_t retval = 0; int q; buf = kzalloc(size, GFP_KERNEL); if (buf == NULL) return -ENOMEM; len += snprintf(buf + len, size - len, "Stations:\n" " tid: addr sched paused buf_q-empty an ac baw\n" " ac: addr sched tid_q-empty txq\n"); spin_lock(&sc->nodes_lock); list_for_each_entry(an, &sc->nodes, list) { unsigned short ma = an->maxampdu; if (ma == 0) ma = 65535; /* see ath_lookup_rate */ len += snprintf(buf + len, size - len, "iface: %pM sta: %pM max-ampdu: %hu mpdu-density: %uus\n", an->vif->addr, an->sta->addr, ma, (unsigned int)(an->mpdudensity)); if (len >= size) goto done; for (q = 0; q < WME_NUM_TID; q++) { struct ath_atx_tid *tid = &(an->tid[q]); len += snprintf(buf + len, size - len, " tid: %p %s %s %i %p %p %hu\n", tid, tid->sched ? "sched" : "idle", tid->paused ? "paused" : "running", skb_queue_empty(&tid->buf_q), tid->an, tid->ac, tid->baw_size); if (len >= size) goto done; } for (q = 0; q < WME_NUM_AC; q++) { struct ath_atx_ac *ac = &(an->ac[q]); len += snprintf(buf + len, size - len, " ac: %p %s %i %p\n", ac, ac->sched ? 
"sched" : "idle", list_empty(&ac->tid_q), ac->txq); if (len >= size) goto done; } } done: spin_unlock(&sc->nodes_lock); if (len > size) len = size; retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); return retval; } static ssize_t read_file_misc(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ieee80211_hw *hw = sc->hw; struct ath9k_vif_iter_data iter_data; char buf[512]; unsigned int len = 0; ssize_t retval = 0; unsigned int reg; u32 rxfilter; len += snprintf(buf + len, sizeof(buf) - len, "BSSID: %pM\n", common->curbssid); len += snprintf(buf + len, sizeof(buf) - len, "BSSID-MASK: %pM\n", common->bssidmask); len += snprintf(buf + len, sizeof(buf) - len, "OPMODE: %s\n", ath_opmode_to_string(sc->sc_ah->opmode)); ath9k_ps_wakeup(sc); rxfilter = ath9k_hw_getrxfilter(sc->sc_ah); ath9k_ps_restore(sc); len += snprintf(buf + len, sizeof(buf) - len, "RXFILTER: 0x%x", rxfilter); if (rxfilter & ATH9K_RX_FILTER_UCAST) len += snprintf(buf + len, sizeof(buf) - len, " UCAST"); if (rxfilter & ATH9K_RX_FILTER_MCAST) len += snprintf(buf + len, sizeof(buf) - len, " MCAST"); if (rxfilter & ATH9K_RX_FILTER_BCAST) len += snprintf(buf + len, sizeof(buf) - len, " BCAST"); if (rxfilter & ATH9K_RX_FILTER_CONTROL) len += snprintf(buf + len, sizeof(buf) - len, " CONTROL"); if (rxfilter & ATH9K_RX_FILTER_BEACON) len += snprintf(buf + len, sizeof(buf) - len, " BEACON"); if (rxfilter & ATH9K_RX_FILTER_PROM) len += snprintf(buf + len, sizeof(buf) - len, " PROM"); if (rxfilter & ATH9K_RX_FILTER_PROBEREQ) len += snprintf(buf + len, sizeof(buf) - len, " PROBEREQ"); if (rxfilter & ATH9K_RX_FILTER_PHYERR) len += snprintf(buf + len, sizeof(buf) - len, " PHYERR"); if (rxfilter & ATH9K_RX_FILTER_MYBEACON) len += snprintf(buf + len, sizeof(buf) - len, " MYBEACON"); if (rxfilter & ATH9K_RX_FILTER_COMP_BAR) len += snprintf(buf + len, sizeof(buf) - len, 
" COMP_BAR"); if (rxfilter & ATH9K_RX_FILTER_PSPOLL) len += snprintf(buf + len, sizeof(buf) - len, " PSPOLL"); if (rxfilter & ATH9K_RX_FILTER_PHYRADAR) len += snprintf(buf + len, sizeof(buf) - len, " PHYRADAR"); if (rxfilter & ATH9K_RX_FILTER_MCAST_BCAST_ALL) len += snprintf(buf + len, sizeof(buf) - len, " MCAST_BCAST_ALL"); if (rxfilter & ATH9K_RX_FILTER_CONTROL_WRAPPER) len += snprintf(buf + len, sizeof(buf) - len, " CONTROL_WRAPPER"); len += snprintf(buf + len, sizeof(buf) - len, "\n"); reg = sc->sc_ah->imask; len += snprintf(buf + len, sizeof(buf) - len, "INTERRUPT-MASK: 0x%x", reg); if (reg & ATH9K_INT_SWBA) len += snprintf(buf + len, sizeof(buf) - len, " SWBA"); if (reg & ATH9K_INT_BMISS) len += snprintf(buf + len, sizeof(buf) - len, " BMISS"); if (reg & ATH9K_INT_CST) len += snprintf(buf + len, sizeof(buf) - len, " CST"); if (reg & ATH9K_INT_RX) len += snprintf(buf + len, sizeof(buf) - len, " RX"); if (reg & ATH9K_INT_RXHP) len += snprintf(buf + len, sizeof(buf) - len, " RXHP"); if (reg & ATH9K_INT_RXLP) len += snprintf(buf + len, sizeof(buf) - len, " RXLP"); if (reg & ATH9K_INT_BB_WATCHDOG) len += snprintf(buf + len, sizeof(buf) - len, " BB_WATCHDOG"); len += snprintf(buf + len, sizeof(buf) - len, "\n"); ath9k_calculate_iter_data(hw, NULL, &iter_data); len += snprintf(buf + len, sizeof(buf) - len, "VIF-COUNTS: AP: %i STA: %i MESH: %i WDS: %i" " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n", iter_data.naps, iter_data.nstations, iter_data.nmeshes, iter_data.nwds, iter_data.nadhocs, sc->nvifs, sc->nbcnvifs); if (len > sizeof(buf)) len = sizeof(buf); retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); return retval; } static ssize_t read_file_reset(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; char buf[512]; unsigned int len = 0; len += snprintf(buf + len, sizeof(buf) - len, "%17s: %2d\n", "Baseband Hang", sc->debug.stats.reset[RESET_TYPE_BB_HANG]); len += snprintf(buf + len, 
sizeof(buf) - len, "%17s: %2d\n", "Baseband Watchdog", sc->debug.stats.reset[RESET_TYPE_BB_WATCHDOG]); len += snprintf(buf + len, sizeof(buf) - len, "%17s: %2d\n", "Fatal HW Error", sc->debug.stats.reset[RESET_TYPE_FATAL_INT]); len += snprintf(buf + len, sizeof(buf) - len, "%17s: %2d\n", "TX HW error", sc->debug.stats.reset[RESET_TYPE_TX_ERROR]); len += snprintf(buf + len, sizeof(buf) - len, "%17s: %2d\n", "TX Path Hang", sc->debug.stats.reset[RESET_TYPE_TX_HANG]); len += snprintf(buf + len, sizeof(buf) - len, "%17s: %2d\n", "PLL RX Hang", sc->debug.stats.reset[RESET_TYPE_PLL_HANG]); if (len > sizeof(buf)) len = sizeof(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf, struct ath_tx_status *ts, struct ath_txq *txq, unsigned int flags) { #define TX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].ts\ [sc->debug.tsidx].c) int qnum = txq->axq_qnum; TX_STAT_INC(qnum, tx_pkts_all); sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len; if (bf_isampdu(bf)) { if (flags & ATH_TX_ERROR) TX_STAT_INC(qnum, a_xretries); else TX_STAT_INC(qnum, a_completed); } else { if (ts->ts_status & ATH9K_TXERR_XRETRY) TX_STAT_INC(qnum, xretries); else TX_STAT_INC(qnum, completed); } if (ts->ts_status & ATH9K_TXERR_FIFO) TX_STAT_INC(qnum, fifo_underrun); if (ts->ts_status & ATH9K_TXERR_XTXOP) TX_STAT_INC(qnum, xtxop); if (ts->ts_status & ATH9K_TXERR_TIMER_EXPIRED) TX_STAT_INC(qnum, timer_exp); if (ts->ts_flags & ATH9K_TX_DESC_CFG_ERR) TX_STAT_INC(qnum, desc_cfg_err); if (ts->ts_flags & ATH9K_TX_DATA_UNDERRUN) TX_STAT_INC(qnum, data_underrun); if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN) TX_STAT_INC(qnum, delim_underrun); #ifdef CONFIG_ATH9K_MAC_DEBUG spin_lock(&sc->debug.samp_lock); TX_SAMP_DBG(jiffies) = jiffies; TX_SAMP_DBG(rssi_ctl0) = ts->ts_rssi_ctl0; TX_SAMP_DBG(rssi_ctl1) = ts->ts_rssi_ctl1; TX_SAMP_DBG(rssi_ctl2) = ts->ts_rssi_ctl2; TX_SAMP_DBG(rssi_ext0) = ts->ts_rssi_ext0; 
TX_SAMP_DBG(rssi_ext1) = ts->ts_rssi_ext1; TX_SAMP_DBG(rssi_ext2) = ts->ts_rssi_ext2; TX_SAMP_DBG(rateindex) = ts->ts_rateindex; TX_SAMP_DBG(isok) = !!(ts->ts_status & ATH9K_TXERR_MASK); TX_SAMP_DBG(rts_fail_cnt) = ts->ts_shortretry; TX_SAMP_DBG(data_fail_cnt) = ts->ts_longretry; TX_SAMP_DBG(rssi) = ts->ts_rssi; TX_SAMP_DBG(tid) = ts->tid; TX_SAMP_DBG(qid) = ts->qid; if (ts->ts_flags & ATH9K_TX_BA) { TX_SAMP_DBG(ba_low) = ts->ba_low; TX_SAMP_DBG(ba_high) = ts->ba_high; } else { TX_SAMP_DBG(ba_low) = 0; TX_SAMP_DBG(ba_high) = 0; } sc->debug.tsidx = (sc->debug.tsidx + 1) % ATH_DBG_MAX_SAMPLES; spin_unlock(&sc->debug.samp_lock); #endif #undef TX_SAMP_DBG } static const struct file_operations fops_xmit = { .read = read_file_xmit, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static const struct file_operations fops_stations = { .read = read_file_stations, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static const struct file_operations fops_misc = { .read = read_file_misc, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static const struct file_operations fops_reset = { .read = read_file_reset, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t read_file_recv(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { #define PHY_ERR(s, p) \ len += snprintf(buf + len, size - len, "%22s : %10u\n", s, \ sc->debug.stats.rxstats.phy_err_stats[p]); struct ath_softc *sc = file->private_data; char *buf; unsigned int len = 0, size = 1600; ssize_t retval = 0; buf = kzalloc(size, GFP_KERNEL); if (buf == NULL) return -ENOMEM; len += snprintf(buf + len, size - len, "%22s : %10u\n", "CRC ERR", sc->debug.stats.rxstats.crc_err); len += snprintf(buf + len, size - len, "%22s : %10u\n", "DECRYPT CRC ERR", sc->debug.stats.rxstats.decrypt_crc_err); len += snprintf(buf + len, size - len, "%22s : %10u\n", "PHY ERR", sc->debug.stats.rxstats.phy_err); len += snprintf(buf 
+ len, size - len, "%22s : %10u\n", "MIC ERR", sc->debug.stats.rxstats.mic_err); len += snprintf(buf + len, size - len, "%22s : %10u\n", "PRE-DELIM CRC ERR", sc->debug.stats.rxstats.pre_delim_crc_err); len += snprintf(buf + len, size - len, "%22s : %10u\n", "POST-DELIM CRC ERR", sc->debug.stats.rxstats.post_delim_crc_err); len += snprintf(buf + len, size - len, "%22s : %10u\n", "DECRYPT BUSY ERR", sc->debug.stats.rxstats.decrypt_busy_err); PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN); PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING); PHY_ERR("PARITY ERR", ATH9K_PHYERR_PARITY); PHY_ERR("RATE ERR", ATH9K_PHYERR_RATE); PHY_ERR("LENGTH ERR", ATH9K_PHYERR_LENGTH); PHY_ERR("RADAR ERR", ATH9K_PHYERR_RADAR); PHY_ERR("SERVICE ERR", ATH9K_PHYERR_SERVICE); PHY_ERR("TOR ERR", ATH9K_PHYERR_TOR); PHY_ERR("OFDM-TIMING ERR", ATH9K_PHYERR_OFDM_TIMING); PHY_ERR("OFDM-SIGNAL-PARITY ERR", ATH9K_PHYERR_OFDM_SIGNAL_PARITY); PHY_ERR("OFDM-RATE ERR", ATH9K_PHYERR_OFDM_RATE_ILLEGAL); PHY_ERR("OFDM-LENGTH ERR", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL); PHY_ERR("OFDM-POWER-DROP ERR", ATH9K_PHYERR_OFDM_POWER_DROP); PHY_ERR("OFDM-SERVICE ERR", ATH9K_PHYERR_OFDM_SERVICE); PHY_ERR("OFDM-RESTART ERR", ATH9K_PHYERR_OFDM_RESTART); PHY_ERR("FALSE-RADAR-EXT ERR", ATH9K_PHYERR_FALSE_RADAR_EXT); PHY_ERR("CCK-TIMING ERR", ATH9K_PHYERR_CCK_TIMING); PHY_ERR("CCK-HEADER-CRC ERR", ATH9K_PHYERR_CCK_HEADER_CRC); PHY_ERR("CCK-RATE ERR", ATH9K_PHYERR_CCK_RATE_ILLEGAL); PHY_ERR("CCK-SERVICE ERR", ATH9K_PHYERR_CCK_SERVICE); PHY_ERR("CCK-RESTART ERR", ATH9K_PHYERR_CCK_RESTART); PHY_ERR("CCK-LENGTH ERR", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL); PHY_ERR("CCK-POWER-DROP ERR", ATH9K_PHYERR_CCK_POWER_DROP); PHY_ERR("HT-CRC ERR", ATH9K_PHYERR_HT_CRC_ERROR); PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL); PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL); len += snprintf(buf + len, size - len, "%22s : %10u\n", "RX-Pkts-All", sc->debug.stats.rxstats.rx_pkts_all); len += snprintf(buf + len, size - len, "%22s : %10u\n", 
"RX-Bytes-All", sc->debug.stats.rxstats.rx_bytes_all); if (len > size) len = size; retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); return retval; #undef PHY_ERR } void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs) { #define RX_STAT_INC(c) sc->debug.stats.rxstats.c++ #define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++ #define RX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].rs\ [sc->debug.rsidx].c) RX_STAT_INC(rx_pkts_all); sc->debug.stats.rxstats.rx_bytes_all += rs->rs_datalen; if (rs->rs_status & ATH9K_RXERR_CRC) RX_STAT_INC(crc_err); if (rs->rs_status & ATH9K_RXERR_DECRYPT) RX_STAT_INC(decrypt_crc_err); if (rs->rs_status & ATH9K_RXERR_MIC) RX_STAT_INC(mic_err); if (rs->rs_status & ATH9K_RX_DELIM_CRC_PRE) RX_STAT_INC(pre_delim_crc_err); if (rs->rs_status & ATH9K_RX_DELIM_CRC_POST) RX_STAT_INC(post_delim_crc_err); if (rs->rs_status & ATH9K_RX_DECRYPT_BUSY) RX_STAT_INC(decrypt_busy_err); if (rs->rs_status & ATH9K_RXERR_PHY) { RX_STAT_INC(phy_err); if (rs->rs_phyerr < ATH9K_PHYERR_MAX) RX_PHY_ERR_INC(rs->rs_phyerr); } #ifdef CONFIG_ATH9K_MAC_DEBUG spin_lock(&sc->debug.samp_lock); RX_SAMP_DBG(jiffies) = jiffies; RX_SAMP_DBG(rssi_ctl0) = rs->rs_rssi_ctl0; RX_SAMP_DBG(rssi_ctl1) = rs->rs_rssi_ctl1; RX_SAMP_DBG(rssi_ctl2) = rs->rs_rssi_ctl2; RX_SAMP_DBG(rssi_ext0) = rs->rs_rssi_ext0; RX_SAMP_DBG(rssi_ext1) = rs->rs_rssi_ext1; RX_SAMP_DBG(rssi_ext2) = rs->rs_rssi_ext2; RX_SAMP_DBG(antenna) = rs->rs_antenna; RX_SAMP_DBG(rssi) = rs->rs_rssi; RX_SAMP_DBG(rate) = rs->rs_rate; RX_SAMP_DBG(is_mybeacon) = rs->is_mybeacon; sc->debug.rsidx = (sc->debug.rsidx + 1) % ATH_DBG_MAX_SAMPLES; spin_unlock(&sc->debug.samp_lock); #endif #undef RX_STAT_INC #undef RX_PHY_ERR_INC #undef RX_SAMP_DBG } static const struct file_operations fops_recv = { .read = read_file_recv, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t read_file_regidx(struct file *file, char __user *user_buf, 
size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; char buf[32]; unsigned int len; len = sprintf(buf, "0x%08x\n", sc->debug.regidx); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t write_file_regidx(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; unsigned long regidx; char buf[32]; ssize_t len; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; if (strict_strtoul(buf, 0, &regidx)) return -EINVAL; sc->debug.regidx = regidx; return count; } static const struct file_operations fops_regidx = { .read = read_file_regidx, .write = write_file_regidx, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t read_file_regval(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; struct ath_hw *ah = sc->sc_ah; char buf[32]; unsigned int len; u32 regval; ath9k_ps_wakeup(sc); regval = REG_READ_D(ah, sc->debug.regidx); ath9k_ps_restore(sc); len = sprintf(buf, "0x%08x\n", regval); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t write_file_regval(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; struct ath_hw *ah = sc->sc_ah; unsigned long regval; char buf[32]; ssize_t len; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; if (strict_strtoul(buf, 0, &regval)) return -EINVAL; ath9k_ps_wakeup(sc); REG_WRITE_D(ah, sc->debug.regidx, regval); ath9k_ps_restore(sc); return count; } static const struct file_operations fops_regval = { .read = read_file_regval, .write = write_file_regval, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; #define REGDUMP_LINE_SIZE 20 static int open_file_regdump(struct inode *inode, struct file *file) { 
struct ath_softc *sc = inode->i_private; unsigned int len = 0; u8 *buf; int i; unsigned long num_regs, regdump_len, max_reg_offset; max_reg_offset = AR_SREV_9300_20_OR_LATER(sc->sc_ah) ? 0x16bd4 : 0xb500; num_regs = max_reg_offset / 4 + 1; regdump_len = num_regs * REGDUMP_LINE_SIZE + 1; buf = vmalloc(regdump_len); if (!buf) return -ENOMEM; ath9k_ps_wakeup(sc); for (i = 0; i < num_regs; i++) len += scnprintf(buf + len, regdump_len - len, "0x%06x 0x%08x\n", i << 2, REG_READ(sc->sc_ah, i << 2)); ath9k_ps_restore(sc); file->private_data = buf; return 0; } static const struct file_operations fops_regdump = { .open = open_file_regdump, .read = ath9k_debugfs_read_buf, .release = ath9k_debugfs_release_buf, .owner = THIS_MODULE, .llseek = default_llseek,/* read accesses f_pos */ }; static ssize_t read_file_dump_nfcal(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; struct ath_hw *ah = sc->sc_ah; struct ath9k_nfcal_hist *h = sc->caldata.nfCalHist; struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_conf *conf = &common->hw->conf; u32 len = 0, size = 1500; u32 i, j; ssize_t retval = 0; char *buf; u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask; u8 nread; buf = kzalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; len += snprintf(buf + len, size - len, "Channel Noise Floor : %d\n", ah->noise); len += snprintf(buf + len, size - len, "Chain | privNF | # Readings | NF Readings\n"); for (i = 0; i < NUM_NF_READINGS; i++) { if (!(chainmask & (1 << i)) || ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))) continue; nread = AR_PHY_CCA_FILTERWINDOW_LENGTH - h[i].invalidNFcount; len += snprintf(buf + len, size - len, " %d\t %d\t %d\t\t", i, h[i].privNF, nread); for (j = 0; j < nread; j++) len += snprintf(buf + len, size - len, " %d", h[i].nfCalBuffer[j]); len += snprintf(buf + len, size - len, "\n"); } if (len > size) len = size; retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); 
kfree(buf); return retval; } static const struct file_operations fops_dump_nfcal = { .read = read_file_dump_nfcal, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; struct ath_hw *ah = sc->sc_ah; u32 len = 0, size = 1500; ssize_t retval = 0; char *buf; buf = kzalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; len = ah->eep_ops->dump_eeprom(ah, true, buf, len, size); retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); return retval; } static const struct file_operations fops_base_eeprom = { .read = read_file_base_eeprom, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t read_file_modal_eeprom(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; struct ath_hw *ah = sc->sc_ah; u32 len = 0, size = 6000; char *buf; size_t retval; buf = kzalloc(size, GFP_KERNEL); if (buf == NULL) return -ENOMEM; len = ah->eep_ops->dump_eeprom(ah, false, buf, len, size); retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); return retval; } static const struct file_operations fops_modal_eeprom = { .read = read_file_modal_eeprom, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; #ifdef CONFIG_ATH9K_MAC_DEBUG void ath9k_debug_samp_bb_mac(struct ath_softc *sc) { #define ATH_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].c) struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); unsigned long flags; int i; ath9k_ps_wakeup(sc); spin_lock_bh(&sc->debug.samp_lock); spin_lock_irqsave(&common->cc_lock, flags); ath_hw_cycle_counters_update(common); ATH_SAMP_DBG(cc.cycles) = common->cc_ani.cycles; ATH_SAMP_DBG(cc.rx_busy) = common->cc_ani.rx_busy; ATH_SAMP_DBG(cc.rx_frame) = common->cc_ani.rx_frame; ATH_SAMP_DBG(cc.tx_frame) = 
common->cc_ani.tx_frame; spin_unlock_irqrestore(&common->cc_lock, flags); ATH_SAMP_DBG(noise) = ah->noise; REG_WRITE_D(ah, AR_MACMISC, ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) | (AR_MACMISC_MISC_OBS_BUS_1 << AR_MACMISC_MISC_OBS_BUS_MSB_S))); for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) ATH_SAMP_DBG(dma_dbg_reg_vals[i]) = REG_READ_D(ah, AR_DMADBG_0 + (i * sizeof(u32))); ATH_SAMP_DBG(pcu_obs) = REG_READ_D(ah, AR_OBS_BUS_1); ATH_SAMP_DBG(pcu_cr) = REG_READ_D(ah, AR_CR); memcpy(ATH_SAMP_DBG(nfCalHist), sc->caldata.nfCalHist, sizeof(ATH_SAMP_DBG(nfCalHist))); sc->debug.sampidx = (sc->debug.sampidx + 1) % ATH_DBG_MAX_SAMPLES; spin_unlock_bh(&sc->debug.samp_lock); ath9k_ps_restore(sc); #undef ATH_SAMP_DBG } static int open_file_bb_mac_samps(struct inode *inode, struct file *file) { #define ATH_SAMP_DBG(c) bb_mac_samp[sampidx].c struct ath_softc *sc = inode->i_private; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_conf *conf = &common->hw->conf; struct ath_dbg_bb_mac_samp *bb_mac_samp; struct ath9k_nfcal_hist *h; int i, j, qcuOffset = 0, dcuOffset = 0; u32 *qcuBase, *dcuBase, size = 30000, len = 0; u32 sampidx = 0; u8 *buf; u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask; u8 nread; if (sc->sc_flags & SC_OP_INVALID) return -EAGAIN; buf = vmalloc(size); if (!buf) return -ENOMEM; bb_mac_samp = vmalloc(sizeof(*bb_mac_samp) * ATH_DBG_MAX_SAMPLES); if (!bb_mac_samp) { vfree(buf); return -ENOMEM; } /* Account the current state too */ ath9k_debug_samp_bb_mac(sc); spin_lock_bh(&sc->debug.samp_lock); memcpy(bb_mac_samp, sc->debug.bb_mac_samp, sizeof(*bb_mac_samp) * ATH_DBG_MAX_SAMPLES); len += snprintf(buf + len, size - len, "Current Sample Index: %d\n", sc->debug.sampidx); spin_unlock_bh(&sc->debug.samp_lock); len += snprintf(buf + len, size - len, "Raw DMA Debug Dump:\n"); len += snprintf(buf + len, size - len, "Sample |\t"); for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) len += snprintf(buf + len, size - 
len, " DMA Reg%d |\t", i); len += snprintf(buf + len, size - len, "\n"); for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) { len += snprintf(buf + len, size - len, "%d\t", sampidx); for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) len += snprintf(buf + len, size - len, " %08x\t", ATH_SAMP_DBG(dma_dbg_reg_vals[i])); len += snprintf(buf + len, size - len, "\n"); } len += snprintf(buf + len, size - len, "\n"); len += snprintf(buf + len, size - len, "Sample Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n"); for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) { qcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[0]); dcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[4]); for (i = 0; i < ATH9K_NUM_QUEUES; i++, qcuOffset += 4, dcuOffset += 5) { if (i == 8) { qcuOffset = 0; qcuBase++; } if (i == 6) { dcuOffset = 0; dcuBase++; } if (!sc->debug.stats.txstats[i].queued) continue; len += snprintf(buf + len, size - len, "%4d %7d %2x %1x %2x %2x\n", sampidx, i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset, (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3), ATH_SAMP_DBG(dma_dbg_reg_vals[2]) & (0x7 << (i * 3)) >> (i * 3), (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset); } len += snprintf(buf + len, size - len, "\n"); } len += snprintf(buf + len, size - len, "samp qcu_sh qcu_fh qcu_comp dcu_comp dcu_arb dcu_fp " "ch_idle_dur ch_idle_dur_val txfifo_val0 txfifo_val1 " "txfifo_dcu0 txfifo_dcu1 pcu_obs AR_CR\n"); for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) { qcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[0]); dcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[4]); len += snprintf(buf + len, size - len, "%4d %5x %5x ", sampidx, (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x003c0000) >> 18, (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x03c00000) >> 22); len += snprintf(buf + len, size - len, "%7x %8x ", (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x1c000000) >> 26, (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x3)); len += snprintf(buf + len, size - len, "%7x %7x ", (ATH_SAMP_DBG(dma_dbg_reg_vals[5]) & 0x06000000) >> 
25, (ATH_SAMP_DBG(dma_dbg_reg_vals[5]) & 0x38000000) >> 27); len += snprintf(buf + len, size - len, "%7d %12d ", (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x000003fc) >> 2, (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00000400) >> 10); len += snprintf(buf + len, size - len, "%12d %12d ", (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00000800) >> 11, (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00001000) >> 12); len += snprintf(buf + len, size - len, "%12d %12d ", (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x0001e000) >> 13, (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x001e0000) >> 17); len += snprintf(buf + len, size - len, "0x%07x 0x%07x\n", ATH_SAMP_DBG(pcu_obs), ATH_SAMP_DBG(pcu_cr)); } len += snprintf(buf + len, size - len, "Sample ChNoise Chain privNF #Reading Readings\n"); for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) { h = ATH_SAMP_DBG(nfCalHist); if (!ATH_SAMP_DBG(noise)) continue; for (i = 0; i < NUM_NF_READINGS; i++) { if (!(chainmask & (1 << i)) || ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))) continue; nread = AR_PHY_CCA_FILTERWINDOW_LENGTH - h[i].invalidNFcount; len += snprintf(buf + len, size - len, "%4d %5d %4d\t %d\t %d\t", sampidx, ATH_SAMP_DBG(noise), i, h[i].privNF, nread); for (j = 0; j < nread; j++) len += snprintf(buf + len, size - len, " %d", h[i].nfCalBuffer[j]); len += snprintf(buf + len, size - len, "\n"); } } len += snprintf(buf + len, size - len, "\nCycle counters:\n" "Sample Total Rxbusy Rxframes Txframes\n"); for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) { if (!ATH_SAMP_DBG(cc.cycles)) continue; len += snprintf(buf + len, size - len, "%4d %08x %08x %08x %08x\n", sampidx, ATH_SAMP_DBG(cc.cycles), ATH_SAMP_DBG(cc.rx_busy), ATH_SAMP_DBG(cc.rx_frame), ATH_SAMP_DBG(cc.tx_frame)); } len += snprintf(buf + len, size - len, "Tx status Dump :\n"); len += snprintf(buf + len, size - len, "Sample rssi:- ctl0 ctl1 ctl2 ext0 ext1 ext2 comb " "isok rts_fail data_fail rate tid qid " "ba_low ba_high tx_before(ms)\n"); for (sampidx = 0; sampidx < 
ATH_DBG_MAX_SAMPLES; sampidx++) { for (i = 0; i < ATH_DBG_MAX_SAMPLES; i++) { if (!ATH_SAMP_DBG(ts[i].jiffies)) continue; len += snprintf(buf + len, size - len, "%-14d" "%-4d %-4d %-4d %-4d %-4d %-4d %-4d %-4d %-8d " "%-9d %-4d %-3d %-3d %08x %08x %-11d\n", sampidx, ATH_SAMP_DBG(ts[i].rssi_ctl0), ATH_SAMP_DBG(ts[i].rssi_ctl1), ATH_SAMP_DBG(ts[i].rssi_ctl2), ATH_SAMP_DBG(ts[i].rssi_ext0), ATH_SAMP_DBG(ts[i].rssi_ext1), ATH_SAMP_DBG(ts[i].rssi_ext2), ATH_SAMP_DBG(ts[i].rssi), ATH_SAMP_DBG(ts[i].isok), ATH_SAMP_DBG(ts[i].rts_fail_cnt), ATH_SAMP_DBG(ts[i].data_fail_cnt), ATH_SAMP_DBG(ts[i].rateindex), ATH_SAMP_DBG(ts[i].tid), ATH_SAMP_DBG(ts[i].qid), ATH_SAMP_DBG(ts[i].ba_low), ATH_SAMP_DBG(ts[i].ba_high), jiffies_to_msecs(jiffies - ATH_SAMP_DBG(ts[i].jiffies))); } } len += snprintf(buf + len, size - len, "Rx status Dump :\n"); len += snprintf(buf + len, size - len, "Sample rssi:- ctl0 ctl1 ctl2 " "ext0 ext1 ext2 comb beacon ant rate rx_before(ms)\n"); for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) { for (i = 0; i < ATH_DBG_MAX_SAMPLES; i++) { if (!ATH_SAMP_DBG(rs[i].jiffies)) continue; len += snprintf(buf + len, size - len, "%-14d" "%-4d %-4d %-4d %-4d %-4d %-4d %-4d %-9s %-2d %02x %-13d\n", sampidx, ATH_SAMP_DBG(rs[i].rssi_ctl0), ATH_SAMP_DBG(rs[i].rssi_ctl1), ATH_SAMP_DBG(rs[i].rssi_ctl2), ATH_SAMP_DBG(rs[i].rssi_ext0), ATH_SAMP_DBG(rs[i].rssi_ext1), ATH_SAMP_DBG(rs[i].rssi_ext2), ATH_SAMP_DBG(rs[i].rssi), ATH_SAMP_DBG(rs[i].is_mybeacon) ? 
"True" : "False", ATH_SAMP_DBG(rs[i].antenna), ATH_SAMP_DBG(rs[i].rate), jiffies_to_msecs(jiffies - ATH_SAMP_DBG(rs[i].jiffies))); } } vfree(bb_mac_samp); file->private_data = buf; return 0; #undef ATH_SAMP_DBG } static const struct file_operations fops_samps = { .open = open_file_bb_mac_samps, .read = ath9k_debugfs_read_buf, .release = ath9k_debugfs_release_buf, .owner = THIS_MODULE, .llseek = default_llseek, }; #endif int ath9k_init_debug(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); struct ath_softc *sc = (struct ath_softc *) common->priv; sc->debug.debugfs_phy = debugfs_create_dir("ath9k", sc->hw->wiphy->debugfsdir); if (!sc->debug.debugfs_phy) return -ENOMEM; #ifdef CONFIG_ATH_DEBUG debugfs_create_file("debug", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_debug); #endif ath9k_dfs_init_debug(sc); debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_dma); debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_interrupt); debugfs_create_file("xmit", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_xmit); debugfs_create_file("stations", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_stations); debugfs_create_file("misc", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_misc); debugfs_create_file("reset", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_reset); debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_recv); debugfs_create_file("rx_chainmask", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_rx_chainmask); debugfs_create_file("tx_chainmask", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_tx_chainmask); debugfs_create_file("disable_ani", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_disable_ani); debugfs_create_file("regidx", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_regidx); debugfs_create_file("regval", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_regval); debugfs_create_bool("ignore_extcca", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, &ah->config.cwm_ignore_extcca); 
debugfs_create_file("regdump", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_regdump); debugfs_create_file("dump_nfcal", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_dump_nfcal); debugfs_create_file("base_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_base_eeprom); debugfs_create_file("modal_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_modal_eeprom); #ifdef CONFIG_ATH9K_MAC_DEBUG debugfs_create_file("samples", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_samps); #endif debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask); debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, &sc->sc_ah->gpio_val); return 0; }
gpl-2.0
tweezy23/kernel-msm
drivers/scsi/qla2xxx/qla_dbg.c
4768
82337
/* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2011 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ /* * Table for showing the current message id in use for particular level * Change this table for addition of log/debug messages. * ---------------------------------------------------------------------- * | Level | Last Value Used | Holes | * ---------------------------------------------------------------------- * | Module Init and Probe | 0x0120 | 0x4b,0xba,0xfa | * | Mailbox commands | 0x113e | 0x112c-0x112e | * | | | 0x113a | * | Device Discovery | 0x2086 | 0x2020-0x2022 | * | Queue Command and IO tracing | 0x3030 | 0x3006,0x3008 | * | | | 0x302d-0x302e | * | DPC Thread | 0x401c | | * | Async Events | 0x505d | 0x502b-0x502f | * | | | 0x5047,0x5052 | * | Timer Routines | 0x6011 | 0x600e-0x600f | * | User Space Interactions | 0x709f | 0x7018,0x702e, | * | | | 0x7039,0x7045, | * | | | 0x7073-0x7075, | * | | | 0x708c | * | Task Management | 0x803c | 0x8025-0x8026 | * | | | 0x800b,0x8039 | * | AER/EEH | 0x900f | | * | Virtual Port | 0xa007 | | * | ISP82XX Specific | 0xb054 | 0xb053 | * | MultiQ | 0xc00c | | * | Misc | 0xd010 | | * ---------------------------------------------------------------------- */ #include "qla_def.h" #include <linux/delay.h> static uint32_t ql_dbg_offset = 0x800; static inline void qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump) { fw_dump->fw_major_version = htonl(ha->fw_major_version); fw_dump->fw_minor_version = htonl(ha->fw_minor_version); fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version); fw_dump->fw_attributes = htonl(ha->fw_attributes); fw_dump->vendor = htonl(ha->pdev->vendor); fw_dump->device = htonl(ha->pdev->device); fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor); fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device); } static inline void * qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr) { struct req_que *req = 
ha->req_q_map[0]; struct rsp_que *rsp = ha->rsp_q_map[0]; /* Request queue. */ memcpy(ptr, req->ring, req->length * sizeof(request_t)); /* Response queue. */ ptr += req->length * sizeof(request_t); memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t)); return ptr + (rsp->length * sizeof(response_t)); } static int qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram, uint32_t ram_dwords, void **nxt) { int rval; uint32_t cnt, stat, timer, dwords, idx; uint16_t mb0; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; dma_addr_t dump_dma = ha->gid_list_dma; uint32_t *dump = (uint32_t *)ha->gid_list; rval = QLA_SUCCESS; mb0 = 0; WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED); clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); dwords = qla2x00_gid_list_size(ha) / 4; for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS; cnt += dwords, addr += dwords) { if (cnt + dwords > ram_dwords) dwords = ram_dwords - cnt; WRT_REG_WORD(&reg->mailbox1, LSW(addr)); WRT_REG_WORD(&reg->mailbox8, MSW(addr)); WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma)); WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma)); WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma))); WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma))); WRT_REG_WORD(&reg->mailbox4, MSW(dwords)); WRT_REG_WORD(&reg->mailbox5, LSW(dwords)); WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT); for (timer = 6000000; timer; timer--) { /* Check for pending interrupts. 
*/ stat = RD_REG_DWORD(&reg->host_status); if (stat & HSRX_RISC_INT) { stat &= 0xff; if (stat == 0x1 || stat == 0x2 || stat == 0x10 || stat == 0x11) { set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); mb0 = RD_REG_WORD(&reg->mailbox0); WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); RD_REG_DWORD(&reg->hccr); break; } /* Clear this intr; it wasn't a mailbox intr */ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); RD_REG_DWORD(&reg->hccr); } udelay(5); } if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { rval = mb0 & MBS_MASK; for (idx = 0; idx < dwords; idx++) ram[cnt + idx] = swab32(dump[idx]); } else { rval = QLA_FUNCTION_FAILED; } } *nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL; return rval; } static int qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram, uint32_t cram_size, void **nxt) { int rval; /* Code RAM. */ rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt); if (rval != QLA_SUCCESS) return rval; /* External Memory. */ return qla24xx_dump_ram(ha, 0x100000, *nxt, ha->fw_memory_size - 0x100000 + 1, nxt); } static uint32_t * qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase, uint32_t count, uint32_t *buf) { uint32_t __iomem *dmp_reg; WRT_REG_DWORD(&reg->iobase_addr, iobase); dmp_reg = &reg->iobase_window; while (count--) *buf++ = htonl(RD_REG_DWORD(dmp_reg++)); return buf; } static inline int qla24xx_pause_risc(struct device_reg_24xx __iomem *reg) { int rval = QLA_SUCCESS; uint32_t cnt; WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE); for (cnt = 30000; ((RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED) == 0) && rval == QLA_SUCCESS; cnt--) { if (cnt) udelay(100); else rval = QLA_FUNCTION_TIMEOUT; } return rval; } static int qla24xx_soft_reset(struct qla_hw_data *ha) { int rval = QLA_SUCCESS; uint32_t cnt; uint16_t mb0, wd; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; /* Reset RISC. 
*/ WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); for (cnt = 0; cnt < 30000; cnt++) { if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0) break; udelay(10); } WRT_REG_DWORD(&reg->ctrl_status, CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); udelay(100); /* Wait for firmware to complete NVRAM accesses. */ mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0); for (cnt = 10000 ; cnt && mb0; cnt--) { udelay(5); mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0); barrier(); } /* Wait for soft-reset to complete. */ for (cnt = 0; cnt < 30000; cnt++) { if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET) == 0) break; udelay(10); } WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET); RD_REG_DWORD(&reg->hccr); /* PCI Posting. */ for (cnt = 30000; RD_REG_WORD(&reg->mailbox0) != 0 && rval == QLA_SUCCESS; cnt--) { if (cnt) udelay(100); else rval = QLA_FUNCTION_TIMEOUT; } return rval; } static int qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram, uint32_t ram_words, void **nxt) { int rval; uint32_t cnt, stat, timer, words, idx; uint16_t mb0; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; dma_addr_t dump_dma = ha->gid_list_dma; uint16_t *dump = (uint16_t *)ha->gid_list; rval = QLA_SUCCESS; mb0 = 0; WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED); clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); words = qla2x00_gid_list_size(ha) / 2; for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS; cnt += words, addr += words) { if (cnt + words > ram_words) words = ram_words - cnt; WRT_MAILBOX_REG(ha, reg, 1, LSW(addr)); WRT_MAILBOX_REG(ha, reg, 8, MSW(addr)); WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma)); WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma)); WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma))); WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma))); WRT_MAILBOX_REG(ha, reg, 4, words); WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT); for (timer = 6000000; timer; timer--) { /* Check for pending 
interrupts. */ stat = RD_REG_DWORD(&reg->u.isp2300.host_status); if (stat & HSR_RISC_INT) { stat &= 0xff; if (stat == 0x1 || stat == 0x2) { set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); mb0 = RD_MAILBOX_REG(ha, reg, 0); /* Release mailbox registers. */ WRT_REG_WORD(&reg->semaphore, 0); WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); RD_REG_WORD(&reg->hccr); break; } else if (stat == 0x10 || stat == 0x11) { set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); mb0 = RD_MAILBOX_REG(ha, reg, 0); WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); RD_REG_WORD(&reg->hccr); break; } /* clear this intr; it wasn't a mailbox intr */ WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); RD_REG_WORD(&reg->hccr); } udelay(5); } if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { rval = mb0 & MBS_MASK; for (idx = 0; idx < words; idx++) ram[cnt + idx] = swab16(dump[idx]); } else { rval = QLA_FUNCTION_FAILED; } } *nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL; return rval; } static inline void qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count, uint16_t *buf) { uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd; while (count--) *buf++ = htons(RD_REG_WORD(dmp_reg++)); } static inline void * qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr) { if (!ha->eft) return ptr; memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size)); return ptr + ntohl(ha->fw_dump->eft_size); } static inline void * qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) { uint32_t cnt; uint32_t *iter_reg; struct qla2xxx_fce_chain *fcec = ptr; if (!ha->fce) return ptr; *last_chain = &fcec->type; fcec->type = __constant_htonl(DUMP_CHAIN_FCE); fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) + fce_calc_size(ha->fce_bufs)); fcec->size = htonl(fce_calc_size(ha->fce_bufs)); fcec->addr_l = htonl(LSD(ha->fce_dma)); fcec->addr_h = htonl(MSD(ha->fce_dma)); iter_reg = fcec->eregs; for (cnt = 0; cnt < 8; cnt++) *iter_reg++ = htonl(ha->fce_mb[cnt]); memcpy(iter_reg, ha->fce, ntohl(fcec->size)); 
return (char *)iter_reg + ntohl(fcec->size); } static inline void * qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) { struct qla2xxx_mqueue_chain *q; struct qla2xxx_mqueue_header *qh; struct req_que *req; struct rsp_que *rsp; int que; if (!ha->mqenable) return ptr; /* Request queues */ for (que = 1; que < ha->max_req_queues; que++) { req = ha->req_q_map[que]; if (!req) break; /* Add chain. */ q = ptr; *last_chain = &q->type; q->type = __constant_htonl(DUMP_CHAIN_QUEUE); q->chain_size = htonl( sizeof(struct qla2xxx_mqueue_chain) + sizeof(struct qla2xxx_mqueue_header) + (req->length * sizeof(request_t))); ptr += sizeof(struct qla2xxx_mqueue_chain); /* Add header. */ qh = ptr; qh->queue = __constant_htonl(TYPE_REQUEST_QUEUE); qh->number = htonl(que); qh->size = htonl(req->length * sizeof(request_t)); ptr += sizeof(struct qla2xxx_mqueue_header); /* Add data. */ memcpy(ptr, req->ring, req->length * sizeof(request_t)); ptr += req->length * sizeof(request_t); } /* Response queues */ for (que = 1; que < ha->max_rsp_queues; que++) { rsp = ha->rsp_q_map[que]; if (!rsp) break; /* Add chain. */ q = ptr; *last_chain = &q->type; q->type = __constant_htonl(DUMP_CHAIN_QUEUE); q->chain_size = htonl( sizeof(struct qla2xxx_mqueue_chain) + sizeof(struct qla2xxx_mqueue_header) + (rsp->length * sizeof(response_t))); ptr += sizeof(struct qla2xxx_mqueue_chain); /* Add header. */ qh = ptr; qh->queue = __constant_htonl(TYPE_RESPONSE_QUEUE); qh->number = htonl(que); qh->size = htonl(rsp->length * sizeof(response_t)); ptr += sizeof(struct qla2xxx_mqueue_header); /* Add data. 
*/ memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t)); ptr += rsp->length * sizeof(response_t); } return ptr; } static inline void * qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) { uint32_t cnt, que_idx; uint8_t que_cnt; struct qla2xxx_mq_chain *mq = ptr; struct device_reg_25xxmq __iomem *reg; if (!ha->mqenable || IS_QLA83XX(ha)) return ptr; mq = ptr; *last_chain = &mq->type; mq->type = __constant_htonl(DUMP_CHAIN_MQ); mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain)); que_cnt = ha->max_req_queues > ha->max_rsp_queues ? ha->max_req_queues : ha->max_rsp_queues; mq->count = htonl(que_cnt); for (cnt = 0; cnt < que_cnt; cnt++) { reg = (struct device_reg_25xxmq *) ((void *) ha->mqiobase + cnt * QLA_QUE_PAGE); que_idx = cnt * 4; mq->qregs[que_idx] = htonl(RD_REG_DWORD(&reg->req_q_in)); mq->qregs[que_idx+1] = htonl(RD_REG_DWORD(&reg->req_q_out)); mq->qregs[que_idx+2] = htonl(RD_REG_DWORD(&reg->rsp_q_in)); mq->qregs[que_idx+3] = htonl(RD_REG_DWORD(&reg->rsp_q_out)); } return ptr + sizeof(struct qla2xxx_mq_chain); } void qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval) { struct qla_hw_data *ha = vha->hw; if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0xd000, "Failed to dump firmware (%x).\n", rval); ha->fw_dumped = 0; } else { ql_log(ql_log_info, vha, 0xd001, "Firmware dump saved to temp buffer (%ld/%p).\n", vha->host_no, ha->fw_dump); ha->fw_dumped = 1; qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); } } /** * qla2300_fw_dump() - Dumps binary data from the 2300 firmware. 
* @ha: HA context * @hardware_locked: Called with the hardware_lock */ void qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked) { int rval; uint32_t cnt; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; uint16_t __iomem *dmp_reg; unsigned long flags; struct qla2300_fw_dump *fw; void *nxt; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); flags = 0; if (!hardware_locked) spin_lock_irqsave(&ha->hardware_lock, flags); if (!ha->fw_dump) { ql_log(ql_log_warn, vha, 0xd002, "No buffer available for dump.\n"); goto qla2300_fw_dump_failed; } if (ha->fw_dumped) { ql_log(ql_log_warn, vha, 0xd003, "Firmware has been previously dumped (%p) " "-- ignoring request.\n", ha->fw_dump); goto qla2300_fw_dump_failed; } fw = &ha->fw_dump->isp.isp23; qla2xxx_prep_dump(ha, ha->fw_dump); rval = QLA_SUCCESS; fw->hccr = htons(RD_REG_WORD(&reg->hccr)); /* Pause RISC. */ WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC); if (IS_QLA2300(ha)) { for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 && rval == QLA_SUCCESS; cnt--) { if (cnt) udelay(100); else rval = QLA_FUNCTION_TIMEOUT; } } else { RD_REG_WORD(&reg->hccr); /* PCI Posting. 
*/ udelay(10); } if (rval == QLA_SUCCESS) { dmp_reg = &reg->flash_address; for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++) fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); dmp_reg = &reg->u.isp2300.req_q_in; for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2; cnt++) fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); dmp_reg = &reg->u.isp2300.mailbox0; for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); WRT_REG_WORD(&reg->ctrl_status, 0x40); qla2xxx_read_window(reg, 32, fw->resp_dma_reg); WRT_REG_WORD(&reg->ctrl_status, 0x50); qla2xxx_read_window(reg, 48, fw->dma_reg); WRT_REG_WORD(&reg->ctrl_status, 0x00); dmp_reg = &reg->risc_hw; for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++) fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); WRT_REG_WORD(&reg->pcr, 0x2000); qla2xxx_read_window(reg, 16, fw->risc_gp0_reg); WRT_REG_WORD(&reg->pcr, 0x2200); qla2xxx_read_window(reg, 16, fw->risc_gp1_reg); WRT_REG_WORD(&reg->pcr, 0x2400); qla2xxx_read_window(reg, 16, fw->risc_gp2_reg); WRT_REG_WORD(&reg->pcr, 0x2600); qla2xxx_read_window(reg, 16, fw->risc_gp3_reg); WRT_REG_WORD(&reg->pcr, 0x2800); qla2xxx_read_window(reg, 16, fw->risc_gp4_reg); WRT_REG_WORD(&reg->pcr, 0x2A00); qla2xxx_read_window(reg, 16, fw->risc_gp5_reg); WRT_REG_WORD(&reg->pcr, 0x2C00); qla2xxx_read_window(reg, 16, fw->risc_gp6_reg); WRT_REG_WORD(&reg->pcr, 0x2E00); qla2xxx_read_window(reg, 16, fw->risc_gp7_reg); WRT_REG_WORD(&reg->ctrl_status, 0x10); qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg); WRT_REG_WORD(&reg->ctrl_status, 0x20); qla2xxx_read_window(reg, 64, fw->fpm_b0_reg); WRT_REG_WORD(&reg->ctrl_status, 0x30); qla2xxx_read_window(reg, 64, fw->fpm_b1_reg); /* Reset RISC. 
 */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_WORD(&reg->ctrl_status) &
		    CSR_ISP_SOFT_RESET) == 0)
			break;

		udelay(10);
	}
	}

	/* Wait for the mailbox-0 "all done" handshake after reset. */
	if (!IS_QLA2300(ha)) {
		for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
		    rval == QLA_SUCCESS; cnt--) {
			if (cnt)
				udelay(100);
			else
				rval = QLA_FUNCTION_TIMEOUT;
		}
	}

	/* Get RISC SRAM. */
	if (rval == QLA_SUCCESS)
		rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
		    sizeof(fw->risc_ram) / 2, &nxt);

	/* Get stack SRAM. */
	if (rval == QLA_SUCCESS)
		rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
		    sizeof(fw->stack_ram) / 2, &nxt);

	/* Get data SRAM. */
	if (rval == QLA_SUCCESS)
		rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
		    ha->fw_memory_size - 0x11000 + 1, &nxt);

	if (rval == QLA_SUCCESS)
		qla2xxx_copy_queues(ha, nxt);

	qla2xxx_dump_post_process(base_vha, rval);

qla2300_fw_dump_failed:
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

/**
 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
 * @vha: SCSI host context
 * @hardware_locked: non-zero when the caller already holds ha->hardware_lock
 *
 * Captures register state and RISC RAM into ha->fw_dump->isp.isp21.
 * The RISC RAM is read one word at a time via the MBC_READ_RAM_WORD
 * mailbox command, so this path is considerably slower than the
 * qla2xxx_dump_ram() path used by later ISP types.
 */
void
qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
	int rval;
	uint32_t cnt, timer;
	uint16_t risc_address;
	uint16_t mb0, mb2;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint16_t __iomem *dmp_reg;
	unsigned long flags;
	struct qla2100_fw_dump *fw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	risc_address = 0;
	mb0 = mb2 = 0;
	flags = 0;

	if (!hardware_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);

	if (!ha->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd004,
		    "No buffer available for dump.\n");
		goto qla2100_fw_dump_failed;
	}

	/* Never overwrite an existing, not-yet-retrieved dump. */
	if (ha->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd005,
		    "Firmware has been previously dumped (%p) "
		    "-- ignoring request.\n",
		    ha->fw_dump);
		goto qla2100_fw_dump_failed;
	}
	fw = &ha->fw_dump->isp.isp21;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	rval = QLA_SUCCESS;
	fw->hccr = htons(RD_REG_WORD(&reg->hccr));

	/* Pause RISC. */
	WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
	for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(100);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS) {
		/* PBIU registers. */
		dmp_reg = &reg->flash_address;
		for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
			fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		/*
		 * Mailbox registers; on ISP2200 mailboxes 8+ live in a
		 * different register bank, hence the pointer switch at 8.
		 */
		dmp_reg = &reg->u.isp2100.mailbox0;
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (cnt == 8)
				dmp_reg = &reg->u_end.isp2200.mailbox8;

			fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
		}

		/* DMA registers. */
		dmp_reg = &reg->u.isp2100.unused_2[0];
		for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++)
			fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		/* RISC hardware registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x00);
		dmp_reg = &reg->risc_hw;
		for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
			fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		/* RISC GP register banks, selected via the PCR. */
		WRT_REG_WORD(&reg->pcr, 0x2000);
		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);

		WRT_REG_WORD(&reg->pcr, 0x2100);
		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);

		WRT_REG_WORD(&reg->pcr, 0x2200);
		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);

		WRT_REG_WORD(&reg->pcr, 0x2300);
		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);

		WRT_REG_WORD(&reg->pcr, 0x2400);
		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);

		WRT_REG_WORD(&reg->pcr, 0x2500);
		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);

		WRT_REG_WORD(&reg->pcr, 0x2600);
		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);

		WRT_REG_WORD(&reg->pcr, 0x2700);
		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);

		/* Frame buffer / FPM banks, selected via ctrl_status. */
		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x30);
		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);

		/* Reset the ISP. */
		WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
	}

	for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(100);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	/* Pause RISC. */
	if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
	    (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {

		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 30000;
		    (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
		    rval == QLA_SUCCESS; cnt--) {
			if (cnt)
				udelay(100);
			else
				rval = QLA_FUNCTION_TIMEOUT;
		}
		if (rval == QLA_SUCCESS) {
			/* Set memory configuration and timing. */
			if (IS_QLA2100(ha))
				WRT_REG_WORD(&reg->mctr, 0xf1);
			else
				WRT_REG_WORD(&reg->mctr, 0xf2);
			RD_REG_WORD(&reg->mctr);	/* PCI Posting. */

			/* Release RISC. */
			WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		}
	}

	if (rval == QLA_SUCCESS) {
		/* Get RISC SRAM. */
		risc_address = 0x1000;
		WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
	}
	/*
	 * Read RISC RAM a word at a time: issue READ_RAM_WORD for each
	 * address, raise host interrupt, then poll for the mailbox
	 * completion; mb0 carries status, mb2 the data word.
	 */
	for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
	    cnt++, risc_address++) {
		WRT_MAILBOX_REG(ha, reg, 1, risc_address);
		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);

		for (timer = 6000000; timer != 0; timer--) {
			/* Check for pending interrupts. */
			if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
				if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);
					mb2 = RD_MAILBOX_REG(ha, reg, 2);

					WRT_REG_WORD(&reg->semaphore, 0);
					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
					break;
				}
				/* Spurious interrupt -- clear and keep polling. */
				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
				RD_REG_WORD(&reg->hccr);
			}
			udelay(5);
		}

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			fw->risc_ram[cnt] = htons(mb2);
		} else {
			rval = QLA_FUNCTION_FAILED;
		}
	}

	if (rval == QLA_SUCCESS)
		qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);

	qla2xxx_dump_post_process(base_vha, rval);

qla2100_fw_dump_failed:
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

/*
 * qla24xx_fw_dump() - capture ISP24xx register state and firmware memory
 * into ha->fw_dump->isp.isp24.  Same locking contract as qla2100_fw_dump().
 */
void
qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
	int rval;
	uint32_t cnt;
	uint32_t risc_address;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t __iomem *dmp_reg;
	uint32_t *iter_reg;
	uint16_t __iomem *mbx_reg;
	unsigned long flags;
	struct qla24xx_fw_dump *fw;
	uint32_t ext_mem_cnt;
	void *nxt;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	/* ISP82xx has its own dump mechanism. */
	if (IS_QLA82XX(ha))
		return;

	risc_address = ext_mem_cnt = 0;
	flags = 0;

	if (!hardware_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);

	if (!ha->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd006,
		    "No buffer available for dump.\n");
		goto qla24xx_fw_dump_failed;
	}

	if (ha->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd007,
		    "Firmware has been previously dumped (%p) "
		    "-- ignoring request.\n",
		    ha->fw_dump);
		goto qla24xx_fw_dump_failed;
	}
	fw = &ha->fw_dump->isp.isp24;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));

	/* Pause RISC. */
	rval = qla24xx_pause_risc(reg);
	if (rval != QLA_SUCCESS)
		goto qla24xx_fw_dump_failed_0;

	/* Host interface registers.
 */
	dmp_reg = &reg->flash_addr;
	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));

	/* Disable interrupts. */
	WRT_REG_DWORD(&reg->ictrl, 0);
	RD_REG_DWORD(&reg->ictrl);	/* PCI posting flush. */

	/* Shadow registers, read back via iobase_select/iobase_sdata. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	/* Mailbox registers. */
	mbx_reg = &reg->mailbox0;
	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));

	/* Transfer sequence registers. */
	iter_reg = fw->xseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);

	qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg);
	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);

	/* Receive sequence registers. */
	iter_reg = fw->rseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);

	qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg);
	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);

	/* Command DMA registers. */
	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);

	/* Queues: 8 windowed dwords plus 7 dwords from iobase_q each. */
	iter_reg = fw->req0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	iter_reg = fw->resp0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	iter_reg = fw->req1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	/* Transmit DMA registers. */
	iter_reg = fw->xmt0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
	qla24xx_read_window(reg, 0x7610, 16, iter_reg);

	iter_reg = fw->xmt1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
	qla24xx_read_window(reg, 0x7630, 16, iter_reg);

	iter_reg = fw->xmt2_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
	qla24xx_read_window(reg, 0x7650, 16, iter_reg);

	iter_reg = fw->xmt3_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
	qla24xx_read_window(reg, 0x7670, 16, iter_reg);

	iter_reg = fw->xmt4_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
	qla24xx_read_window(reg, 0x7690, 16, iter_reg);

	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);

	/* Receive DMA registers. */
	iter_reg = fw->rcvt0_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
	qla24xx_read_window(reg, 0x7710, 16, iter_reg);

	iter_reg = fw->rcvt1_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
	qla24xx_read_window(reg, 0x7730, 16, iter_reg);

	/* RISC registers. */
	iter_reg = fw->risc_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);

	/* Local memory controller registers. */
	iter_reg = fw->lmc_reg;
	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
	qla24xx_read_window(reg, 0x3060, 16, iter_reg);

	/* Fibre Protocol Module registers. */
	iter_reg = fw->fpm_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
	qla24xx_read_window(reg, 0x40B0, 16, iter_reg);

	/* Frame Buffer registers. */
	iter_reg = fw->fb_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
	qla24xx_read_window(reg, 0x61B0, 16, iter_reg);

	rval = qla24xx_soft_reset(ha);
	if (rval != QLA_SUCCESS)
		goto qla24xx_fw_dump_failed_0;

	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
	    &nxt);
	if (rval != QLA_SUCCESS)
		goto qla24xx_fw_dump_failed_0;

	nxt = qla2xxx_copy_queues(ha, nxt);

	qla24xx_copy_eft(ha, nxt);

qla24xx_fw_dump_failed_0:
	qla2xxx_dump_post_process(base_vha, rval);

qla24xx_fw_dump_failed:
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

/*
 * qla25xx_fw_dump() - capture ISP25xx register state, firmware memory and
 * chained extras (MQ/FCE/multi-queue) into ha->fw_dump->isp.isp25.
 */
void
qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
	int rval;
	uint32_t cnt;
	uint32_t risc_address;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t __iomem *dmp_reg;
	uint32_t *iter_reg;
	uint16_t __iomem *mbx_reg;
	unsigned long flags;
	struct qla25xx_fw_dump *fw;
	uint32_t ext_mem_cnt;
	void *nxt, *nxt_chain;
	uint32_t *last_chain = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	risc_address = ext_mem_cnt = 0;
	flags = 0;

	if (!hardware_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);

	if (!ha->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd008,
		    "No buffer available for dump.\n");
		goto qla25xx_fw_dump_failed;
	}

	if (ha->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd009,
		    "Firmware has been previously dumped (%p) "
		    "-- ignoring request.\n",
		    ha->fw_dump);
		goto qla25xx_fw_dump_failed;
	}
	fw =
	    &ha->fw_dump->isp.isp25;
	qla2xxx_prep_dump(ha, ha->fw_dump);
	ha->fw_dump->version = __constant_htonl(2);

	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));

	/* Pause RISC. */
	rval = qla24xx_pause_risc(reg);
	if (rval != QLA_SUCCESS)
		goto qla25xx_fw_dump_failed_0;

	/* Host/Risc registers. */
	iter_reg = fw->host_risc_reg;
	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
	qla24xx_read_window(reg, 0x7010, 16, iter_reg);

	/* PCIe registers. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x01);
	dmp_reg = &reg->iobase_c4;
	fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));

	WRT_REG_DWORD(&reg->iobase_window, 0x00);
	RD_REG_DWORD(&reg->iobase_window);	/* PCI posting flush. */

	/* Host interface registers. */
	dmp_reg = &reg->flash_addr;
	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));

	/* Disable interrupts. */
	WRT_REG_DWORD(&reg->ictrl, 0);
	RD_REG_DWORD(&reg->ictrl);

	/* Shadow registers. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
	fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
	fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
	fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
	fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	/* RISC I/O register. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
	fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));

	/* Mailbox registers. */
	mbx_reg = &reg->mailbox0;
	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));

	/* Transfer sequence registers. */
	iter_reg = fw->xseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);

	iter_reg = fw->xseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);

	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);

	/* Receive sequence registers. */
	iter_reg = fw->rseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);

	iter_reg = fw->rseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);

	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);

	/* Auxiliary sequence registers. */
	iter_reg = fw->aseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
	qla24xx_read_window(reg, 0xB070, 16, iter_reg);

	iter_reg = fw->aseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);

	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);

	/* Command DMA registers. */
	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);

	/* Queues: 8 windowed dwords plus 7 dwords from iobase_q each. */
	iter_reg = fw->req0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	iter_reg = fw->resp0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	iter_reg = fw->req1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	/* Transmit DMA registers. */
	iter_reg = fw->xmt0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
	qla24xx_read_window(reg, 0x7610, 16, iter_reg);

	iter_reg = fw->xmt1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
	qla24xx_read_window(reg, 0x7630, 16, iter_reg);

	iter_reg = fw->xmt2_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
	qla24xx_read_window(reg, 0x7650, 16, iter_reg);

	iter_reg = fw->xmt3_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
	qla24xx_read_window(reg, 0x7670, 16, iter_reg);

	iter_reg = fw->xmt4_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
	qla24xx_read_window(reg, 0x7690, 16, iter_reg);

	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);

	/* Receive DMA registers. */
	iter_reg = fw->rcvt0_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
	qla24xx_read_window(reg, 0x7710, 16, iter_reg);

	iter_reg = fw->rcvt1_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
	qla24xx_read_window(reg, 0x7730, 16, iter_reg);

	/* RISC registers. */
	iter_reg = fw->risc_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);

	/* Local memory controller registers. */
	iter_reg = fw->lmc_reg;
	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
	qla24xx_read_window(reg, 0x3070, 16, iter_reg);

	/* Fibre Protocol Module registers. */
	iter_reg = fw->fpm_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
	qla24xx_read_window(reg, 0x40B0, 16, iter_reg);

	/* Frame Buffer registers. */
	iter_reg = fw->fb_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);

	/* Multi queue registers */
	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
	    &last_chain);

	rval = qla24xx_soft_reset(ha);
	if (rval != QLA_SUCCESS)
		goto qla25xx_fw_dump_failed_0;

	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
	    &nxt);
	if (rval != QLA_SUCCESS)
		goto qla25xx_fw_dump_failed_0;

	nxt = qla2xxx_copy_queues(ha, nxt);

	nxt = qla24xx_copy_eft(ha, nxt);

	/* Chain entries -- started with MQ. */
	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
	if (last_chain) {
		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
	}

	/* Adjust valid length.
 */
	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);

qla25xx_fw_dump_failed_0:
	qla2xxx_dump_post_process(base_vha, rval);

qla25xx_fw_dump_failed:
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

/*
 * qla81xx_fw_dump() - capture ISP81xx register state, firmware memory and
 * chained extras into ha->fw_dump->isp.isp81.  Structurally the same as
 * qla25xx_fw_dump() but with the wider ISP81xx FPM/FB register windows.
 */
void
qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
	int rval;
	uint32_t cnt;
	uint32_t risc_address;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t __iomem *dmp_reg;
	uint32_t *iter_reg;
	uint16_t __iomem *mbx_reg;
	unsigned long flags;
	struct qla81xx_fw_dump *fw;
	uint32_t ext_mem_cnt;
	void *nxt, *nxt_chain;
	uint32_t *last_chain = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	risc_address = ext_mem_cnt = 0;
	flags = 0;

	if (!hardware_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);

	if (!ha->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd00a,
		    "No buffer available for dump.\n");
		goto qla81xx_fw_dump_failed;
	}

	if (ha->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd00b,
		    "Firmware has been previously dumped (%p) "
		    "-- ignoring request.\n",
		    ha->fw_dump);
		goto qla81xx_fw_dump_failed;
	}
	fw = &ha->fw_dump->isp.isp81;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));

	/* Pause RISC. */
	rval = qla24xx_pause_risc(reg);
	if (rval != QLA_SUCCESS)
		goto qla81xx_fw_dump_failed_0;

	/* Host/Risc registers. */
	iter_reg = fw->host_risc_reg;
	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
	qla24xx_read_window(reg, 0x7010, 16, iter_reg);

	/* PCIe registers. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x01);
	dmp_reg = &reg->iobase_c4;
	fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));

	WRT_REG_DWORD(&reg->iobase_window, 0x00);
	RD_REG_DWORD(&reg->iobase_window);	/* PCI posting flush. */

	/* Host interface registers. */
	dmp_reg = &reg->flash_addr;
	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));

	/* Disable interrupts. */
	WRT_REG_DWORD(&reg->ictrl, 0);
	RD_REG_DWORD(&reg->ictrl);

	/* Shadow registers. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
	fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
	fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
	fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
	fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	/* RISC I/O register. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
	fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));

	/* Mailbox registers. */
	mbx_reg = &reg->mailbox0;
	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));

	/* Transfer sequence registers. */
	iter_reg = fw->xseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);

	iter_reg = fw->xseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);

	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);

	/* Receive sequence registers. */
	iter_reg = fw->rseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);

	iter_reg = fw->rseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);

	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);

	/* Auxiliary sequence registers. */
	iter_reg = fw->aseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
	qla24xx_read_window(reg, 0xB070, 16, iter_reg);

	iter_reg = fw->aseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);

	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);

	/* Command DMA registers. */
	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);

	/* Queues: 8 windowed dwords plus 7 dwords from iobase_q each. */
	iter_reg = fw->req0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	iter_reg = fw->resp0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	iter_reg = fw->req1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	/* Transmit DMA registers. */
	iter_reg = fw->xmt0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
	qla24xx_read_window(reg, 0x7610, 16, iter_reg);

	iter_reg = fw->xmt1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
	qla24xx_read_window(reg, 0x7630, 16, iter_reg);

	iter_reg = fw->xmt2_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
	qla24xx_read_window(reg, 0x7650, 16, iter_reg);

	iter_reg = fw->xmt3_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
	qla24xx_read_window(reg, 0x7670, 16, iter_reg);

	iter_reg = fw->xmt4_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
	qla24xx_read_window(reg, 0x7690, 16, iter_reg);

	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);

	/* Receive DMA registers. */
	iter_reg = fw->rcvt0_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
	qla24xx_read_window(reg, 0x7710, 16, iter_reg);

	iter_reg = fw->rcvt1_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
	qla24xx_read_window(reg, 0x7730, 16, iter_reg);

	/* RISC registers. */
	iter_reg = fw->risc_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);

	/* Local memory controller registers. */
	iter_reg = fw->lmc_reg;
	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
	qla24xx_read_window(reg, 0x3070, 16, iter_reg);

	/* Fibre Protocol Module registers (extends to 0x40D0 on 81xx). */
	iter_reg = fw->fpm_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
	qla24xx_read_window(reg, 0x40D0, 16, iter_reg);

	/* Frame Buffer registers (extends to 0x61C0 on 81xx). */
	iter_reg = fw->fb_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);

	/* Multi queue registers */
	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
	    &last_chain);

	rval = qla24xx_soft_reset(ha);
	if (rval != QLA_SUCCESS)
		goto qla81xx_fw_dump_failed_0;

	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
	    &nxt);
	if (rval != QLA_SUCCESS)
		goto qla81xx_fw_dump_failed_0;

	nxt = qla2xxx_copy_queues(ha, nxt);

	nxt = qla24xx_copy_eft(ha, nxt);

	/* Chain entries -- started with MQ. */
	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
	if (last_chain) {
		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
	}

	/* Adjust valid length.
*/ ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump); qla81xx_fw_dump_failed_0: qla2xxx_dump_post_process(base_vha, rval); qla81xx_fw_dump_failed: if (!hardware_locked) spin_unlock_irqrestore(&ha->hardware_lock, flags); } void qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) { int rval; uint32_t cnt, reg_data; uint32_t risc_address; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; uint32_t __iomem *dmp_reg; uint32_t *iter_reg; uint16_t __iomem *mbx_reg; unsigned long flags; struct qla83xx_fw_dump *fw; uint32_t ext_mem_cnt; void *nxt, *nxt_chain; uint32_t *last_chain = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); risc_address = ext_mem_cnt = 0; flags = 0; if (!hardware_locked) spin_lock_irqsave(&ha->hardware_lock, flags); if (!ha->fw_dump) { ql_log(ql_log_warn, vha, 0xd00c, "No buffer available for dump!!!\n"); goto qla83xx_fw_dump_failed; } if (ha->fw_dumped) { ql_log(ql_log_warn, vha, 0xd00d, "Firmware has been previously dumped (%p) -- ignoring " "request...\n", ha->fw_dump); goto qla83xx_fw_dump_failed; } fw = &ha->fw_dump->isp.isp83; qla2xxx_prep_dump(ha, ha->fw_dump); fw->host_status = htonl(RD_REG_DWORD(&reg->host_status)); /* Pause RISC. */ rval = qla24xx_pause_risc(reg); if (rval != QLA_SUCCESS) goto qla83xx_fw_dump_failed_0; WRT_REG_DWORD(&reg->iobase_addr, 0x6000); dmp_reg = &reg->iobase_window; reg_data = RD_REG_DWORD(dmp_reg); WRT_REG_DWORD(dmp_reg, 0); dmp_reg = &reg->unused_4_1[0]; reg_data = RD_REG_DWORD(dmp_reg); WRT_REG_DWORD(dmp_reg, 0); WRT_REG_DWORD(&reg->iobase_addr, 0x6010); dmp_reg = &reg->unused_4_1[2]; reg_data = RD_REG_DWORD(dmp_reg); WRT_REG_DWORD(dmp_reg, 0); /* select PCR and disable ecc checking and correction */ WRT_REG_DWORD(&reg->iobase_addr, 0x0F70); RD_REG_DWORD(&reg->iobase_addr); WRT_REG_DWORD(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */ /* Host/Risc registers. 
*/ iter_reg = fw->host_risc_reg; iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg); qla24xx_read_window(reg, 0x7040, 16, iter_reg); /* PCIe registers. */ WRT_REG_DWORD(&reg->iobase_addr, 0x7C00); RD_REG_DWORD(&reg->iobase_addr); WRT_REG_DWORD(&reg->iobase_window, 0x01); dmp_reg = &reg->iobase_c4; fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++)); fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++)); fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window)); WRT_REG_DWORD(&reg->iobase_window, 0x00); RD_REG_DWORD(&reg->iobase_window); /* Host interface registers. */ dmp_reg = &reg->flash_addr; for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++) fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); /* Disable interrupts. */ WRT_REG_DWORD(&reg->ictrl, 0); RD_REG_DWORD(&reg->ictrl); /* Shadow registers. */ WRT_REG_DWORD(&reg->iobase_addr, 0x0F70); RD_REG_DWORD(&reg->iobase_addr); WRT_REG_DWORD(&reg->iobase_select, 0xB0000000); fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); WRT_REG_DWORD(&reg->iobase_select, 0xB0100000); fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); WRT_REG_DWORD(&reg->iobase_select, 0xB0200000); fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); WRT_REG_DWORD(&reg->iobase_select, 0xB0300000); fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); WRT_REG_DWORD(&reg->iobase_select, 0xB0400000); fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); WRT_REG_DWORD(&reg->iobase_select, 0xB0500000); fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); WRT_REG_DWORD(&reg->iobase_select, 0xB0600000); fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); WRT_REG_DWORD(&reg->iobase_select, 0xB0700000); fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); WRT_REG_DWORD(&reg->iobase_select, 0xB0800000); fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); 
WRT_REG_DWORD(&reg->iobase_select, 0xB0900000); fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000); fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); /* RISC I/O register. */ WRT_REG_DWORD(&reg->iobase_addr, 0x0010); fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window)); /* Mailbox registers. */ mbx_reg = &reg->mailbox0; for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++)); /* Transfer sequence registers. */ iter_reg = fw->xseq_gp_reg; iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg); qla24xx_read_window(reg, 0xBF70, 16, iter_reg); iter_reg = fw->xseq_0_reg; iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg); qla24xx_read_window(reg, 0xBFE0, 16, iter_reg); qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg); qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg); /* Receive sequence registers. 
*/ iter_reg = fw->rseq_gp_reg; iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg); qla24xx_read_window(reg, 0xFF70, 16, iter_reg); iter_reg = fw->rseq_0_reg; iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg); qla24xx_read_window(reg, 0xFFD0, 16, iter_reg); qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg); qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg); qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg); /* Auxiliary sequence registers. 
*/ iter_reg = fw->aseq_gp_reg; iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg); qla24xx_read_window(reg, 0xB170, 16, iter_reg); iter_reg = fw->aseq_0_reg; iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg); qla24xx_read_window(reg, 0xB0D0, 16, iter_reg); qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg); qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg); qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg); /* Command DMA registers. */ iter_reg = fw->cmd_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg); qla24xx_read_window(reg, 0x71F0, 16, iter_reg); /* Queues. 
*/ iter_reg = fw->req0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); dmp_reg = &reg->iobase_q; for (cnt = 0; cnt < 7; cnt++) *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); iter_reg = fw->resp0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); dmp_reg = &reg->iobase_q; for (cnt = 0; cnt < 7; cnt++) *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); iter_reg = fw->req1_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); dmp_reg = &reg->iobase_q; for (cnt = 0; cnt < 7; cnt++) *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); /* Transmit DMA registers. */ iter_reg = fw->xmt0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); qla24xx_read_window(reg, 0x7610, 16, iter_reg); iter_reg = fw->xmt1_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); qla24xx_read_window(reg, 0x7630, 16, iter_reg); iter_reg = fw->xmt2_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); qla24xx_read_window(reg, 0x7650, 16, iter_reg); iter_reg = fw->xmt3_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); qla24xx_read_window(reg, 0x7670, 16, iter_reg); iter_reg = fw->xmt4_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); qla24xx_read_window(reg, 0x7690, 16, iter_reg); qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); /* Receive DMA registers. */ iter_reg = fw->rcvt0_data_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); qla24xx_read_window(reg, 0x7710, 16, iter_reg); iter_reg = fw->rcvt1_data_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); qla24xx_read_window(reg, 0x7730, 16, iter_reg); /* RISC registers. 
*/ iter_reg = fw->risc_gp_reg; iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); qla24xx_read_window(reg, 0x0F70, 16, iter_reg); /* Local memory controller registers. */ iter_reg = fw->lmc_reg; iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg); qla24xx_read_window(reg, 0x3070, 16, iter_reg); /* Fibre Protocol Module registers. 
*/ iter_reg = fw->fpm_hdw_reg; iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg); qla24xx_read_window(reg, 0x40F0, 16, iter_reg); /* RQ0 Array registers. */ iter_reg = fw->rq0_array_reg; iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg); qla24xx_read_window(reg, 0x5CF0, 16, iter_reg); /* RQ1 Array registers. 
*/ iter_reg = fw->rq1_array_reg; iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg); qla24xx_read_window(reg, 0x5DF0, 16, iter_reg); /* RP0 Array registers. */ iter_reg = fw->rp0_array_reg; iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg); qla24xx_read_window(reg, 0x5EF0, 16, iter_reg); /* RP1 Array registers. 
*/ iter_reg = fw->rp1_array_reg; iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg); qla24xx_read_window(reg, 0x5FF0, 16, iter_reg); iter_reg = fw->at0_array_reg; iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg); qla24xx_read_window(reg, 0x70F0, 16, iter_reg); /* I/O Queue Control registers. */ qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg); /* Frame Buffer registers. 
*/ iter_reg = fw->fb_hdw_reg; iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg); qla24xx_read_window(reg, 0x6F00, 16, iter_reg); /* Multi queue registers */ nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset, &last_chain); rval = qla24xx_soft_reset(ha); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0xd00e, "SOFT RESET FAILED, forcing continuation of dump!!!\n"); rval = QLA_SUCCESS; ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n"); WRT_REG_DWORD(&reg->hccr, 
HCCRX_SET_RISC_RESET); RD_REG_DWORD(&reg->hccr); WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE); RD_REG_DWORD(&reg->hccr); WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET); RD_REG_DWORD(&reg->hccr); for (cnt = 30000; cnt && (RD_REG_WORD(&reg->mailbox0)); cnt--) udelay(5); if (!cnt) { nxt = fw->code_ram; nxt += sizeof(fw->code_ram), nxt += (ha->fw_memory_size - 0x100000 + 1); goto copy_queue; } else ql_log(ql_log_warn, vha, 0xd010, "bigger hammer success?\n"); } rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), &nxt); if (rval != QLA_SUCCESS) goto qla83xx_fw_dump_failed_0; copy_queue: nxt = qla2xxx_copy_queues(ha, nxt); nxt = qla24xx_copy_eft(ha, nxt); /* Chain entries -- started with MQ. */ nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); if (last_chain) { ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); } /* Adjust valid length. */ ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump); qla83xx_fw_dump_failed_0: qla2xxx_dump_post_process(base_vha, rval); qla83xx_fw_dump_failed: if (!hardware_locked) spin_unlock_irqrestore(&ha->hardware_lock, flags); } /****************************************************************************/ /* Driver Debug Functions. */ /****************************************************************************/ static inline int ql_mask_match(uint32_t level) { if (ql2xextended_error_logging == 1) ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK; return (level & ql2xextended_error_logging) == level; } /* * This function is for formatting and logging debug information. * It is to be used when vha is available. It formats the message * and logs it to the messages file. * parameters: * level: The level of the debug messages to be printed. * If ql2xextended_error_logging value is correctly set, * this message will appear in the messages file. * vha: Pointer to the scsi_qla_host_t. 
 * id:    This is a unique identifier for the level. It identifies the
 *        part of the code from where the message originated.
 * msg:   The message to be displayed.
 */
void
ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
{
	va_list va;
	struct va_format vaf;

	/* Drop the message unless its debug-mask bits are enabled. */
	if (!ql_mask_match(level))
		return;

	va_start(va, fmt);

	/* %pV deferred formatting: vaf carries fmt + the va_list. */
	vaf.fmt = fmt;
	vaf.va = &va;

	if (vha != NULL) {
		const struct pci_dev *pdev = vha->hw->pdev;
		/* <module-name> <pci-name> <msg-id>:<host> Message */
		pr_warn("%s [%s]-%04x:%ld: %pV",
		    QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
		    vha->host_no, &vaf);
	} else {
		/* No host yet -- log with a placeholder PCI address. */
		pr_warn("%s [%s]-%04x: : %pV",
		    QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
	}

	va_end(va);
}

/*
 * This function is for formatting and logging debug information.
 * It is to be used when vha is not available and pci is available,
 * i.e., before host allocation. It formats the message and logs it
 * to the messages file.
 * parameters:
 *    level: The level of the debug messages to be printed.
 *           If ql2xextended_error_logging value is correctly set,
 *           this message will appear in the messages file.
 *    pdev:  Pointer to the struct pci_dev.
 *    id:    This is a unique id for the level. It identifies the part
 *           of the code from where the message originated.
 *    msg:   The message to be displayed.
 */
void
ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
    const char *fmt, ...)
{
	va_list va;
	struct va_format vaf;

	if (pdev == NULL)
		return;
	/* Drop the message unless its debug-mask bits are enabled. */
	if (!ql_mask_match(level))
		return;

	va_start(va, fmt);

	vaf.fmt = fmt;
	vaf.va = &va;

	/* <module-name> <dev-name>:<msg-id> Message */
	pr_warn("%s [%s]-%04x: : %pV",
	    QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf);

	va_end(va);
}

/*
 * This function is for formatting and logging log messages.
 * It is to be used when vha is available. It formats the message
 * and logs it to the messages file. All the messages will be logged
 * irrespective of value of ql2xextended_error_logging.
 * parameters:
 *    level: The level of the log messages to be printed in the
 *           messages file.
 *    vha:   Pointer to the scsi_qla_host_t
 *    id:    This is a unique id for the level. It identifies the
 *           part of the code from where the message originated.
 *    msg:   The message to be displayed.
 */
void
ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
{
	va_list va;
	struct va_format vaf;
	char pbuf[128];

	/* Unlike ql_dbg(), gated only by severity, not the debug mask. */
	if (level > ql_errlev)
		return;

	if (vha != NULL) {
		const struct pci_dev *pdev = vha->hw->pdev;
		/* <module-name> <msg-id>:<host> Message */
		snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
		    QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no);
	} else {
		/* No host yet -- use a placeholder PCI address. */
		snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
		    QL_MSGHDR, "0000:00:00.0", id);
	}
	/* Belt-and-braces termination (snprintf already terminates). */
	pbuf[sizeof(pbuf) - 1] = 0;

	va_start(va, fmt);

	/* %pV deferred formatting: vaf carries fmt + the va_list. */
	vaf.fmt = fmt;
	vaf.va = &va;

	/* Map driver severity onto the matching printk level. */
	switch (level) {
	case ql_log_fatal: /* FATAL LOG */
		pr_crit("%s%pV", pbuf, &vaf);
		break;
	case ql_log_warn:
		pr_err("%s%pV", pbuf, &vaf);
		break;
	case ql_log_info:
		pr_warn("%s%pV", pbuf, &vaf);
		break;
	default:
		pr_info("%s%pV", pbuf, &vaf);
		break;
	}

	va_end(va);
}

/*
 * This function is for formatting and logging log messages.
 * It is to be used when vha is not available and pci is available,
 * i.e., before host allocation. It formats the message and logs
 * it to the messages file. All the messages are logged irrespective
 * of the value of ql2xextended_error_logging.
 * parameters:
 *    level: The level of the log messages to be printed in the
 *           messages file.
 *    pdev:  Pointer to the struct pci_dev.
 *    id:    This is a unique id for the level. It identifies the
 *           part of the code from where the message originated.
 *    msg:   The message to be displayed.
 */
void
ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
    const char *fmt, ...)
{
	va_list va;
	struct va_format vaf;
	char pbuf[128];

	if (pdev == NULL)
		return;
	/* Gated only by severity, not the debug mask. */
	if (level > ql_errlev)
		return;

	/* <module-name> <dev-name>:<msg-id> Message */
	snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
	    QL_MSGHDR, dev_name(&(pdev->dev)), id);
	/* Belt-and-braces termination (snprintf already terminates). */
	pbuf[sizeof(pbuf) - 1] = 0;

	va_start(va, fmt);

	/* %pV deferred formatting: vaf carries fmt + the va_list. */
	vaf.fmt = fmt;
	vaf.va = &va;

	/* Map driver severity onto the matching printk level. */
	switch (level) {
	case ql_log_fatal: /* FATAL LOG */
		pr_crit("%s%pV", pbuf, &vaf);
		break;
	case ql_log_warn:
		pr_err("%s%pV", pbuf, &vaf);
		break;
	case ql_log_info:
		pr_warn("%s%pV", pbuf, &vaf);
		break;
	default:
		pr_info("%s%pV", pbuf, &vaf);
		break;
	}

	va_end(va);
}

/*
 * Dump the first six mailbox registers at the given debug level, picking
 * the register bank that matches the adapter generation (82xx / FWI2 /
 * legacy).
 */
void
ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
{
	int i;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint16_t __iomem *mbx_reg;

	if (!ql_mask_match(level))
		return;

	/* Select the mailbox bank for this adapter family. */
	if (IS_QLA82XX(ha))
		mbx_reg = &reg82->mailbox_in[0];
	else if (IS_FWI2_CAPABLE(ha))
		mbx_reg = &reg24->mailbox0;
	else
		mbx_reg = MAILBOX_REG(ha, reg, 0);

	ql_dbg(level, vha, id, "Mailbox registers:\n");
	for (i = 0; i < 6; i++)
		ql_dbg(level, vha, id,
		    "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
}

/*
 * Hex-dump @size bytes of @b at the given debug level, 16 bytes per row
 * with a column-header banner.  Row continuation bytes are emitted with
 * raw printk so they stay on the same line as the ql_dbg prefix.
 */
void
ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
    uint8_t *b, uint32_t size)
{
	uint32_t cnt;
	uint8_t c;

	if (!ql_mask_match(level))
		return;

	ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 "
	    "9 Ah Bh Ch Dh Eh Fh\n");
	ql_dbg(level, vha, id, "----------------------------------"
	    "----------------------------\n");

	ql_dbg(level, vha, id, " ");
	for (cnt = 0; cnt < size;) {
		c = *b++;
		printk("%02x", (uint32_t) c);
		cnt++;
		if (!(cnt % 16))
			printk("\n");
		else
			printk(" ");
	}
	/* Terminate a partial final row. */
	if (cnt % 16)
		ql_dbg(level, vha, id, "\n");
}
gpl-2.0
nels83/android_kernel_samsung_santos10
sound/soc/ep93xx/snappercl15.c
5024
3596
/*
 * snappercl15.c -- SoC audio for Bluewater Systems Snapper CL15 module
 *
 * Copyright (C) 2008 Bluewater Systems Ltd
 * Author: Ryan Mallon
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/platform_device.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>

#include <asm/mach-types.h>
#include <mach/hardware.h>

#include "../codecs/tlv320aic23.h"
#include "ep93xx-pcm.h"

/* Fixed 256*fs master clock for 22.05 kHz-family rates (Hz). */
#define CODEC_CLOCK 5644800

/*
 * hw_params hook: route the fixed CODEC_CLOCK to both ends of the link --
 * the TLV320AIC23 consumes it (CLOCK_IN) and the EP93xx I2S controller
 * generates it (CLOCK_OUT).
 */
static int snappercl15_hw_params(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	int err;

	err = snd_soc_dai_set_sysclk(codec_dai, 0, CODEC_CLOCK,
				     SND_SOC_CLOCK_IN);
	if (err)
		return err;

	err = snd_soc_dai_set_sysclk(cpu_dai, 0, CODEC_CLOCK,
				     SND_SOC_CLOCK_OUT);
	if (err)
		return err;

	return 0;
}

static struct snd_soc_ops snappercl15_ops = {
	.hw_params	= snappercl15_hw_params,
};

/* Board-level widgets wired to the codec's pins via audio_map below. */
static const struct snd_soc_dapm_widget tlv320aic23_dapm_widgets[] = {
	SND_SOC_DAPM_HP("Headphone Jack", NULL),
	SND_SOC_DAPM_LINE("Line In", NULL),
	SND_SOC_DAPM_MIC("Mic Jack", NULL),
};

/* Sink <- source routes: codec pin names on the codec side. */
static const struct snd_soc_dapm_route audio_map[] = {
	{"Headphone Jack", NULL, "LHPOUT"},
	{"Headphone Jack", NULL, "RHPOUT"},

	{"LLINEIN", NULL, "Line In"},
	{"RLINEIN", NULL, "Line In"},

	{"MICIN", NULL, "Mic Jack"},
};

/* DAI-link init: register the board widgets and routes with DAPM. */
static int snappercl15_tlv320aic23_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec;
	struct snd_soc_dapm_context *dapm = &codec->dapm;

	snd_soc_dapm_new_controls(dapm, tlv320aic23_dapm_widgets,
				  ARRAY_SIZE(tlv320aic23_dapm_widgets));

	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));

	return 0;
}

/* Single DAI link: EP93xx I2S (clock/frame master) <-> TLV320AIC23. */
static struct snd_soc_dai_link snappercl15_dai = {
	.name		= "tlv320aic23",
	.stream_name	= "AIC23",
	.cpu_dai_name	= "ep93xx-i2s",
	.codec_dai_name	= "tlv320aic23-hifi",
	.codec_name	= "tlv320aic23-codec.0-001a",
	.platform_name	= "ep93xx-pcm-audio",
	.init		= snappercl15_tlv320aic23_init,
	.dai_fmt	= SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_IF |
			  SND_SOC_DAIFMT_CBS_CFS,
	.ops		= &snappercl15_ops,
};

static struct snd_soc_card snd_soc_snappercl15 = {
	.name		= "Snapper CL15",
	.owner		= THIS_MODULE,
	.dai_link	= &snappercl15_dai,
	.num_links	= 1,
};

/*
 * Probe: claim the shared EP93xx I2S block, then register the card.
 * The I2S claim is released again on any registration failure.
 */
static int __devinit snappercl15_probe(struct platform_device *pdev)
{
	struct snd_soc_card *card = &snd_soc_snappercl15;
	int ret;

	ret = ep93xx_i2s_acquire();
	if (ret)
		return ret;

	card->dev = &pdev->dev;

	ret = snd_soc_register_card(card);
	if (ret) {
		dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
			ret);
		ep93xx_i2s_release();
	}

	return ret;
}

/* Remove: unregister the card and hand the I2S block back. */
static int __devexit snappercl15_remove(struct platform_device *pdev)
{
	struct snd_soc_card *card = platform_get_drvdata(pdev);

	snd_soc_unregister_card(card);
	ep93xx_i2s_release();

	return 0;
}

static struct platform_driver snappercl15_driver = {
	.driver		= {
		.name	= "snappercl15-audio",
		.owner	= THIS_MODULE,
	},
	.probe		= snappercl15_probe,
	.remove		= __devexit_p(snappercl15_remove),
};

module_platform_driver(snappercl15_driver);

MODULE_AUTHOR("Ryan Mallon");
MODULE_DESCRIPTION("ALSA SoC Snapper CL15");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:snappercl15-audio");
gpl-2.0
sicknemesis/kernel_lge_hammerhead
arch/sh/kernel/cpu/sh4a/pinmux-shx3.c
7840
19207
/* * SH-X3 prototype CPU pinmux * * Copyright (C) 2010 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/gpio.h> #include <cpu/shx3.h> enum { PINMUX_RESERVED = 0, PINMUX_DATA_BEGIN, PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA, PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA, PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA, PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA, PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA, PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA, PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA, PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA, PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA, PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA, PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA, PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA, PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA, PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA, PH5_DATA, PH4_DATA, PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA, PINMUX_DATA_END, PINMUX_INPUT_BEGIN, PA7_IN, PA6_IN, PA5_IN, PA4_IN, PA3_IN, PA2_IN, PA1_IN, PA0_IN, PB7_IN, PB6_IN, PB5_IN, PB4_IN, PB3_IN, PB2_IN, PB1_IN, PB0_IN, PC7_IN, PC6_IN, PC5_IN, PC4_IN, PC3_IN, PC2_IN, PC1_IN, PC0_IN, PD7_IN, PD6_IN, PD5_IN, PD4_IN, PD3_IN, PD2_IN, PD1_IN, PD0_IN, PE7_IN, PE6_IN, PE5_IN, PE4_IN, PE3_IN, PE2_IN, PE1_IN, PE0_IN, PF7_IN, PF6_IN, PF5_IN, PF4_IN, PF3_IN, PF2_IN, PF1_IN, PF0_IN, PG7_IN, PG6_IN, PG5_IN, PG4_IN, PG3_IN, PG2_IN, PG1_IN, PG0_IN, PH5_IN, PH4_IN, PH3_IN, PH2_IN, PH1_IN, PH0_IN, PINMUX_INPUT_END, PINMUX_INPUT_PULLUP_BEGIN, PA7_IN_PU, PA6_IN_PU, PA5_IN_PU, PA4_IN_PU, PA3_IN_PU, PA2_IN_PU, PA1_IN_PU, PA0_IN_PU, PB7_IN_PU, PB6_IN_PU, PB5_IN_PU, PB4_IN_PU, PB3_IN_PU, PB2_IN_PU, PB1_IN_PU, PB0_IN_PU, PC7_IN_PU, PC6_IN_PU, PC5_IN_PU, PC4_IN_PU, PC3_IN_PU, PC2_IN_PU, PC1_IN_PU, PC0_IN_PU, PD7_IN_PU, PD6_IN_PU, PD5_IN_PU, PD4_IN_PU, PD3_IN_PU, PD2_IN_PU, PD1_IN_PU, PD0_IN_PU, PE7_IN_PU, PE6_IN_PU, PE5_IN_PU, PE4_IN_PU, PE3_IN_PU, PE2_IN_PU, 
PE1_IN_PU, PE0_IN_PU, PF7_IN_PU, PF6_IN_PU, PF5_IN_PU, PF4_IN_PU, PF3_IN_PU, PF2_IN_PU, PF1_IN_PU, PF0_IN_PU, PG7_IN_PU, PG6_IN_PU, PG5_IN_PU, PG4_IN_PU, PG3_IN_PU, PG2_IN_PU, PG1_IN_PU, PG0_IN_PU, PH5_IN_PU, PH4_IN_PU, PH3_IN_PU, PH2_IN_PU, PH1_IN_PU, PH0_IN_PU, PINMUX_INPUT_PULLUP_END, PINMUX_OUTPUT_BEGIN, PA7_OUT, PA6_OUT, PA5_OUT, PA4_OUT, PA3_OUT, PA2_OUT, PA1_OUT, PA0_OUT, PB7_OUT, PB6_OUT, PB5_OUT, PB4_OUT, PB3_OUT, PB2_OUT, PB1_OUT, PB0_OUT, PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT, PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT, PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT, PD3_OUT, PD2_OUT, PD1_OUT, PD0_OUT, PE7_OUT, PE6_OUT, PE5_OUT, PE4_OUT, PE3_OUT, PE2_OUT, PE1_OUT, PE0_OUT, PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT, PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT, PG7_OUT, PG6_OUT, PG5_OUT, PG4_OUT, PG3_OUT, PG2_OUT, PG1_OUT, PG0_OUT, PH5_OUT, PH4_OUT, PH3_OUT, PH2_OUT, PH1_OUT, PH0_OUT, PINMUX_OUTPUT_END, PINMUX_FUNCTION_BEGIN, PA7_FN, PA6_FN, PA5_FN, PA4_FN, PA3_FN, PA2_FN, PA1_FN, PA0_FN, PB7_FN, PB6_FN, PB5_FN, PB4_FN, PB3_FN, PB2_FN, PB1_FN, PB0_FN, PC7_FN, PC6_FN, PC5_FN, PC4_FN, PC3_FN, PC2_FN, PC1_FN, PC0_FN, PD7_FN, PD6_FN, PD5_FN, PD4_FN, PD3_FN, PD2_FN, PD1_FN, PD0_FN, PE7_FN, PE6_FN, PE5_FN, PE4_FN, PE3_FN, PE2_FN, PE1_FN, PE0_FN, PF7_FN, PF6_FN, PF5_FN, PF4_FN, PF3_FN, PF2_FN, PF1_FN, PF0_FN, PG7_FN, PG6_FN, PG5_FN, PG4_FN, PG3_FN, PG2_FN, PG1_FN, PG0_FN, PH5_FN, PH4_FN, PH3_FN, PH2_FN, PH1_FN, PH0_FN, PINMUX_FUNCTION_END, PINMUX_MARK_BEGIN, D31_MARK, D30_MARK, D29_MARK, D28_MARK, D27_MARK, D26_MARK, D25_MARK, D24_MARK, D23_MARK, D22_MARK, D21_MARK, D20_MARK, D19_MARK, D18_MARK, D17_MARK, D16_MARK, BACK_MARK, BREQ_MARK, WE3_MARK, WE2_MARK, CS6_MARK, CS5_MARK, CS4_MARK, CLKOUTENB_MARK, DACK3_MARK, DACK2_MARK, DACK1_MARK, DACK0_MARK, DREQ3_MARK, DREQ2_MARK, DREQ1_MARK, DREQ0_MARK, IRQ3_MARK, IRQ2_MARK, IRQ1_MARK, IRQ0_MARK, DRAK3_MARK, DRAK2_MARK, DRAK1_MARK, DRAK0_MARK, SCK3_MARK, SCK2_MARK, SCK1_MARK, SCK0_MARK, IRL3_MARK, IRL2_MARK, IRL1_MARK, IRL0_MARK, TXD3_MARK, TXD2_MARK, 
TXD1_MARK, TXD0_MARK, RXD3_MARK, RXD2_MARK, RXD1_MARK, RXD0_MARK, CE2B_MARK, CE2A_MARK, IOIS16_MARK, STATUS1_MARK, STATUS0_MARK, IRQOUT_MARK, PINMUX_MARK_END, }; static pinmux_enum_t shx3_pinmux_data[] = { /* PA GPIO */ PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT, PA7_IN_PU), PINMUX_DATA(PA6_DATA, PA6_IN, PA6_OUT, PA6_IN_PU), PINMUX_DATA(PA5_DATA, PA5_IN, PA5_OUT, PA5_IN_PU), PINMUX_DATA(PA4_DATA, PA4_IN, PA4_OUT, PA4_IN_PU), PINMUX_DATA(PA3_DATA, PA3_IN, PA3_OUT, PA3_IN_PU), PINMUX_DATA(PA2_DATA, PA2_IN, PA2_OUT, PA2_IN_PU), PINMUX_DATA(PA1_DATA, PA1_IN, PA1_OUT, PA1_IN_PU), PINMUX_DATA(PA0_DATA, PA0_IN, PA0_OUT, PA0_IN_PU), /* PB GPIO */ PINMUX_DATA(PB7_DATA, PB7_IN, PB7_OUT, PB7_IN_PU), PINMUX_DATA(PB6_DATA, PB6_IN, PB6_OUT, PB6_IN_PU), PINMUX_DATA(PB5_DATA, PB5_IN, PB5_OUT, PB5_IN_PU), PINMUX_DATA(PB4_DATA, PB4_IN, PB4_OUT, PB4_IN_PU), PINMUX_DATA(PB3_DATA, PB3_IN, PB3_OUT, PB3_IN_PU), PINMUX_DATA(PB2_DATA, PB2_IN, PB2_OUT, PB2_IN_PU), PINMUX_DATA(PB1_DATA, PB1_IN, PB1_OUT, PB1_IN_PU), PINMUX_DATA(PB0_DATA, PB0_IN, PB0_OUT, PB0_IN_PU), /* PC GPIO */ PINMUX_DATA(PC7_DATA, PC7_IN, PC7_OUT, PC7_IN_PU), PINMUX_DATA(PC6_DATA, PC6_IN, PC6_OUT, PC6_IN_PU), PINMUX_DATA(PC5_DATA, PC5_IN, PC5_OUT, PC5_IN_PU), PINMUX_DATA(PC4_DATA, PC4_IN, PC4_OUT, PC4_IN_PU), PINMUX_DATA(PC3_DATA, PC3_IN, PC3_OUT, PC3_IN_PU), PINMUX_DATA(PC2_DATA, PC2_IN, PC2_OUT, PC2_IN_PU), PINMUX_DATA(PC1_DATA, PC1_IN, PC1_OUT, PC1_IN_PU), PINMUX_DATA(PC0_DATA, PC0_IN, PC0_OUT, PC0_IN_PU), /* PD GPIO */ PINMUX_DATA(PD7_DATA, PD7_IN, PD7_OUT, PD7_IN_PU), PINMUX_DATA(PD6_DATA, PD6_IN, PD6_OUT, PD6_IN_PU), PINMUX_DATA(PD5_DATA, PD5_IN, PD5_OUT, PD5_IN_PU), PINMUX_DATA(PD4_DATA, PD4_IN, PD4_OUT, PD4_IN_PU), PINMUX_DATA(PD3_DATA, PD3_IN, PD3_OUT, PD3_IN_PU), PINMUX_DATA(PD2_DATA, PD2_IN, PD2_OUT, PD2_IN_PU), PINMUX_DATA(PD1_DATA, PD1_IN, PD1_OUT, PD1_IN_PU), PINMUX_DATA(PD0_DATA, PD0_IN, PD0_OUT, PD0_IN_PU), /* PE GPIO */ PINMUX_DATA(PE7_DATA, PE7_IN, PE7_OUT, PE7_IN_PU), PINMUX_DATA(PE6_DATA, PE6_IN, PE6_OUT, 
PE6_IN_PU), PINMUX_DATA(PE5_DATA, PE5_IN, PE5_OUT, PE5_IN_PU), PINMUX_DATA(PE4_DATA, PE4_IN, PE4_OUT, PE4_IN_PU), PINMUX_DATA(PE3_DATA, PE3_IN, PE3_OUT, PE3_IN_PU), PINMUX_DATA(PE2_DATA, PE2_IN, PE2_OUT, PE2_IN_PU), PINMUX_DATA(PE1_DATA, PE1_IN, PE1_OUT, PE1_IN_PU), PINMUX_DATA(PE0_DATA, PE0_IN, PE0_OUT, PE0_IN_PU), /* PF GPIO */ PINMUX_DATA(PF7_DATA, PF7_IN, PF7_OUT, PF7_IN_PU), PINMUX_DATA(PF6_DATA, PF6_IN, PF6_OUT, PF6_IN_PU), PINMUX_DATA(PF5_DATA, PF5_IN, PF5_OUT, PF5_IN_PU), PINMUX_DATA(PF4_DATA, PF4_IN, PF4_OUT, PF4_IN_PU), PINMUX_DATA(PF3_DATA, PF3_IN, PF3_OUT, PF3_IN_PU), PINMUX_DATA(PF2_DATA, PF2_IN, PF2_OUT, PF2_IN_PU), PINMUX_DATA(PF1_DATA, PF1_IN, PF1_OUT, PF1_IN_PU), PINMUX_DATA(PF0_DATA, PF0_IN, PF0_OUT, PF0_IN_PU), /* PG GPIO */ PINMUX_DATA(PG7_DATA, PG7_IN, PG7_OUT, PG7_IN_PU), PINMUX_DATA(PG6_DATA, PG6_IN, PG6_OUT, PG6_IN_PU), PINMUX_DATA(PG5_DATA, PG5_IN, PG5_OUT, PG5_IN_PU), PINMUX_DATA(PG4_DATA, PG4_IN, PG4_OUT, PG4_IN_PU), PINMUX_DATA(PG3_DATA, PG3_IN, PG3_OUT, PG3_IN_PU), PINMUX_DATA(PG2_DATA, PG2_IN, PG2_OUT, PG2_IN_PU), PINMUX_DATA(PG1_DATA, PG1_IN, PG1_OUT, PG1_IN_PU), PINMUX_DATA(PG0_DATA, PG0_IN, PG0_OUT, PG0_IN_PU), /* PH GPIO */ PINMUX_DATA(PH5_DATA, PH5_IN, PH5_OUT, PH5_IN_PU), PINMUX_DATA(PH4_DATA, PH4_IN, PH4_OUT, PH4_IN_PU), PINMUX_DATA(PH3_DATA, PH3_IN, PH3_OUT, PH3_IN_PU), PINMUX_DATA(PH2_DATA, PH2_IN, PH2_OUT, PH2_IN_PU), PINMUX_DATA(PH1_DATA, PH1_IN, PH1_OUT, PH1_IN_PU), PINMUX_DATA(PH0_DATA, PH0_IN, PH0_OUT, PH0_IN_PU), /* PA FN */ PINMUX_DATA(D31_MARK, PA7_FN), PINMUX_DATA(D30_MARK, PA6_FN), PINMUX_DATA(D29_MARK, PA5_FN), PINMUX_DATA(D28_MARK, PA4_FN), PINMUX_DATA(D27_MARK, PA3_FN), PINMUX_DATA(D26_MARK, PA2_FN), PINMUX_DATA(D25_MARK, PA1_FN), PINMUX_DATA(D24_MARK, PA0_FN), /* PB FN */ PINMUX_DATA(D23_MARK, PB7_FN), PINMUX_DATA(D22_MARK, PB6_FN), PINMUX_DATA(D21_MARK, PB5_FN), PINMUX_DATA(D20_MARK, PB4_FN), PINMUX_DATA(D19_MARK, PB3_FN), PINMUX_DATA(D18_MARK, PB2_FN), PINMUX_DATA(D17_MARK, PB1_FN), PINMUX_DATA(D16_MARK, 
PB0_FN), /* PC FN */ PINMUX_DATA(BACK_MARK, PC7_FN), PINMUX_DATA(BREQ_MARK, PC6_FN), PINMUX_DATA(WE3_MARK, PC5_FN), PINMUX_DATA(WE2_MARK, PC4_FN), PINMUX_DATA(CS6_MARK, PC3_FN), PINMUX_DATA(CS5_MARK, PC2_FN), PINMUX_DATA(CS4_MARK, PC1_FN), PINMUX_DATA(CLKOUTENB_MARK, PC0_FN), /* PD FN */ PINMUX_DATA(DACK3_MARK, PD7_FN), PINMUX_DATA(DACK2_MARK, PD6_FN), PINMUX_DATA(DACK1_MARK, PD5_FN), PINMUX_DATA(DACK0_MARK, PD4_FN), PINMUX_DATA(DREQ3_MARK, PD3_FN), PINMUX_DATA(DREQ2_MARK, PD2_FN), PINMUX_DATA(DREQ1_MARK, PD1_FN), PINMUX_DATA(DREQ0_MARK, PD0_FN), /* PE FN */ PINMUX_DATA(IRQ3_MARK, PE7_FN), PINMUX_DATA(IRQ2_MARK, PE6_FN), PINMUX_DATA(IRQ1_MARK, PE5_FN), PINMUX_DATA(IRQ0_MARK, PE4_FN), PINMUX_DATA(DRAK3_MARK, PE3_FN), PINMUX_DATA(DRAK2_MARK, PE2_FN), PINMUX_DATA(DRAK1_MARK, PE1_FN), PINMUX_DATA(DRAK0_MARK, PE0_FN), /* PF FN */ PINMUX_DATA(SCK3_MARK, PF7_FN), PINMUX_DATA(SCK2_MARK, PF6_FN), PINMUX_DATA(SCK1_MARK, PF5_FN), PINMUX_DATA(SCK0_MARK, PF4_FN), PINMUX_DATA(IRL3_MARK, PF3_FN), PINMUX_DATA(IRL2_MARK, PF2_FN), PINMUX_DATA(IRL1_MARK, PF1_FN), PINMUX_DATA(IRL0_MARK, PF0_FN), /* PG FN */ PINMUX_DATA(TXD3_MARK, PG7_FN), PINMUX_DATA(TXD2_MARK, PG6_FN), PINMUX_DATA(TXD1_MARK, PG5_FN), PINMUX_DATA(TXD0_MARK, PG4_FN), PINMUX_DATA(RXD3_MARK, PG3_FN), PINMUX_DATA(RXD2_MARK, PG2_FN), PINMUX_DATA(RXD1_MARK, PG1_FN), PINMUX_DATA(RXD0_MARK, PG0_FN), /* PH FN */ PINMUX_DATA(CE2B_MARK, PH5_FN), PINMUX_DATA(CE2A_MARK, PH4_FN), PINMUX_DATA(IOIS16_MARK, PH3_FN), PINMUX_DATA(STATUS1_MARK, PH2_FN), PINMUX_DATA(STATUS0_MARK, PH1_FN), PINMUX_DATA(IRQOUT_MARK, PH0_FN), }; static struct pinmux_gpio shx3_pinmux_gpios[] = { /* PA */ PINMUX_GPIO(GPIO_PA7, PA7_DATA), PINMUX_GPIO(GPIO_PA6, PA6_DATA), PINMUX_GPIO(GPIO_PA5, PA5_DATA), PINMUX_GPIO(GPIO_PA4, PA4_DATA), PINMUX_GPIO(GPIO_PA3, PA3_DATA), PINMUX_GPIO(GPIO_PA2, PA2_DATA), PINMUX_GPIO(GPIO_PA1, PA1_DATA), PINMUX_GPIO(GPIO_PA0, PA0_DATA), /* PB */ PINMUX_GPIO(GPIO_PB7, PB7_DATA), PINMUX_GPIO(GPIO_PB6, PB6_DATA), PINMUX_GPIO(GPIO_PB5, 
PB5_DATA), PINMUX_GPIO(GPIO_PB4, PB4_DATA), PINMUX_GPIO(GPIO_PB3, PB3_DATA), PINMUX_GPIO(GPIO_PB2, PB2_DATA), PINMUX_GPIO(GPIO_PB1, PB1_DATA), PINMUX_GPIO(GPIO_PB0, PB0_DATA), /* PC */ PINMUX_GPIO(GPIO_PC7, PC7_DATA), PINMUX_GPIO(GPIO_PC6, PC6_DATA), PINMUX_GPIO(GPIO_PC5, PC5_DATA), PINMUX_GPIO(GPIO_PC4, PC4_DATA), PINMUX_GPIO(GPIO_PC3, PC3_DATA), PINMUX_GPIO(GPIO_PC2, PC2_DATA), PINMUX_GPIO(GPIO_PC1, PC1_DATA), PINMUX_GPIO(GPIO_PC0, PC0_DATA), /* PD */ PINMUX_GPIO(GPIO_PD7, PD7_DATA), PINMUX_GPIO(GPIO_PD6, PD6_DATA), PINMUX_GPIO(GPIO_PD5, PD5_DATA), PINMUX_GPIO(GPIO_PD4, PD4_DATA), PINMUX_GPIO(GPIO_PD3, PD3_DATA), PINMUX_GPIO(GPIO_PD2, PD2_DATA), PINMUX_GPIO(GPIO_PD1, PD1_DATA), PINMUX_GPIO(GPIO_PD0, PD0_DATA), /* PE */ PINMUX_GPIO(GPIO_PE7, PE7_DATA), PINMUX_GPIO(GPIO_PE6, PE6_DATA), PINMUX_GPIO(GPIO_PE5, PE5_DATA), PINMUX_GPIO(GPIO_PE4, PE4_DATA), PINMUX_GPIO(GPIO_PE3, PE3_DATA), PINMUX_GPIO(GPIO_PE2, PE2_DATA), PINMUX_GPIO(GPIO_PE1, PE1_DATA), PINMUX_GPIO(GPIO_PE0, PE0_DATA), /* PF */ PINMUX_GPIO(GPIO_PF7, PF7_DATA), PINMUX_GPIO(GPIO_PF6, PF6_DATA), PINMUX_GPIO(GPIO_PF5, PF5_DATA), PINMUX_GPIO(GPIO_PF4, PF4_DATA), PINMUX_GPIO(GPIO_PF3, PF3_DATA), PINMUX_GPIO(GPIO_PF2, PF2_DATA), PINMUX_GPIO(GPIO_PF1, PF1_DATA), PINMUX_GPIO(GPIO_PF0, PF0_DATA), /* PG */ PINMUX_GPIO(GPIO_PG7, PG7_DATA), PINMUX_GPIO(GPIO_PG6, PG6_DATA), PINMUX_GPIO(GPIO_PG5, PG5_DATA), PINMUX_GPIO(GPIO_PG4, PG4_DATA), PINMUX_GPIO(GPIO_PG3, PG3_DATA), PINMUX_GPIO(GPIO_PG2, PG2_DATA), PINMUX_GPIO(GPIO_PG1, PG1_DATA), PINMUX_GPIO(GPIO_PG0, PG0_DATA), /* PH */ PINMUX_GPIO(GPIO_PH5, PH5_DATA), PINMUX_GPIO(GPIO_PH4, PH4_DATA), PINMUX_GPIO(GPIO_PH3, PH3_DATA), PINMUX_GPIO(GPIO_PH2, PH2_DATA), PINMUX_GPIO(GPIO_PH1, PH1_DATA), PINMUX_GPIO(GPIO_PH0, PH0_DATA), /* FN */ PINMUX_GPIO(GPIO_FN_D31, D31_MARK), PINMUX_GPIO(GPIO_FN_D30, D30_MARK), PINMUX_GPIO(GPIO_FN_D29, D29_MARK), PINMUX_GPIO(GPIO_FN_D28, D28_MARK), PINMUX_GPIO(GPIO_FN_D27, D27_MARK), PINMUX_GPIO(GPIO_FN_D26, D26_MARK), PINMUX_GPIO(GPIO_FN_D25, 
D25_MARK), PINMUX_GPIO(GPIO_FN_D24, D24_MARK), PINMUX_GPIO(GPIO_FN_D23, D23_MARK), PINMUX_GPIO(GPIO_FN_D22, D22_MARK), PINMUX_GPIO(GPIO_FN_D21, D21_MARK), PINMUX_GPIO(GPIO_FN_D20, D20_MARK), PINMUX_GPIO(GPIO_FN_D19, D19_MARK), PINMUX_GPIO(GPIO_FN_D18, D18_MARK), PINMUX_GPIO(GPIO_FN_D17, D17_MARK), PINMUX_GPIO(GPIO_FN_D16, D16_MARK), PINMUX_GPIO(GPIO_FN_BACK, BACK_MARK), PINMUX_GPIO(GPIO_FN_BREQ, BREQ_MARK), PINMUX_GPIO(GPIO_FN_WE3, WE3_MARK), PINMUX_GPIO(GPIO_FN_WE2, WE2_MARK), PINMUX_GPIO(GPIO_FN_CS6, CS6_MARK), PINMUX_GPIO(GPIO_FN_CS5, CS5_MARK), PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK), PINMUX_GPIO(GPIO_FN_CLKOUTENB, CLKOUTENB_MARK), PINMUX_GPIO(GPIO_FN_DACK3, DACK3_MARK), PINMUX_GPIO(GPIO_FN_DACK2, DACK2_MARK), PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK), PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK), PINMUX_GPIO(GPIO_FN_DREQ3, DREQ3_MARK), PINMUX_GPIO(GPIO_FN_DREQ2, DREQ2_MARK), PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK), PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK), PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK), PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK), PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK), PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK), PINMUX_GPIO(GPIO_FN_DRAK3, DRAK3_MARK), PINMUX_GPIO(GPIO_FN_DRAK2, DRAK2_MARK), PINMUX_GPIO(GPIO_FN_DRAK1, DRAK1_MARK), PINMUX_GPIO(GPIO_FN_DRAK0, DRAK0_MARK), PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK), PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK), PINMUX_GPIO(GPIO_FN_SCK1, SCK1_MARK), PINMUX_GPIO(GPIO_FN_SCK0, SCK0_MARK), PINMUX_GPIO(GPIO_FN_IRL3, IRL3_MARK), PINMUX_GPIO(GPIO_FN_IRL2, IRL2_MARK), PINMUX_GPIO(GPIO_FN_IRL1, IRL1_MARK), PINMUX_GPIO(GPIO_FN_IRL0, IRL0_MARK), PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK), PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK), PINMUX_GPIO(GPIO_FN_TXD1, TXD1_MARK), PINMUX_GPIO(GPIO_FN_TXD0, TXD0_MARK), PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK), PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK), PINMUX_GPIO(GPIO_FN_RXD1, RXD1_MARK), PINMUX_GPIO(GPIO_FN_RXD0, RXD0_MARK), PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK), PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK), PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK), 
PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK), PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK), PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK), }; static struct pinmux_cfg_reg shx3_pinmux_config_regs[] = { { PINMUX_CFG_REG("PABCR", 0xffc70000, 32, 2) { PA7_FN, PA7_OUT, PA7_IN, PA7_IN_PU, PA6_FN, PA6_OUT, PA6_IN, PA6_IN_PU, PA5_FN, PA5_OUT, PA5_IN, PA5_IN_PU, PA4_FN, PA4_OUT, PA4_IN, PA4_IN_PU, PA3_FN, PA3_OUT, PA3_IN, PA3_IN_PU, PA2_FN, PA2_OUT, PA2_IN, PA2_IN_PU, PA1_FN, PA1_OUT, PA1_IN, PA1_IN_PU, PA0_FN, PA0_OUT, PA0_IN, PA0_IN_PU, PB7_FN, PB7_OUT, PB7_IN, PB7_IN_PU, PB6_FN, PB6_OUT, PB6_IN, PB6_IN_PU, PB5_FN, PB5_OUT, PB5_IN, PB5_IN_PU, PB4_FN, PB4_OUT, PB4_IN, PB4_IN_PU, PB3_FN, PB3_OUT, PB3_IN, PB3_IN_PU, PB2_FN, PB2_OUT, PB2_IN, PB2_IN_PU, PB1_FN, PB1_OUT, PB1_IN, PB1_IN_PU, PB0_FN, PB0_OUT, PB0_IN, PB0_IN_PU, }, }, { PINMUX_CFG_REG("PCDCR", 0xffc70004, 32, 2) { PC7_FN, PC7_OUT, PC7_IN, PC7_IN_PU, PC6_FN, PC6_OUT, PC6_IN, PC6_IN_PU, PC5_FN, PC5_OUT, PC5_IN, PC5_IN_PU, PC4_FN, PC4_OUT, PC4_IN, PC4_IN_PU, PC3_FN, PC3_OUT, PC3_IN, PC3_IN_PU, PC2_FN, PC2_OUT, PC2_IN, PC2_IN_PU, PC1_FN, PC1_OUT, PC1_IN, PC1_IN_PU, PC0_FN, PC0_OUT, PC0_IN, PC0_IN_PU, PD7_FN, PD7_OUT, PD7_IN, PD7_IN_PU, PD6_FN, PD6_OUT, PD6_IN, PD6_IN_PU, PD5_FN, PD5_OUT, PD5_IN, PD5_IN_PU, PD4_FN, PD4_OUT, PD4_IN, PD4_IN_PU, PD3_FN, PD3_OUT, PD3_IN, PD3_IN_PU, PD2_FN, PD2_OUT, PD2_IN, PD2_IN_PU, PD1_FN, PD1_OUT, PD1_IN, PD1_IN_PU, PD0_FN, PD0_OUT, PD0_IN, PD0_IN_PU, }, }, { PINMUX_CFG_REG("PEFCR", 0xffc70008, 32, 2) { PE7_FN, PE7_OUT, PE7_IN, PE7_IN_PU, PE6_FN, PE6_OUT, PE6_IN, PE6_IN_PU, PE5_FN, PE5_OUT, PE5_IN, PE5_IN_PU, PE4_FN, PE4_OUT, PE4_IN, PE4_IN_PU, PE3_FN, PE3_OUT, PE3_IN, PE3_IN_PU, PE2_FN, PE2_OUT, PE2_IN, PE2_IN_PU, PE1_FN, PE1_OUT, PE1_IN, PE1_IN_PU, PE0_FN, PE0_OUT, PE0_IN, PE0_IN_PU, PF7_FN, PF7_OUT, PF7_IN, PF7_IN_PU, PF6_FN, PF6_OUT, PF6_IN, PF6_IN_PU, PF5_FN, PF5_OUT, PF5_IN, PF5_IN_PU, PF4_FN, PF4_OUT, PF4_IN, PF4_IN_PU, PF3_FN, PF3_OUT, PF3_IN, PF3_IN_PU, PF2_FN, PF2_OUT, PF2_IN, 
PF2_IN_PU, PF1_FN, PF1_OUT, PF1_IN, PF1_IN_PU, PF0_FN, PF0_OUT, PF0_IN, PF0_IN_PU, }, }, { PINMUX_CFG_REG("PGHCR", 0xffc7000c, 32, 2) { PG7_FN, PG7_OUT, PG7_IN, PG7_IN_PU, PG6_FN, PG6_OUT, PG6_IN, PG6_IN_PU, PG5_FN, PG5_OUT, PG5_IN, PG5_IN_PU, PG4_FN, PG4_OUT, PG4_IN, PG4_IN_PU, PG3_FN, PG3_OUT, PG3_IN, PG3_IN_PU, PG2_FN, PG2_OUT, PG2_IN, PG2_IN_PU, PG1_FN, PG1_OUT, PG1_IN, PG1_IN_PU, PG0_FN, PG0_OUT, PG0_IN, PG0_IN_PU, 0, 0, 0, 0, 0, 0, 0, 0, PH5_FN, PH5_OUT, PH5_IN, PH5_IN_PU, PH4_FN, PH4_OUT, PH4_IN, PH4_IN_PU, PH3_FN, PH3_OUT, PH3_IN, PH3_IN_PU, PH2_FN, PH2_OUT, PH2_IN, PH2_IN_PU, PH1_FN, PH1_OUT, PH1_IN, PH1_IN_PU, PH0_FN, PH0_OUT, PH0_IN, PH0_IN_PU, }, }, { }, }; static struct pinmux_data_reg shx3_pinmux_data_regs[] = { { PINMUX_DATA_REG("PABDR", 0xffc70010, 32) { 0, 0, 0, 0, 0, 0, 0, 0, PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA, PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA, 0, 0, 0, 0, 0, 0, 0, 0, PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA, PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA, }, }, { PINMUX_DATA_REG("PCDDR", 0xffc70014, 32) { 0, 0, 0, 0, 0, 0, 0, 0, PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA, PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA, 0, 0, 0, 0, 0, 0, 0, 0, PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA, PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA, }, }, { PINMUX_DATA_REG("PEFDR", 0xffc70018, 32) { 0, 0, 0, 0, 0, 0, 0, 0, PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA, PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA, 0, 0, 0, 0, 0, 0, 0, 0, PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA, PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA, }, }, { PINMUX_DATA_REG("PGHDR", 0xffc7001c, 32) { 0, 0, 0, 0, 0, 0, 0, 0, PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA, PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PH5_DATA, PH4_DATA, PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA, }, }, { }, }; static struct pinmux_info shx3_pinmux_info = { .name = "shx3_pfc", .reserved_id = PINMUX_RESERVED, .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END }, .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END }, .input_pu = { 
PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END }, .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END }, .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END }, .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END }, .first_gpio = GPIO_PA7, .last_gpio = GPIO_FN_IRQOUT, .gpios = shx3_pinmux_gpios, .gpio_data = shx3_pinmux_data, .gpio_data_size = ARRAY_SIZE(shx3_pinmux_data), .cfg_regs = shx3_pinmux_config_regs, .data_regs = shx3_pinmux_data_regs, }; static int __init shx3_pinmux_setup(void) { return register_pinmux(&shx3_pinmux_info); } arch_initcall(shx3_pinmux_setup);
gpl-2.0
sleshepic/SM-G900P_kernel
net/netfilter/ipvs/ip_vs_rr.c
8352
3003
/* * IPVS: Round-Robin Scheduling module * * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> * Peter Kese <peter.kese@ijs.si> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Fixes/Changes: * Wensong Zhang : changed the ip_vs_rr_schedule to return dest * Julian Anastasov : fixed the NULL pointer access bug in debugging * Wensong Zhang : changed some comestics things for debugging * Wensong Zhang : changed for the d-linked destination list * Wensong Zhang : added the ip_vs_rr_update_svc * Wensong Zhang : added any dest with weight=0 is quiesced * */ #define KMSG_COMPONENT "IPVS" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <net/ip_vs.h> static int ip_vs_rr_init_svc(struct ip_vs_service *svc) { svc->sched_data = &svc->destinations; return 0; } static int ip_vs_rr_update_svc(struct ip_vs_service *svc) { svc->sched_data = &svc->destinations; return 0; } /* * Round-Robin Scheduling */ static struct ip_vs_dest * ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) { struct list_head *p, *q; struct ip_vs_dest *dest; IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); write_lock(&svc->sched_lock); p = (struct list_head *)svc->sched_data; p = p->next; q = p; do { /* skip list head */ if (q == &svc->destinations) { q = q->next; continue; } dest = list_entry(q, struct ip_vs_dest, n_list); if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) && atomic_read(&dest->weight) > 0) /* HIT */ goto out; q = q->next; } while (q != p); write_unlock(&svc->sched_lock); ip_vs_scheduler_err(svc, "no destination available"); return NULL; out: svc->sched_data = q; write_unlock(&svc->sched_lock); IP_VS_DBG_BUF(6, "RR: server %s:%u " "activeconns %d refcnt %d weight %d\n", IP_VS_DBG_ADDR(svc->af, &dest->addr), 
ntohs(dest->port), atomic_read(&dest->activeconns), atomic_read(&dest->refcnt), atomic_read(&dest->weight)); return dest; } static struct ip_vs_scheduler ip_vs_rr_scheduler = { .name = "rr", /* name */ .refcnt = ATOMIC_INIT(0), .module = THIS_MODULE, .n_list = LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list), .init_service = ip_vs_rr_init_svc, .update_service = ip_vs_rr_update_svc, .schedule = ip_vs_rr_schedule, }; static int __init ip_vs_rr_init(void) { return register_ip_vs_scheduler(&ip_vs_rr_scheduler); } static void __exit ip_vs_rr_cleanup(void) { unregister_ip_vs_scheduler(&ip_vs_rr_scheduler); } module_init(ip_vs_rr_init); module_exit(ip_vs_rr_cleanup); MODULE_LICENSE("GPL");
gpl-2.0
oppo-source/Neo5-kernel-source
arch/powerpc/platforms/pseries/pci.c
9376
3169
/* * Copyright (C) 2001 Dave Engebretsen, IBM Corporation * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM * * pSeries specific routines for PCI. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/string.h> #include <asm/eeh.h> #include <asm/pci-bridge.h> #include <asm/prom.h> #include <asm/ppc-pci.h> #if 0 void pcibios_name_device(struct pci_dev *dev) { struct device_node *dn; /* * Add IBM loc code (slot) as a prefix to the device names for service */ dn = pci_device_to_OF_node(dev); if (dn) { const char *loc_code = of_get_property(dn, "ibm,loc-code", 0); if (loc_code) { int loc_len = strlen(loc_code); if (loc_len < sizeof(dev->dev.name)) { memmove(dev->dev.name+loc_len+1, dev->dev.name, sizeof(dev->dev.name)-loc_len-1); memcpy(dev->dev.name, loc_code, loc_len); dev->dev.name[loc_len] = ' '; dev->dev.name[sizeof(dev->dev.name)-1] = '\0'; } } } } DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_name_device); #endif static void __init pSeries_request_regions(void) { if (!isa_io_base) return; request_region(0x20,0x20,"pic1"); request_region(0xa0,0x20,"pic2"); request_region(0x00,0x20,"dma1"); request_region(0x40,0x20,"timer"); request_region(0x80,0x10,"dma page reg"); 
request_region(0xc0,0x20,"dma2"); } void __init pSeries_final_fixup(void) { pSeries_request_regions(); pci_addr_cache_build(); } /* * Assume the winbond 82c105 is the IDE controller on a * p610/p615/p630. We should probably be more careful in case * someone tries to plug in a similar adapter. */ static void fixup_winbond_82c105(struct pci_dev* dev) { int i; unsigned int reg; if (!machine_is(pseries)) return; printk("Using INTC for W82c105 IDE controller.\n"); pci_read_config_dword(dev, 0x40, &reg); /* Enable LEGIRQ to use INTC instead of ISA interrupts */ pci_write_config_dword(dev, 0x40, reg | (1<<11)); for (i = 0; i < DEVICE_COUNT_RESOURCE; ++i) { /* zap the 2nd function of the winbond chip */ if (dev->resource[i].flags & IORESOURCE_IO && dev->bus->number == 0 && dev->devfn == 0x81) dev->resource[i].flags &= ~IORESOURCE_IO; if (dev->resource[i].start == 0 && dev->resource[i].end) { dev->resource[i].flags = 0; dev->resource[i].end = 0; } } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105, fixup_winbond_82c105);
gpl-2.0
kirananto/RAZORFERRARI
fs/fscache/netfs.c
11680
2660
/* FS-Cache netfs (client) registration * * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #define FSCACHE_DEBUG_LEVEL COOKIE #include <linux/module.h> #include <linux/slab.h> #include "internal.h" static LIST_HEAD(fscache_netfs_list); /* * register a network filesystem for caching */ int __fscache_register_netfs(struct fscache_netfs *netfs) { struct fscache_netfs *ptr; int ret; _enter("{%s}", netfs->name); INIT_LIST_HEAD(&netfs->link); /* allocate a cookie for the primary index */ netfs->primary_index = kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL); if (!netfs->primary_index) { _leave(" = -ENOMEM"); return -ENOMEM; } /* initialise the primary index cookie */ atomic_set(&netfs->primary_index->usage, 1); atomic_set(&netfs->primary_index->n_children, 0); netfs->primary_index->def = &fscache_fsdef_netfs_def; netfs->primary_index->parent = &fscache_fsdef_index; netfs->primary_index->netfs_data = netfs; atomic_inc(&netfs->primary_index->parent->usage); atomic_inc(&netfs->primary_index->parent->n_children); spin_lock_init(&netfs->primary_index->lock); INIT_HLIST_HEAD(&netfs->primary_index->backing_objects); /* check the netfs type is not already present */ down_write(&fscache_addremove_sem); ret = -EEXIST; list_for_each_entry(ptr, &fscache_netfs_list, link) { if (strcmp(ptr->name, netfs->name) == 0) goto already_registered; } list_add(&netfs->link, &fscache_netfs_list); ret = 0; printk(KERN_NOTICE "FS-Cache: Netfs '%s' registered for caching\n", netfs->name); already_registered: up_write(&fscache_addremove_sem); if (ret < 0) { netfs->primary_index->parent = NULL; __fscache_cookie_put(netfs->primary_index); netfs->primary_index = NULL; } _leave(" = %d", ret); 
return ret; } EXPORT_SYMBOL(__fscache_register_netfs); /* * unregister a network filesystem from the cache * - all cookies must have been released first */ void __fscache_unregister_netfs(struct fscache_netfs *netfs) { _enter("{%s.%u}", netfs->name, netfs->version); down_write(&fscache_addremove_sem); list_del(&netfs->link); fscache_relinquish_cookie(netfs->primary_index, 0); up_write(&fscache_addremove_sem); printk(KERN_NOTICE "FS-Cache: Netfs '%s' unregistered from caching\n", netfs->name); _leave(""); } EXPORT_SYMBOL(__fscache_unregister_netfs);
gpl-2.0
caio2k/kernel-n9
fs/fscache/netfs.c
11680
2660
/* FS-Cache netfs (client) registration * * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #define FSCACHE_DEBUG_LEVEL COOKIE #include <linux/module.h> #include <linux/slab.h> #include "internal.h" static LIST_HEAD(fscache_netfs_list); /* * register a network filesystem for caching */ int __fscache_register_netfs(struct fscache_netfs *netfs) { struct fscache_netfs *ptr; int ret; _enter("{%s}", netfs->name); INIT_LIST_HEAD(&netfs->link); /* allocate a cookie for the primary index */ netfs->primary_index = kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL); if (!netfs->primary_index) { _leave(" = -ENOMEM"); return -ENOMEM; } /* initialise the primary index cookie */ atomic_set(&netfs->primary_index->usage, 1); atomic_set(&netfs->primary_index->n_children, 0); netfs->primary_index->def = &fscache_fsdef_netfs_def; netfs->primary_index->parent = &fscache_fsdef_index; netfs->primary_index->netfs_data = netfs; atomic_inc(&netfs->primary_index->parent->usage); atomic_inc(&netfs->primary_index->parent->n_children); spin_lock_init(&netfs->primary_index->lock); INIT_HLIST_HEAD(&netfs->primary_index->backing_objects); /* check the netfs type is not already present */ down_write(&fscache_addremove_sem); ret = -EEXIST; list_for_each_entry(ptr, &fscache_netfs_list, link) { if (strcmp(ptr->name, netfs->name) == 0) goto already_registered; } list_add(&netfs->link, &fscache_netfs_list); ret = 0; printk(KERN_NOTICE "FS-Cache: Netfs '%s' registered for caching\n", netfs->name); already_registered: up_write(&fscache_addremove_sem); if (ret < 0) { netfs->primary_index->parent = NULL; __fscache_cookie_put(netfs->primary_index); netfs->primary_index = NULL; } _leave(" = %d", ret); 
return ret; } EXPORT_SYMBOL(__fscache_register_netfs); /* * unregister a network filesystem from the cache * - all cookies must have been released first */ void __fscache_unregister_netfs(struct fscache_netfs *netfs) { _enter("{%s.%u}", netfs->name, netfs->version); down_write(&fscache_addremove_sem); list_del(&netfs->link); fscache_relinquish_cookie(netfs->primary_index, 0); up_write(&fscache_addremove_sem); printk(KERN_NOTICE "FS-Cache: Netfs '%s' unregistered from caching\n", netfs->name); _leave(""); } EXPORT_SYMBOL(__fscache_unregister_netfs);
gpl-2.0
hmbedded/bbb-dac-old
arch/arm/mach-omap2/dsp.c
161
3095
/* * TI's OMAP DSP platform device registration * * Copyright (C) 2005-2006 Texas Instruments, Inc. * Copyright (C) 2009 Nokia Corporation * * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* * XXX The function pointers to the PRM/CM functions are incorrect and * should be removed. No device driver should be changing PRM/CM bits * directly; that's a layering violation -- those bits are the responsibility * of the OMAP PM core code. */ #include <linux/module.h> #include <linux/platform_device.h> #include <asm/memblock.h> #include "control.h" #include "cm2xxx_3xxx.h" #include "prm2xxx_3xxx.h" #ifdef CONFIG_BRIDGE_DVFS #include "omap-pm.h" #endif #include <linux/platform_data/dsp-omap.h> static struct platform_device *omap_dsp_pdev; static struct omap_dsp_platform_data omap_dsp_pdata __initdata = { #ifdef CONFIG_BRIDGE_DVFS .dsp_set_min_opp = omap_pm_dsp_set_min_opp, .dsp_get_opp = omap_pm_dsp_get_opp, .cpu_set_freq = omap_pm_cpu_set_freq, .cpu_get_freq = omap_pm_cpu_get_freq, #endif .dsp_prm_read = omap2_prm_read_mod_reg, .dsp_prm_write = omap2_prm_write_mod_reg, .dsp_prm_rmw_bits = omap2_prm_rmw_mod_reg_bits, .dsp_cm_read = omap2_cm_read_mod_reg, .dsp_cm_write = omap2_cm_write_mod_reg, .dsp_cm_rmw_bits = omap2_cm_rmw_mod_reg_bits, .set_bootaddr = omap_ctrl_write_dsp_boot_addr, .set_bootmode = omap_ctrl_write_dsp_boot_mode, }; static phys_addr_t omap_dsp_phys_mempool_base; void __init omap_dsp_reserve_sdram_memblock(void) { phys_addr_t size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE; phys_addr_t paddr; if (!size) return; paddr = arm_memblock_steal(size, SZ_1M); if (!paddr) { pr_err("%s: failed to reserve %llx bytes\n", __func__, (unsigned long long)size); return; } omap_dsp_phys_mempool_base = paddr; } static phys_addr_t omap_dsp_get_mempool_base(void) { return 
omap_dsp_phys_mempool_base; } static int __init omap_dsp_init(void) { struct platform_device *pdev; int err = -ENOMEM; struct omap_dsp_platform_data *pdata = &omap_dsp_pdata; pdata->phys_mempool_base = omap_dsp_get_mempool_base(); if (pdata->phys_mempool_base) { pdata->phys_mempool_size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE; pr_info("%s: %llx bytes @ %llx\n", __func__, (unsigned long long)pdata->phys_mempool_size, (unsigned long long)pdata->phys_mempool_base); } pdev = platform_device_alloc("omap-dsp", -1); if (!pdev) goto err_out; err = platform_device_add_data(pdev, pdata, sizeof(*pdata)); if (err) goto err_out; err = platform_device_add(pdev); if (err) goto err_out; omap_dsp_pdev = pdev; return 0; err_out: platform_device_put(pdev); return err; } module_init(omap_dsp_init); static void __exit omap_dsp_exit(void) { platform_device_unregister(omap_dsp_pdev); } module_exit(omap_dsp_exit); MODULE_AUTHOR("Hiroshi DOYU"); MODULE_DESCRIPTION("TI's OMAP DSP platform device registration"); MODULE_LICENSE("GPL");
gpl-2.0
MH2033/VIPER_KERNEL_KK_D802
drivers/broadcast/tdmb/fc8050_z/src/fc8050_tun.c
161
5701
/*****************************************************************************
 Copyright(c) 2009 FCI Inc. All Rights Reserved

 File name : fc8050_tun.c

 Description : fc8050 host interface

 History :
 ----------------------------------------------------------------------
 2009/09/14 jason initial
 2009/11/26 Config1p0
*******************************************************************************/
#include "../inc/fci_types.h"
#include "../inc/fci_oal.h"
#include "../inc/fci_tun.h"
#include "../inc/fci_hal.h"
#include "../inc/fc8050_regs.h"

/* Write one tuner register over I2C; returns the tuner_i2c_write() result. */
static int fc8050_write(HANDLE hDevice, fci_u8 addr, fci_u8 data)
{
	fci_u8 tmp = data;

	return tuner_i2c_write(hDevice, addr, 1, &tmp, 1);
}

/* Read one tuner register over I2C; returns the tuner_i2c_read() result. */
static int fc8050_read(HANDLE hDevice, fci_u8 addr, fci_u8 *data)
{
	return tuner_i2c_read(hDevice, addr, 1, data, 1);
}

/*
 * Program the channel filter coefficients for the configured crystal
 * frequency (FC8050_FREQ_XTAL, in kHz) and run the filter calibration
 * loop: up to 10 attempts, each restarting calibration (reg 0x32) and
 * polling the lock bits (reg 0x33, mask 0xC0).
 *
 * Returns BBM_NOK for an unsupported FC8050_FREQ_XTAL; otherwise BBM_OK,
 * even when the calibration never locks (the original behaviour, which
 * fc8050_band3_init() relies on since it ignores the return value).
 */
static int fc8050_set_filter(HANDLE hDevice)
{
	int i;
	fci_u8 cal_mon = 0;

#if (FC8050_FREQ_XTAL == 19200)
	fc8050_write(hDevice, 0x02, 0x86);
	fc8050_write(hDevice, 0x3B, 0x52);
	fc8050_write(hDevice, 0x32, 0x09);
#elif (FC8050_FREQ_XTAL == 16384)
	fc8050_write(hDevice, 0x02, 0x86);
	fc8050_write(hDevice, 0x3B, 0x45);
	fc8050_write(hDevice, 0x32, 0x09);
#elif (FC8050_FREQ_XTAL == 24576)
	fc8050_write(hDevice, 0x02, 0x86);
	fc8050_write(hDevice, 0x3B, 0x68);
	fc8050_write(hDevice, 0x32, 0x09);
#elif (FC8050_FREQ_XTAL == 27000)
	fc8050_write(hDevice, 0x02, 0x86);
	fc8050_write(hDevice, 0x3B, 0x71);
	fc8050_write(hDevice, 0x32, 0x09);
#elif (FC8050_FREQ_XTAL == 38400)
	fc8050_write(hDevice, 0x02, 0x86);
	fc8050_write(hDevice, 0x3B, 0xA1);
	fc8050_write(hDevice, 0x32, 0x09);
#else
	return BBM_NOK;		/* unsupported crystal frequency */
#endif

	for (i = 0; i < 10; i++) {
		msMustWait(5);
		fc8050_read(hDevice, 0x33, &cal_mon);
		if ((cal_mon & 0xC0) == 0xC0)
			break;	/* calibration locked */
		/* restart calibration and retry */
		fc8050_write(hDevice, 0x32, 0x01);
		fc8050_write(hDevice, 0x32, 0x09);
	}
	/* leave calibration stopped */
	fc8050_write(hDevice, 0x32, 0x01);

	return BBM_OK;
}

/* L-Band is not supported by this tuner configuration. */
static int fc8050_lband_init(HANDLE hDevice)
{
	PRINTF(hDevice, "fc8050_lband_init\r\n");
	return BBM_NOK;
}

/*
 * Band-III RF front-end bring-up: loads the fixed register set and then
 * runs the channel-filter calibration.  Always returns BBM_OK.
 */
static int fc8050_band3_init(HANDLE hDevice)
{
	PRINTF(hDevice, "fc8050_band3_init\r\n");

	fc8050_write(hDevice, 0x00, 0x00);
	fc8050_write(hDevice, 0x00, 0x00);
	fc8050_write(hDevice, 0x02, 0x86);
	fc8050_write(hDevice, 0x05, 0xD8);
	fc8050_write(hDevice, 0x0A, 0x83);
	fc8050_write(hDevice, 0x16, 0x0d);
	fc8050_write(hDevice, 0x13, 0x88);
	fc8050_write(hDevice, 0x15, 0x00);
	fc8050_write(hDevice, 0x21, 0x73);
	fc8050_write(hDevice, 0x57, 0x40);
	fc8050_write(hDevice, 0x69, 0x8C);
	fc8050_write(hDevice, 0x51, 0x04);
	fc8050_write(hDevice, 0x53, 0x00);
	fc8050_write(hDevice, 0x54, 0x28);
	fc8050_write(hDevice, 0x45, 0x40);
	fc8050_write(hDevice, 0x46, 0x32);
	fc8050_write(hDevice, 0x48, 0x40);
	fc8050_write(hDevice, 0x49, 0x32);
	fc8050_write(hDevice, 0x7A, 0x88);
	fc8050_write(hDevice, 0x53, 0x01);
	fc8050_write(hDevice, 0x58, 0x34);
	fc8050_write(hDevice, 0x59, 0x2A);
	fc8050_write(hDevice, 0x5A, 0x1D);
	fc8050_write(hDevice, 0x5B, 0x14);
	fc8050_write(hDevice, 0x61, 0x64);
	fc8050_write(hDevice, 0x74, 0x3A);
	fc8050_write(hDevice, 0x75, 0x1E);
	fc8050_write(hDevice, 0x6A, 0x0C);
	fc8050_write(hDevice, 0x6C, 0x0C);
	fc8050_write(hDevice, 0x6E, 0x0C);
	fc8050_write(hDevice, 0x70, 0x0C);
	fc8050_write(hDevice, 0x72, 0x0C);
	fc8050_write(hDevice, 0x7C, 0x0C);
	fc8050_write(hDevice, 0x4E, 0x26);
	fc8050_write(hDevice, 0x31, 0x13);
	fc8050_write(hDevice, 0x34, 0x53);
	fc8050_write(hDevice, 0x43, 0x20);
	fc8050_write(hDevice, 0x2e, 0x70);

	fc8050_set_filter(hDevice);

	return BBM_OK;
}

/*
 * Initialize the tuner for the given band.
 *
 * Programs the baseband QDD/AGC registers, then performs the band-specific
 * RF bring-up.  Returns BBM_OK on success, BBM_NOK for an unknown band or
 * when the band init fails (L-Band is unsupported and always fails).
 *
 * Note: the original code had `if (res != BBM_OK) return res; return res;`
 * plus an extra `else return BBM_NOK;` — both collapse to the single
 * return below with identical behaviour.
 */
int fc8050_tuner_init(HANDLE hDevice, fci_u32 band)
{
	int res = BBM_NOK;	/* default result for an unknown band */

	bbm_write(hDevice, BBM_QDD_COMMAN, 0x5C);
	bbm_write(hDevice, BBM_QDD_AGC_STEP, 0x03);
	bbm_write(hDevice, BBM_QDD_TUN_COMMA, 0x40);
	bbm_write(hDevice, BBM_QDD_TUN_GAIN, 0x24);
	bbm_write(hDevice, BBM_QDD_AGC_PERIOD, 0x14);
	bbm_write(hDevice, BBM_QDD_TRAGET_RMS, 0x60);
	bbm_write(hDevice, BBM_QDD_TUN_GAIN_LOC, 0x44);
	bbm_write(hDevice, BBM_QDD_GAIN_MAX, 0x38);

	if (band == LBAND_TYPE)
		res = fc8050_lband_init(hDevice);
	else if (band == BAND3_TYPE)
		res = fc8050_band3_init(hDevice);

	return res;
}

/*
 * Tune to local-oscillator frequency f_lo (kHz).
 *
 * The VCO runs at 12 * f_lo; the reference divider R is 1 when the VCO is
 * at least 25x the crystal, else 2.  N is the integer part of
 * f_vco / f_comp and K the 20-bit fractional part, computed with rounding
 * (the original also contained an unrounded dead-store computation of
 * k_val that was immediately overwritten; it has been removed).
 * Always returns BBM_OK; individual register-write errors are ignored,
 * as in the original.
 */
int fc8050_set_freq(HANDLE hDevice, fci_u32 band, fci_u32 f_lo)
{
	fci_u32 f_diff, f_diff_shifted, n_val, k_val;
	fci_u32 f_vco = f_lo * 12;
	fci_u32 r_val = (f_vco >= 25 * FC8050_FREQ_XTAL) ? 1 : 2;
	fci_u32 f_comp = FC8050_FREQ_XTAL / r_val;
	fci_u8 pre_shift_bits = 4;	/* guards the 20-bit shift from overflowing 32 bits */
	fci_u8 data_0x0E;

	/* hold the synthesizer while reprogramming */
	fc8050_write(hDevice, 0x0a, 0x85);
	fc8050_write(hDevice, 0x16, 0x0d);

	n_val = f_vco / f_comp;
	f_diff = f_vco - f_comp * n_val;
	f_diff_shifted = f_diff << (20 - pre_shift_bits);

	/* rounded 20-bit fractional divider value */
	k_val = (f_diff_shifted + (f_comp >> (pre_shift_bits + 1))) /
		(f_comp >> pre_shift_bits);

	/* K[19:16] plus the R-divider select in the high nibble */
	data_0x0E = ((r_val == 1) ? 0x40 : 0x50) + (unsigned char)(k_val >> 16);

	fc8050_write(hDevice, 0x0E, data_0x0E);
	fc8050_write(hDevice, 0x0F, (unsigned char)(k_val >> 8));
	fc8050_write(hDevice, 0x10, (unsigned char)(k_val));
	fc8050_write(hDevice, 0x11, (unsigned char)(n_val));

	/* release the synthesizer */
	fc8050_write(hDevice, 0x0a, 0x83);

	return BBM_OK;
}

/*
 * Estimate the received signal strength (dBm-like figure) from the AGC
 * gain-monitor registers 0x76..0x79.  Returns BBM_OK and stores the value
 * in *rssi, or the first failing read's status (then *rssi is untouched).
 * The weighting constants and K = -66 come from the vendor's gain model.
 */
int fc8050_get_rssi(HANDLE hDevice, int *rssi)
{
	int res = BBM_OK;
	fci_u8 LNA, RFVGA, PREAMP_PGA, CSF = 0x00;
	int K = -66;

	res = fc8050_read(hDevice, 0x76, &LNA);
	res |= fc8050_read(hDevice, 0x77, &RFVGA);
	res |= fc8050_read(hDevice, 0x78, &CSF);
	res |= fc8050_read(hDevice, 0x79, &PREAMP_PGA);
	if (res != BBM_OK)
		return res;

	*rssi = (((LNA & 0x07) * 5) + (RFVGA * 7 / 10) +
		 ((PREAMP_PGA >> 7) * 6) + ((CSF & 0x7) * 6) -
		 ((PREAMP_PGA & 0x7F) / 2) + K);

	return BBM_OK;
}
gpl-2.0
Tkkg1994/SuperKernel
drivers/gpio/gpio-octeon.c
417
3860
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011, 2012 Cavium Inc.
 */

#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/io.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-gpio-defs.h>

/* Register offsets from the GPIO block base. */
#define RX_DAT 0x80
#define TX_SET 0x88
#define TX_CLEAR 0x90

/*
 * The address offset of the GPIO configuration register for a given
 * line.
 */
static unsigned int bit_cfg_reg(unsigned int offset)
{
	/*
	 * The register stride is 8, with a discontinuity after the
	 * first 16.
	 */
	if (offset < 16)
		return 8 * offset;
	else
		return 8 * (offset - 16) + 0x100;
}

/* Per-device state: the gpiolib chip plus the mapped CSR base address. */
struct octeon_gpio {
	struct gpio_chip chip;
	u64 register_base;
};

/* Put a line into input mode by clearing its whole config register. */
static int octeon_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
{
	struct octeon_gpio *gpio = container_of(chip, struct octeon_gpio, chip);

	cvmx_write_csr(gpio->register_base + bit_cfg_reg(offset), 0);
	return 0;
}

/* Drive a line via the set/clear registers (atomic, no read-modify-write). */
static void octeon_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct octeon_gpio *gpio = container_of(chip, struct octeon_gpio, chip);
	u64 mask = 1ull << offset;
	u64 reg = gpio->register_base + (value ? TX_SET : TX_CLEAR);

	cvmx_write_csr(reg, mask);
}

/* Set the initial level first, then enable the output driver. */
static int octeon_gpio_dir_out(struct gpio_chip *chip, unsigned offset,
			       int value)
{
	struct octeon_gpio *gpio = container_of(chip, struct octeon_gpio, chip);
	union cvmx_gpio_bit_cfgx cfgx;

	octeon_gpio_set(chip, offset, value);

	cfgx.u64 = 0;
	cfgx.s.tx_oe = 1;

	cvmx_write_csr(gpio->register_base + bit_cfg_reg(offset), cfgx.u64);
	return 0;
}

/* Read the current level of a line from the RX data register. */
static int octeon_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct octeon_gpio *gpio = container_of(chip, struct octeon_gpio, chip);
	u64 read_bits = cvmx_read_csr(gpio->register_base + RX_DAT);

	return ((1ull << offset) & read_bits) != 0;
}

/*
 * Probe: map the CSR region, fill in the gpio_chip callbacks and register
 * with gpiolib.  Fix over the original: the devm_ioremap() result is now
 * checked — the original used a potentially NULL mapping as the CSR base.
 */
static int octeon_gpio_probe(struct platform_device *pdev)
{
	struct octeon_gpio *gpio;
	struct gpio_chip *chip;
	struct resource *res_mem;
	int err = 0;

	gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
	if (!gpio)
		return -ENOMEM;
	chip = &gpio->chip;

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res_mem == NULL) {
		dev_err(&pdev->dev, "found no memory resource\n");
		err = -ENXIO;
		goto out;
	}
	if (!devm_request_mem_region(&pdev->dev, res_mem->start,
				     resource_size(res_mem), res_mem->name)) {
		dev_err(&pdev->dev, "request_mem_region failed\n");
		err = -ENXIO;
		goto out;
	}
	gpio->register_base = (u64)devm_ioremap(&pdev->dev, res_mem->start,
						resource_size(res_mem));
	if (!gpio->register_base) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -ENOMEM;
		goto out;
	}

	/* stash the chip so remove() can find it */
	pdev->dev.platform_data = chip;
	chip->label = "octeon-gpio";
	chip->dev = &pdev->dev;
	chip->owner = THIS_MODULE;
	chip->base = 0;
	chip->can_sleep = false;
	chip->ngpio = 20;
	chip->direction_input = octeon_gpio_dir_in;
	chip->get = octeon_gpio_get;
	chip->direction_output = octeon_gpio_dir_out;
	chip->set = octeon_gpio_set;
	err = gpiochip_add(chip);
	if (err)
		goto out;

	dev_info(&pdev->dev, "OCTEON GPIO driver probed.\n");
out:
	return err;
}

static int octeon_gpio_remove(struct platform_device *pdev)
{
	struct gpio_chip *chip = pdev->dev.platform_data;

	gpiochip_remove(chip);
	return 0;
}

static struct of_device_id octeon_gpio_match[] = {
	{
		.compatible = "cavium,octeon-3860-gpio",
	},
	{},
};
MODULE_DEVICE_TABLE(of, octeon_gpio_match);

static struct platform_driver octeon_gpio_driver = {
	.driver = {
		.name		= "octeon_gpio",
		.owner		= THIS_MODULE,
		.of_match_table	= octeon_gpio_match,
	},
	.probe = octeon_gpio_probe,
	.remove = octeon_gpio_remove,
};

module_platform_driver(octeon_gpio_driver);

MODULE_DESCRIPTION("Cavium Inc. OCTEON GPIO Driver");
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
gpl-2.0
fermasia/android_kernel_oneplus_msm8974
drivers/usb/serial/usb_wwan.c
417
22867
/*
  USB Driver layer for GSM modems

  Copyright (C) 2005  Matthias Urlichs <smurf@smurf.noris.de>

  This driver is free software; you can redistribute it and/or modify
  it under the terms of Version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  Portions copied from the Keyspan driver by Hugh Blemings <hugh@blemings.org>

  History: see the git log.

  Work sponsored by: Sigos GmbH, Germany <info@sigos.de>

  This driver exists because the "normal" serial driver doesn't work too well
  with GSM modems. Issues:
  - data loss -- one single Receive URB is not nearly enough
  - controlling the baud rate doesn't make sense
*/

#define DRIVER_VERSION "v0.7.2"
#define DRIVER_AUTHOR "Matthias Urlichs <smurf@smurf.noris.de>"
#define DRIVER_DESC "USB Driver for GSM modems"

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/serial.h>
#include "usb-wwan.h"

static bool debug;

/*
 * Assert/deassert DTR and RTS together via the interface's send_setup
 * hook.  No-op when the interface provides no send_setup.
 */
void usb_wwan_dtr_rts(struct usb_serial_port *port, int on)
{
	struct usb_wwan_port_private *portdata;
	struct usb_wwan_intf_private *intfdata;

	dbg("%s", __func__);

	intfdata = port->serial->private;

	if (!intfdata->send_setup)
		return;

	portdata = usb_get_serial_port_data(port);
	/* FIXME: locking */
	portdata->rts_state = on;
	portdata->dtr_state = on;
	intfdata->send_setup(port);
}
EXPORT_SYMBOL(usb_wwan_dtr_rts);

/*
 * termios handling: GSM modems ignore line settings, so only copy the
 * hardware-relevant bits and re-issue the setup request.
 */
void usb_wwan_set_termios(struct tty_struct *tty,
			  struct usb_serial_port *port,
			  struct ktermios *old_termios)
{
	struct usb_wwan_intf_private *intfdata = port->serial->private;

	dbg("%s", __func__);

	/* Doesn't support option setting */
	tty_termios_copy_hw(tty->termios, old_termios);

	if (intfdata->send_setup)
		intfdata->send_setup(port);
}
EXPORT_SYMBOL(usb_wwan_set_termios);

/* Report modem-control line state from the cached per-port flags. */
int usb_wwan_tiocmget(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	unsigned int value;
	struct usb_wwan_port_private *portdata;

	portdata = usb_get_serial_port_data(port);

	value = ((portdata->rts_state) ? TIOCM_RTS : 0) |
	    ((portdata->dtr_state) ? TIOCM_DTR : 0) |
	    ((portdata->cts_state) ? TIOCM_CTS : 0) |
	    ((portdata->dsr_state) ? TIOCM_DSR : 0) |
	    ((portdata->dcd_state) ? TIOCM_CAR : 0) |
	    ((portdata->ri_state) ? TIOCM_RNG : 0);

	return value;
}
EXPORT_SYMBOL(usb_wwan_tiocmget);

/*
 * Update RTS/DTR state and push it to the device via send_setup.
 * Returns -EINVAL when the interface has no send_setup hook.
 */
int usb_wwan_tiocmset(struct tty_struct *tty, unsigned int set,
		      unsigned int clear)
{
	struct usb_serial_port *port = tty->driver_data;
	struct usb_wwan_port_private *portdata;
	struct usb_wwan_intf_private *intfdata;

	portdata = usb_get_serial_port_data(port);
	intfdata = port->serial->private;

	if (!intfdata->send_setup)
		return -EINVAL;

	/* FIXME: what locks portdata fields ? */
	if (set & TIOCM_RTS)
		portdata->rts_state = 1;
	if (set & TIOCM_DTR)
		portdata->dtr_state = 1;

	if (clear & TIOCM_RTS)
		portdata->rts_state = 0;
	if (clear & TIOCM_DTR)
		portdata->dtr_state = 0;
	return intfdata->send_setup(port);
}
EXPORT_SYMBOL(usb_wwan_tiocmset);

/*
 * TIOCGSERIAL backend: copy a serial_struct describing this port to
 * userspace.  close_delay/closing_wait are converted from jiffies-based
 * units to the traditional 1/100s units (divide by 10).
 */
static int get_serial_info(struct usb_serial_port *port,
			   struct serial_struct __user *retinfo)
{
	struct serial_struct tmp;

	if (!retinfo)
		return -EFAULT;

	memset(&tmp, 0, sizeof(tmp));
	tmp.line = port->serial->minor;
	tmp.port = port->number;
	tmp.baud_base = tty_get_baud_rate(port->port.tty);
	tmp.close_delay = port->port.close_delay / 10;
	tmp.closing_wait = port->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
	    ASYNC_CLOSING_WAIT_NONE : port->port.closing_wait / 10;

	if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
		return -EFAULT;
	return 0;
}

/*
 * TIOCSSERIAL backend: only close_delay/closing_wait are settable, and
 * only with CAP_SYS_ADMIN; non-privileged callers get -EPERM when trying
 * to change them, otherwise -EOPNOTSUPP.
 */
static int set_serial_info(struct usb_serial_port *port,
			   struct serial_struct __user *newinfo)
{
	struct serial_struct new_serial;
	unsigned int closing_wait, close_delay;
	int retval = 0;

	if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
		return -EFAULT;

	/* convert 1/100s units back to internal units */
	close_delay = new_serial.close_delay * 10;
	closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
	    ASYNC_CLOSING_WAIT_NONE : new_serial.closing_wait * 10;

	mutex_lock(&port->port.mutex);

	if (!capable(CAP_SYS_ADMIN)) {
		if ((close_delay != port->port.close_delay) ||
		    (closing_wait != port->port.closing_wait))
			retval = -EPERM;
		else
			retval = -EOPNOTSUPP;
	} else {
		port->port.close_delay = close_delay;
		port->port.closing_wait = closing_wait;
	}

	mutex_unlock(&port->port.mutex);
	return retval;
}

/* ioctl dispatch: only TIOCGSERIAL/TIOCSSERIAL are handled here. */
int usb_wwan_ioctl(struct tty_struct *tty,
		   unsigned int cmd, unsigned long arg)
{
	struct usb_serial_port *port = tty->driver_data;

	dbg("%s cmd 0x%04x", __func__, cmd);

	switch (cmd) {
	case TIOCGSERIAL:
		return get_serial_info(port,
				       (struct serial_struct __user *) arg);
	case TIOCSSERIAL:
		return set_serial_info(port,
				       (struct serial_struct __user *) arg);
	default:
		break;
	}

	dbg("%s arg not supported", __func__);

	return -ENOIOCTLCMD;
}
EXPORT_SYMBOL(usb_wwan_ioctl);

/*
 * Write path: chop the buffer across the pool of OUT URBs.  A busy URB
 * is skipped unless it has been in flight for >10s, in which case it is
 * unlinked.  While the interface is suspended, URBs are parked on the
 * 'delayed' anchor and submitted later by play_delayed().  Returns the
 * number of bytes accepted.
 */
/* Write */
int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
		   const unsigned char *buf, int count)
{
	struct usb_wwan_port_private *portdata;
	struct usb_wwan_intf_private *intfdata;
	int i;
	int left, todo;
	struct urb *this_urb = NULL;	/* spurious */
	int err;
	unsigned long flags;

	portdata = usb_get_serial_port_data(port);
	intfdata = port->serial->private;

	dbg("%s: write (%d chars)", __func__, count);

	i = 0;
	left = count;
	for (i = 0; left > 0 && i < N_OUT_URB; i++) {
		todo = left;
		if (todo > OUT_BUFLEN)
			todo = OUT_BUFLEN;

		this_urb = portdata->out_urbs[i];
		if (test_and_set_bit(i, &portdata->out_busy)) {
			if (time_before(jiffies,
					portdata->tx_start_time[i] + 10 * HZ))
				continue;
			usb_unlink_urb(this_urb);
			continue;
		}
		dbg("%s: endpoint %d buf %d", __func__,
		    usb_pipeendpoint(this_urb->pipe), i);

		err = usb_autopm_get_interface_async(port->serial->interface);
		if (err < 0)
			break;

		/* send the data */
		memcpy(this_urb->transfer_buffer, buf, todo);
		this_urb->transfer_buffer_length = todo;

		spin_lock_irqsave(&intfdata->susp_lock, flags);
		if (intfdata->suspended) {
			/* park until resume; play_delayed() submits it */
			usb_anchor_urb(this_urb, &portdata->delayed);
			spin_unlock_irqrestore(&intfdata->susp_lock, flags);
		} else {
			intfdata->in_flight++;
			spin_unlock_irqrestore(&intfdata->susp_lock, flags);
			usb_anchor_urb(this_urb, &portdata->submitted);
			err = usb_submit_urb(this_urb, GFP_ATOMIC);
			if (err) {
				dbg("usb_submit_urb %p (write bulk) failed "
				    "(%d)", this_urb, err);
				usb_unanchor_urb(this_urb);
				clear_bit(i, &portdata->out_busy);
				spin_lock_irqsave(&intfdata->susp_lock, flags);
				intfdata->in_flight--;
				spin_unlock_irqrestore(&intfdata->susp_lock,
						       flags);
				usb_autopm_put_interface_async(port->serial->interface);
				break;
			}
		}

		portdata->tx_start_time[i] = jiffies;
		buf += todo;
		left -= todo;
	}

	count -= left;
	dbg("%s: wrote (did %d)", __func__, count);
	return count;
}
EXPORT_SYMBOL(usb_wwan_write);

/*
 * Deferred receive processing: drain queued IN URBs into the tty flip
 * buffer.  If the tty cannot take all the data, remember how far we got
 * (n_read), mark the port throttled and put the URB back at the head of
 * the queue; otherwise resubmit the URB (unless suspended or already
 * anchored).  in_lock protects the URB list; susp_lock guards the
 * suspended/anchor check around resubmission.
 */
static void usb_wwan_in_work(struct work_struct *w)
{
	struct usb_wwan_port_private *portdata =
		container_of(w, struct usb_wwan_port_private, in_work);
	struct usb_wwan_intf_private *intfdata;
	struct list_head *q = &portdata->in_urb_list;
	struct urb *urb;
	unsigned char *data;
	struct tty_struct *tty;
	struct usb_serial_port *port;
	int err;
	ssize_t len;
	ssize_t count;
	unsigned long flags;

	spin_lock_irqsave(&portdata->in_lock, flags);
	while (!list_empty(q)) {
		urb = list_first_entry(q, struct urb, urb_list);
		port = urb->context;
		if (port->throttle_req || port->throttled)
			break;

		tty = tty_port_tty_get(&port->port);
		if (!tty)
			break;

		/* list_empty() will still be false after this; it means
		 * URB is still being processed */
		list_del(&urb->urb_list);

		spin_unlock_irqrestore(&portdata->in_lock, flags);

		len = urb->actual_length - portdata->n_read;
		data = urb->transfer_buffer + portdata->n_read;
		count = tty_insert_flip_string(tty, data, len);
		tty_flip_buffer_push(tty);
		tty_kref_put(tty);

		if (count < len) {
			/* tty buffer full: remember offset and throttle */
			dbg("%s: len:%d count:%d n_read:%d\n", __func__,
					len, count, portdata->n_read);
			portdata->n_read += count;
			port->throttled = true;

			/* add request back to list */
			spin_lock_irqsave(&portdata->in_lock, flags);
			list_add(&urb->urb_list, q);
			spin_unlock_irqrestore(&portdata->in_lock, flags);
			return;
		}

		/* re-init list pointer to indicate we are done with it */
		INIT_LIST_HEAD(&urb->urb_list);
		portdata->n_read = 0;

		intfdata = port->serial->private;
		spin_lock_irqsave(&intfdata->susp_lock, flags);
		if (!intfdata->suspended && !urb->anchor) {
			usb_anchor_urb(urb, &portdata->submitted);
			err = usb_submit_urb(urb, GFP_ATOMIC);
			if (err) {
				usb_unanchor_urb(urb);
				if (err != -EPERM)
					pr_err("%s: submit read urb failed:%d",
							__func__, err);
			}
			usb_mark_last_busy(port->serial->dev);
		}
		spin_unlock_irqrestore(&intfdata->susp_lock, flags);

		spin_lock_irqsave(&portdata->in_lock, flags);
	}
	spin_unlock_irqrestore(&portdata->in_lock, flags);
}

/*
 * IN-URB completion: on success (or -ENOENT) with data, queue the URB
 * for usb_wwan_in_work() and return.  On error, resubmit directly unless
 * the interface is suspended, the port is closed, or the device shut
 * down (-ESHUTDOWN).
 */
static void usb_wwan_indat_callback(struct urb *urb)
{
	int err;
	int endpoint;
	struct usb_wwan_port_private *portdata;
	struct usb_wwan_intf_private *intfdata;
	struct usb_serial_port *port;
	int status = urb->status;
	unsigned long flags;

	dbg("%s: %p", __func__, urb);

	endpoint = usb_pipeendpoint(urb->pipe);
	port = urb->context;
	portdata = usb_get_serial_port_data(port);
	intfdata = port->serial->private;

	usb_mark_last_busy(port->serial->dev);

	if ((status == -ENOENT || !status) && urb->actual_length) {
		spin_lock_irqsave(&portdata->in_lock, flags);
		list_add_tail(&urb->urb_list, &portdata->in_urb_list);
		spin_unlock_irqrestore(&portdata->in_lock, flags);
		queue_work(system_nrt_wq, &portdata->in_work);
		return;
	}

	dbg("%s: nonzero status: %d on endpoint %02x.",
	    __func__, status, endpoint);

	spin_lock(&intfdata->susp_lock);
	if (intfdata->suspended || !portdata->opened) {
		spin_unlock(&intfdata->susp_lock);
		return;
	}
	spin_unlock(&intfdata->susp_lock);

	if (status != -ESHUTDOWN) {
		usb_anchor_urb(urb, &portdata->submitted);
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err) {
			usb_unanchor_urb(urb);
			if (err != -EPERM)
				pr_err("%s: submit read urb failed:%d",
						__func__, err);
		}
	}
}

/*
 * OUT-URB completion: drop the PM reference and in_flight count, then
 * clear the busy bit for the matching slot so usb_wwan_write() can
 * reuse it.  The barrier orders the data writes before the bit clear.
 */
static void usb_wwan_outdat_callback(struct urb *urb)
{
	struct usb_serial_port *port;
	struct usb_wwan_port_private *portdata;
	struct usb_wwan_intf_private *intfdata;
	int i;

	dbg("%s", __func__);

	port = urb->context;
	intfdata = port->serial->private;

	usb_serial_port_softint(port);
	usb_autopm_put_interface_async(port->serial->interface);
	portdata = usb_get_serial_port_data(port);
	spin_lock(&intfdata->susp_lock);
	intfdata->in_flight--;
	spin_unlock(&intfdata->susp_lock);

	for (i = 0; i < N_OUT_URB; ++i) {
		if (portdata->out_urbs[i] == urb) {
			smp_mb__before_clear_bit();
			clear_bit(i, &portdata->out_busy);
			break;
		}
	}
}

/* Free space = OUT_BUFLEN per idle OUT URB. */
int usb_wwan_write_room(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct usb_wwan_port_private *portdata;
	int i;
	int data_len = 0;
	struct urb *this_urb;

	portdata = usb_get_serial_port_data(port);

	for (i = 0; i < N_OUT_URB; i++) {
		this_urb = portdata->out_urbs[i];
		if (this_urb && !test_bit(i, &portdata->out_busy))
			data_len += OUT_BUFLEN;
	}

	dbg("%s: %d", __func__, data_len);
	return data_len;
}
EXPORT_SYMBOL(usb_wwan_write_room);

/* Pending bytes = sum of lengths of the busy OUT URBs. */
int usb_wwan_chars_in_buffer(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct usb_wwan_port_private *portdata;
	int i;
	int data_len = 0;
	struct urb *this_urb;

	portdata = usb_get_serial_port_data(port);

	for (i = 0; i < N_OUT_URB; i++) {
		this_urb = portdata->out_urbs[i];
		/* FIXME: This locking is insufficient as this_urb may
		   go unused during the test */
		if (this_urb && test_bit(i, &portdata->out_busy))
			data_len += this_urb->transfer_buffer_length;
	}
	dbg("%s: %d", __func__, data_len);
	return data_len;
}
EXPORT_SYMBOL(usb_wwan_chars_in_buffer);

/* Request throttling; usb_wwan_in_work() stops draining once it sees this. */
void usb_wwan_throttle(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;

	port->throttle_req = true;

	dbg("%s:\n", __func__);
}
EXPORT_SYMBOL(usb_wwan_throttle);

/* Clear the throttle flags and kick the receive worker again. */
void usb_wwan_unthrottle(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct usb_wwan_port_private *portdata;

	portdata = usb_get_serial_port_data(port);

	dbg("%s:\n", __func__);
	port->throttle_req = false;
	port->throttled = false;

	queue_work(system_nrt_wq, &portdata->in_work);
}
EXPORT_SYMBOL(usb_wwan_unthrottle);

/*
 * Open: put the tty in raw mode, submit all IN URBs, issue the setup
 * request, enable remote wakeup, mark the port opened and drop the
 * autopm reference taken by the USB serial core.
 */
int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port)
{
	struct usb_wwan_port_private *portdata;
	struct usb_wwan_intf_private *intfdata;
	struct usb_serial *serial = port->serial;
	int i, err;
	struct urb *urb;

	portdata = usb_get_serial_port_data(port);
	intfdata = serial->private;

	/* explicitly set the driver mode to raw */
	tty->raw = 1;
	tty->real_raw = 1;
	set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
	dbg("%s", __func__);

	/* Start reading from the IN endpoint */
	for (i = 0; i < N_IN_URB; i++) {
		urb = portdata->in_urbs[i];
		if (!urb)
			continue;
		usb_anchor_urb(urb, &portdata->submitted);
		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err) {
			usb_unanchor_urb(urb);
			dbg("%s: submit urb %d failed (%d) %d", __func__,
			    i, err, urb->transfer_buffer_length);
		}
	}

	if (intfdata->send_setup)
		intfdata->send_setup(port);

	serial->interface->needs_remote_wakeup = 1;
	spin_lock_irq(&intfdata->susp_lock);
	portdata->opened = 1;
	spin_unlock_irq(&intfdata->susp_lock);
	/* this balances a get in the generic USB serial code */
	usb_autopm_put_interface(serial->interface);

	return 0;
}
EXPORT_SYMBOL(usb_wwan_open);

/*
 * Close: mark the port closed, kill all in-flight URBs, re-take the
 * autopm reference dropped in open and disable remote wakeup.
 */
void usb_wwan_close(struct usb_serial_port *port)
{
	int i;
	struct usb_serial *serial = port->serial;
	struct usb_wwan_port_private *portdata;
	struct usb_wwan_intf_private *intfdata = port->serial->private;

	dbg("%s", __func__);
	portdata = usb_get_serial_port_data(port);

	if (serial->dev) {
		/* Stop reading/writing urbs */
		spin_lock_irq(&intfdata->susp_lock);
		portdata->opened = 0;
		spin_unlock_irq(&intfdata->susp_lock);

		for (i = 0; i < N_IN_URB; i++)
			usb_kill_urb(portdata->in_urbs[i]);
		for (i = 0; i < N_OUT_URB; i++)
			usb_kill_urb(portdata->out_urbs[i]);
		/* balancing - important as an error cannot be handled*/
		usb_autopm_get_interface_no_resume(serial->interface);
		serial->interface->needs_remote_wakeup = 0;
	}
}
EXPORT_SYMBOL(usb_wwan_close);

/*
 * Allocate and fill one bulk URB for the given endpoint/direction, or
 * NULL when the endpoint is -1 or allocation fails.
 */
/* Helper functions used by usb_wwan_setup_urbs */
static struct urb *usb_wwan_setup_urb(struct usb_serial *serial, int endpoint,
				      int dir, void *ctx, char *buf, int len,
				      void (*callback) (struct urb *))
{
	struct urb *urb;

	if (endpoint == -1)
		return NULL;	/* endpoint not needed */

	urb = usb_alloc_urb(0, GFP_KERNEL);	/* No ISO */
	if (urb == NULL) {
		dbg("%s: alloc for endpoint %d failed.", __func__, endpoint);
		return NULL;
	}

	/* Fill URB using supplied data. */
	usb_fill_bulk_urb(urb, serial->dev,
			  usb_sndbulkpipe(serial->dev, endpoint) | dir,
			  buf, len, callback, ctx);

	return urb;
}

/* Build the IN and OUT URB pools for every port of this serial device. */
/* Setup urbs */
static void usb_wwan_setup_urbs(struct usb_serial *serial)
{
	int i, j;
	struct usb_serial_port *port;
	struct usb_wwan_port_private *portdata;

	dbg("%s", __func__);

	for (i = 0; i < serial->num_ports; i++) {
		port = serial->port[i];
		portdata = usb_get_serial_port_data(port);

		/* Do indat endpoints first */
		for (j = 0; j < N_IN_URB; ++j) {
			portdata->in_urbs[j] = usb_wwan_setup_urb(serial,
								  port->
								  bulk_in_endpointAddress,
								  USB_DIR_IN,
								  port,
								  portdata->
								  in_buffer[j],
								  IN_BUFLEN,
								  usb_wwan_indat_callback);
		}

		/* outdat endpoints */
		for (j = 0; j < N_OUT_URB; ++j) {
			portdata->out_urbs[j] = usb_wwan_setup_urb(serial,
								   port->
								   bulk_out_endpointAddress,
								   USB_DIR_OUT,
								   port,
								   portdata->
								   out_buffer
								   [j],
								   OUT_BUFLEN,
								   usb_wwan_outdat_callback);
		}
	}
}

/*
 * Attach-time setup: allocate per-port private data and buffers, submit
 * the interrupt URB where present, then build the URB pools.  Returns 0
 * on success, 1 on allocation failure (usb-serial attach convention).
 */
int usb_wwan_startup(struct usb_serial *serial)
{
	int i, j, err;
	struct usb_serial_port *port;
	struct usb_wwan_port_private *portdata;
	u8 *buffer;

	dbg("%s", __func__);

	/* Now setup per port private data */
	for (i = 0; i < serial->num_ports; i++) {
		port = serial->port[i];
		portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
		if (!portdata) {
			dbg("%s: kmalloc for usb_wwan_port_private (%d) failed!.",
			    __func__, i);
			return 1;
		}
		init_usb_anchor(&portdata->delayed);
		init_usb_anchor(&portdata->submitted);
		INIT_WORK(&portdata->in_work, usb_wwan_in_work);
		INIT_LIST_HEAD(&portdata->in_urb_list);
		spin_lock_init(&portdata->in_lock);

		for (j = 0; j < N_IN_URB; j++) {
			buffer = kmalloc(IN_BUFLEN, GFP_KERNEL);
			if (!buffer)
				goto bail_out_error;
			portdata->in_buffer[j] = buffer;
		}

		for (j = 0; j < N_OUT_URB; j++) {
			buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL);
			if (!buffer)
				goto bail_out_error2;
			portdata->out_buffer[j] = buffer;
		}

		usb_set_serial_port_data(port, portdata);

		if (!port->interrupt_in_urb)
			continue;
		err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
		if (err)
			dbg("%s: submit irq_in urb failed %d", __func__, err);
	}
	usb_wwan_setup_urbs(serial);
	return 0;

bail_out_error2:
	for (j = 0; j < N_OUT_URB; j++)
		kfree(portdata->out_buffer[j]);
bail_out_error:
	for (j = 0; j < N_IN_URB; j++)
		kfree(portdata->in_buffer[j]);
	kfree(portdata);
	return 1;
}
EXPORT_SYMBOL(usb_wwan_startup);

/* Kill every anchored (submitted) URB on every port. */
static void stop_read_write_urbs(struct usb_serial *serial)
{
	int i;
	struct usb_serial_port *port;
	struct usb_wwan_port_private *portdata;

	/* Stop reading/writing urbs */
	for (i = 0; i < serial->num_ports; ++i) {
		port = serial->port[i];
		portdata = usb_get_serial_port_data(port);
		usb_kill_anchored_urbs(&portdata->submitted);
	}
}

void usb_wwan_disconnect(struct usb_serial *serial)
{
	dbg("%s", __func__);

	stop_read_write_urbs(serial);
}
EXPORT_SYMBOL(usb_wwan_disconnect);

/*
 * Release: cancel the receive worker, drop queued IN URBs off the list,
 * free all URBs and buffers, then the per-port private data itself.
 */
void usb_wwan_release(struct usb_serial *serial)
{
	int i, j;
	struct usb_serial_port *port;
	struct usb_wwan_port_private *portdata;
	struct urb *urb;
	struct list_head *q;
	unsigned long flags;

	/* Now free them */
	for (i = 0; i < serial->num_ports; ++i) {
		port = serial->port[i];
		portdata = usb_get_serial_port_data(port);

		cancel_work_sync(&portdata->in_work);
		/* TBD: do we really need this */
		spin_lock_irqsave(&portdata->in_lock, flags);
		q = &portdata->in_urb_list;
		while (!list_empty(q)) {
			urb = list_first_entry(q, struct urb, urb_list);
			list_del_init(&urb->urb_list);
		}
		spin_unlock_irqrestore(&portdata->in_lock, flags);

		for (j = 0; j < N_IN_URB; j++) {
			usb_free_urb(portdata->in_urbs[j]);
			kfree(portdata->in_buffer[j]);
			portdata->in_urbs[j] = NULL;
		}
		for (j = 0; j < N_OUT_URB; j++) {
			usb_free_urb(portdata->out_urbs[j]);
			kfree(portdata->out_buffer[j]);
			portdata->out_urbs[j] = NULL;
		}
	}

	/* Now free per port private data */
	for (i = 0; i < serial->num_ports; i++) {
		port = serial->port[i];
		kfree(usb_get_serial_port_data(port));
	}
}
EXPORT_SYMBOL(usb_wwan_release);

#ifdef CONFIG_PM
/*
 * Suspend: refuse autosuspend while writes are in flight or the
 * autosuspend timer has not expired; otherwise mark suspended and stop
 * all URBs.
 */
int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message)
{
	struct usb_wwan_intf_private *intfdata = serial->private;
	int b;

	dbg("%s entered", __func__);

	if (PMSG_IS_AUTO(message)) {
		spin_lock_irq(&intfdata->susp_lock);
		b = intfdata->in_flight;
		spin_unlock_irq(&intfdata->susp_lock);

		if (b || pm_runtime_autosuspend_expiration(&serial->dev->dev))
			return -EBUSY;
	}

	spin_lock_irq(&intfdata->susp_lock);
	intfdata->suspended = 1;
	spin_unlock_irq(&intfdata->susp_lock);
	stop_read_write_urbs(serial);

	return 0;
}
EXPORT_SYMBOL(usb_wwan_suspend);

/* Clear the busy bit for the slot owning this delayed OUT URB. */
static void unbusy_queued_urb(struct urb *urb, struct usb_wwan_port_private *portdata)
{
	int i;

	for (i = 0; i < N_OUT_URB; i++) {
		if (urb == portdata->out_urbs[i]) {
			clear_bit(i, &portdata->out_busy);
			break;
		}
	}
}

/*
 * Submit the OUT URBs parked on the 'delayed' anchor during suspend.
 * On the first failure, the remaining delayed URBs are discarded and
 * their busy bits and PM references released.
 */
static void play_delayed(struct usb_serial_port *port)
{
	struct usb_wwan_intf_private *data;
	struct usb_wwan_port_private *portdata;
	struct urb *urb;
	int err;

	portdata = usb_get_serial_port_data(port);
	data = port->serial->private;
	while ((urb = usb_get_from_anchor(&portdata->delayed))) {
		usb_anchor_urb(urb, &portdata->submitted);
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (!err) {
			data->in_flight++;
		} else {
			usb_unanchor_urb(urb);
			/* we have to throw away the rest */
			do {
				unbusy_queued_urb(urb, portdata);
				usb_autopm_put_interface_no_suspend(port->serial->interface);
			} while ((urb = usb_get_from_anchor(&portdata->delayed)));
			break;
		}
	}
}

/*
 * Resume: resubmit interrupt URBs, then for each open port resubmit its
 * idle IN URBs (skipping any that are anchored or still queued for the
 * receive worker) and flush the delayed OUT URBs.  On a bulk submit
 * failure the interface is marked suspended again before bailing out.
 */
int usb_wwan_resume(struct usb_serial *serial)
{
	int i, j;
	struct usb_serial_port *port;
	struct usb_wwan_intf_private *intfdata = serial->private;
	struct usb_wwan_port_private *portdata;
	struct urb *urb;
	int err = 0;

	dbg("%s entered", __func__);
	/* get the interrupt URBs resubmitted unconditionally */
	for (i = 0; i < serial->num_ports; i++) {
		port = serial->port[i];
		if (!port->interrupt_in_urb) {
			dbg("%s: No interrupt URB for port %d", __func__, i);
			continue;
		}
		err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
		dbg("Submitted interrupt URB for port %d (result %d)", i, err);
		if (err < 0) {
			err("%s: Error %d for interrupt URB of port%d",
			    __func__, err, i);
			goto err_out;
		}
	}

	spin_lock_irq(&intfdata->susp_lock);
	intfdata->suspended = 0;
	for (i = 0; i < serial->num_ports; i++) {
		/* walk all ports */
		port = serial->port[i];
		portdata = usb_get_serial_port_data(port);

		/* skip closed ports */
		if (!portdata->opened)
			continue;

		for (j = 0; j < N_IN_URB; j++) {
			urb = portdata->in_urbs[j];
			/* don't re-submit if it already was submitted or if
			 * it is being processed by in_work */
			if (urb->anchor || !list_empty(&urb->urb_list))
				continue;
			usb_anchor_urb(urb, &portdata->submitted);
			err = usb_submit_urb(urb, GFP_ATOMIC);
			if (err < 0) {
				err("%s: Error %d for bulk URB[%d]:%p %d", __func__,
				    err, j, urb, i);
				usb_unanchor_urb(urb);
				intfdata->suspended = 1;
				spin_unlock_irq(&intfdata->susp_lock);
				goto err_out;
			}
		}
		play_delayed(port);
	}
	spin_unlock_irq(&intfdata->susp_lock);
err_out:
	return err;
}
EXPORT_SYMBOL(usb_wwan_resume);
#endif

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug messages");
gpl-2.0
programmecat/linux
drivers/clk/tegra/clk-tegra-fixed.c
417
2877
/*
 * Copyright (c) 2012, 2013, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/io.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/clk/tegra.h>

#include "clk.h"
#include "clk-id.h"

/* CAR oscillator control register and its relevant fields. */
#define OSC_CTRL			0x50
#define OSC_CTRL_OSC_FREQ_SHIFT		28
#define OSC_CTRL_PLL_REF_DIV_SHIFT	26

/*
 * Register the "osc", "clk_m" and "pll_ref" fixed clocks.
 *
 * The oscillator frequency is probed from OSC_CTRL: the top bits index
 * into input_freqs[] (table of 'num' candidate rates).  clk_m is osc
 * divided by clk_m_div; pll_ref is osc divided by 2^(OSC_CTRL bits 27:26).
 *
 * On success *osc_freq (and, when non-NULL, *pll_ref_freq) are filled in.
 * Returns -EINVAL when the probed frequency index is out of range;
 * returns 0 early (without registering the children) when the DT clock
 * table has no slot for clk_m or pll_ref.
 */
int __init tegra_osc_clk_init(void __iomem *clk_base, struct tegra_clk *clks,
			      unsigned long *input_freqs, unsigned int num,
			      unsigned int clk_m_div, unsigned long *osc_freq,
			      unsigned long *pll_ref_freq)
{
	struct clk *clk, *osc;
	struct clk **dt_clk;
	u32 val, pll_ref_div;
	unsigned osc_idx;

	val = readl_relaxed(clk_base + OSC_CTRL);
	osc_idx = val >> OSC_CTRL_OSC_FREQ_SHIFT;

	if (osc_idx < num)
		*osc_freq = input_freqs[osc_idx];
	else
		*osc_freq = 0;

	/* an unknown index means the boot firmware left OSC_CTRL bogus */
	if (!*osc_freq) {
		WARN_ON(1);
		return -EINVAL;
	}

	osc = clk_register_fixed_rate(NULL, "osc", NULL, CLK_IS_ROOT,
				      *osc_freq);

	dt_clk = tegra_lookup_dt_id(tegra_clk_clk_m, clks);
	if (!dt_clk)
		return 0;

	clk = clk_register_fixed_factor(NULL, "clk_m", "osc",
					0, 1, clk_m_div);
	*dt_clk = clk;

	/* pll_ref */
	val = (val >> OSC_CTRL_PLL_REF_DIV_SHIFT) & 3;
	pll_ref_div = 1 << val;
	dt_clk = tegra_lookup_dt_id(tegra_clk_pll_ref, clks);
	if (!dt_clk)
		return 0;

	clk = clk_register_fixed_factor(NULL, "pll_ref", "osc",
					0, 1, pll_ref_div);
	*dt_clk = clk;

	if (pll_ref_freq)
		*pll_ref_freq = *osc_freq / pll_ref_div;

	return 0;
}

/*
 * Register the remaining fixed clocks: the 32 kHz clock and the clk_m/2
 * and clk_m/4 dividers.  Each is registered only when the DT clock table
 * provides a slot for it.
 */
void __init tegra_fixed_clk_init(struct tegra_clk *tegra_clks)
{
	struct clk *clk;
	struct clk **dt_clk;

	/* clk_32k */
	dt_clk = tegra_lookup_dt_id(tegra_clk_clk_32k, tegra_clks);
	if (dt_clk) {
		clk = clk_register_fixed_rate(NULL, "clk_32k", NULL,
					      CLK_IS_ROOT, 32768);
		*dt_clk = clk;
	}

	/* clk_m_div2 */
	dt_clk = tegra_lookup_dt_id(tegra_clk_clk_m_div2, tegra_clks);
	if (dt_clk) {
		clk = clk_register_fixed_factor(NULL, "clk_m_div2", "clk_m",
						CLK_SET_RATE_PARENT, 1, 2);
		*dt_clk = clk;
	}

	/* clk_m_div4 */
	dt_clk = tegra_lookup_dt_id(tegra_clk_clk_m_div4, tegra_clks);
	if (dt_clk) {
		clk = clk_register_fixed_factor(NULL, "clk_m_div4", "clk_m",
						CLK_SET_RATE_PARENT, 1, 4);
		*dt_clk = clk;
	}
}
gpl-2.0
alianmohammad/linux-kernel-3.18-hacks
sound/pci/ice1712/wm8766.c
929
10239
/* * ALSA driver for ICEnsemble VT17xx * * Lowlevel functions for WM8766 codec * * Copyright (c) 2012 Ondrej Zary <linux@rainbow-software.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <sound/core.h> #include <sound/control.h> #include <sound/tlv.h> #include "wm8766.h" /* low-level access */ static void snd_wm8766_write(struct snd_wm8766 *wm, u16 addr, u16 data) { if (addr < WM8766_REG_COUNT) wm->regs[addr] = data; wm->ops.write(wm, addr, data); } /* mixer controls */ static const DECLARE_TLV_DB_SCALE(wm8766_tlv, -12750, 50, 1); static struct snd_wm8766_ctl snd_wm8766_default_ctl[WM8766_CTL_COUNT] = { [WM8766_CTL_CH1_VOL] = { .name = "Channel 1 Playback Volume", .type = SNDRV_CTL_ELEM_TYPE_INTEGER, .tlv = wm8766_tlv, .reg1 = WM8766_REG_DACL1, .reg2 = WM8766_REG_DACR1, .mask1 = WM8766_VOL_MASK, .mask2 = WM8766_VOL_MASK, .max = 0xff, .flags = WM8766_FLAG_STEREO | WM8766_FLAG_VOL_UPDATE, }, [WM8766_CTL_CH2_VOL] = { .name = "Channel 2 Playback Volume", .type = SNDRV_CTL_ELEM_TYPE_INTEGER, .tlv = wm8766_tlv, .reg1 = WM8766_REG_DACL2, .reg2 = WM8766_REG_DACR2, .mask1 = WM8766_VOL_MASK, .mask2 = WM8766_VOL_MASK, .max = 0xff, .flags = WM8766_FLAG_STEREO | WM8766_FLAG_VOL_UPDATE, }, [WM8766_CTL_CH3_VOL] = { .name = "Channel 3 Playback Volume", .type = SNDRV_CTL_ELEM_TYPE_INTEGER, .tlv = 
wm8766_tlv, .reg1 = WM8766_REG_DACL3, .reg2 = WM8766_REG_DACR3, .mask1 = WM8766_VOL_MASK, .mask2 = WM8766_VOL_MASK, .max = 0xff, .flags = WM8766_FLAG_STEREO | WM8766_FLAG_VOL_UPDATE, }, [WM8766_CTL_CH1_SW] = { .name = "Channel 1 Playback Switch", .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN, .reg1 = WM8766_REG_DACCTRL2, .mask1 = WM8766_DAC2_MUTE1, .flags = WM8766_FLAG_INVERT, }, [WM8766_CTL_CH2_SW] = { .name = "Channel 2 Playback Switch", .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN, .reg1 = WM8766_REG_DACCTRL2, .mask1 = WM8766_DAC2_MUTE2, .flags = WM8766_FLAG_INVERT, }, [WM8766_CTL_CH3_SW] = { .name = "Channel 3 Playback Switch", .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN, .reg1 = WM8766_REG_DACCTRL2, .mask1 = WM8766_DAC2_MUTE3, .flags = WM8766_FLAG_INVERT, }, [WM8766_CTL_PHASE1_SW] = { .name = "Channel 1 Phase Invert Playback Switch", .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN, .reg1 = WM8766_REG_IFCTRL, .mask1 = WM8766_PHASE_INVERT1, }, [WM8766_CTL_PHASE2_SW] = { .name = "Channel 2 Phase Invert Playback Switch", .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN, .reg1 = WM8766_REG_IFCTRL, .mask1 = WM8766_PHASE_INVERT2, }, [WM8766_CTL_PHASE3_SW] = { .name = "Channel 3 Phase Invert Playback Switch", .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN, .reg1 = WM8766_REG_IFCTRL, .mask1 = WM8766_PHASE_INVERT3, }, [WM8766_CTL_DEEMPH1_SW] = { .name = "Channel 1 Deemphasis Playback Switch", .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN, .reg1 = WM8766_REG_DACCTRL2, .mask1 = WM8766_DAC2_DEEMP1, }, [WM8766_CTL_DEEMPH2_SW] = { .name = "Channel 2 Deemphasis Playback Switch", .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN, .reg1 = WM8766_REG_DACCTRL2, .mask1 = WM8766_DAC2_DEEMP2, }, [WM8766_CTL_DEEMPH3_SW] = { .name = "Channel 3 Deemphasis Playback Switch", .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN, .reg1 = WM8766_REG_DACCTRL2, .mask1 = WM8766_DAC2_DEEMP3, }, [WM8766_CTL_IZD_SW] = { .name = "Infinite Zero Detect Playback Switch", .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN, .reg1 = WM8766_REG_DACCTRL1, .mask1 = WM8766_DAC_IZD, }, [WM8766_CTL_ZC_SW] = { .name = "Zero 
Cross Detect Playback Switch", .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN, .reg1 = WM8766_REG_DACCTRL2, .mask1 = WM8766_DAC2_ZCD, .flags = WM8766_FLAG_INVERT, }, }; /* exported functions */ void snd_wm8766_init(struct snd_wm8766 *wm) { int i; static const u16 default_values[] = { 0x000, 0x100, 0x120, 0x000, 0x000, 0x100, 0x000, 0x100, 0x000, 0x000, 0x080, }; memcpy(wm->ctl, snd_wm8766_default_ctl, sizeof(wm->ctl)); snd_wm8766_write(wm, WM8766_REG_RESET, 0x00); /* reset */ udelay(10); /* load defaults */ for (i = 0; i < ARRAY_SIZE(default_values); i++) snd_wm8766_write(wm, i, default_values[i]); } void snd_wm8766_resume(struct snd_wm8766 *wm) { int i; for (i = 0; i < WM8766_REG_COUNT; i++) snd_wm8766_write(wm, i, wm->regs[i]); } void snd_wm8766_set_if(struct snd_wm8766 *wm, u16 dac) { u16 val = wm->regs[WM8766_REG_IFCTRL] & ~WM8766_IF_MASK; dac &= WM8766_IF_MASK; snd_wm8766_write(wm, WM8766_REG_IFCTRL, val | dac); } void snd_wm8766_set_master_mode(struct snd_wm8766 *wm, u16 mode) { u16 val = wm->regs[WM8766_REG_DACCTRL3] & ~WM8766_DAC3_MSTR_MASK; mode &= WM8766_DAC3_MSTR_MASK; snd_wm8766_write(wm, WM8766_REG_DACCTRL3, val | mode); } void snd_wm8766_set_power(struct snd_wm8766 *wm, u16 power) { u16 val = wm->regs[WM8766_REG_DACCTRL3] & ~WM8766_DAC3_POWER_MASK; power &= WM8766_DAC3_POWER_MASK; snd_wm8766_write(wm, WM8766_REG_DACCTRL3, val | power); } void snd_wm8766_volume_restore(struct snd_wm8766 *wm) { u16 val = wm->regs[WM8766_REG_DACR1]; /* restore volume after MCLK stopped */ snd_wm8766_write(wm, WM8766_REG_DACR1, val | WM8766_VOL_UPDATE); } /* mixer callbacks */ static int snd_wm8766_volume_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct snd_wm8766 *wm = snd_kcontrol_chip(kcontrol); int n = kcontrol->private_value; uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = (wm->ctl[n].flags & WM8766_FLAG_STEREO) ? 
2 : 1; uinfo->value.integer.min = wm->ctl[n].min; uinfo->value.integer.max = wm->ctl[n].max; return 0; } static int snd_wm8766_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct snd_wm8766 *wm = snd_kcontrol_chip(kcontrol); int n = kcontrol->private_value; return snd_ctl_enum_info(uinfo, 1, wm->ctl[n].max, wm->ctl[n].enum_names); } static int snd_wm8766_ctl_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_wm8766 *wm = snd_kcontrol_chip(kcontrol); int n = kcontrol->private_value; u16 val1, val2; if (wm->ctl[n].get) wm->ctl[n].get(wm, &val1, &val2); else { val1 = wm->regs[wm->ctl[n].reg1] & wm->ctl[n].mask1; val1 >>= __ffs(wm->ctl[n].mask1); if (wm->ctl[n].flags & WM8766_FLAG_STEREO) { val2 = wm->regs[wm->ctl[n].reg2] & wm->ctl[n].mask2; val2 >>= __ffs(wm->ctl[n].mask2); if (wm->ctl[n].flags & WM8766_FLAG_VOL_UPDATE) val2 &= ~WM8766_VOL_UPDATE; } } if (wm->ctl[n].flags & WM8766_FLAG_INVERT) { val1 = wm->ctl[n].max - (val1 - wm->ctl[n].min); if (wm->ctl[n].flags & WM8766_FLAG_STEREO) val2 = wm->ctl[n].max - (val2 - wm->ctl[n].min); } ucontrol->value.integer.value[0] = val1; if (wm->ctl[n].flags & WM8766_FLAG_STEREO) ucontrol->value.integer.value[1] = val2; return 0; } static int snd_wm8766_ctl_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_wm8766 *wm = snd_kcontrol_chip(kcontrol); int n = kcontrol->private_value; u16 val, regval1, regval2; /* this also works for enum because value is an union */ regval1 = ucontrol->value.integer.value[0]; regval2 = ucontrol->value.integer.value[1]; if (wm->ctl[n].flags & WM8766_FLAG_INVERT) { regval1 = wm->ctl[n].max - (regval1 - wm->ctl[n].min); regval2 = wm->ctl[n].max - (regval2 - wm->ctl[n].min); } if (wm->ctl[n].set) wm->ctl[n].set(wm, regval1, regval2); else { val = wm->regs[wm->ctl[n].reg1] & ~wm->ctl[n].mask1; val |= regval1 << __ffs(wm->ctl[n].mask1); /* both stereo controls in one register */ if (wm->ctl[n].flags & 
WM8766_FLAG_STEREO && wm->ctl[n].reg1 == wm->ctl[n].reg2) { val &= ~wm->ctl[n].mask2; val |= regval2 << __ffs(wm->ctl[n].mask2); } snd_wm8766_write(wm, wm->ctl[n].reg1, val); /* stereo controls in different registers */ if (wm->ctl[n].flags & WM8766_FLAG_STEREO && wm->ctl[n].reg1 != wm->ctl[n].reg2) { val = wm->regs[wm->ctl[n].reg2] & ~wm->ctl[n].mask2; val |= regval2 << __ffs(wm->ctl[n].mask2); if (wm->ctl[n].flags & WM8766_FLAG_VOL_UPDATE) val |= WM8766_VOL_UPDATE; snd_wm8766_write(wm, wm->ctl[n].reg2, val); } } return 0; } static int snd_wm8766_add_control(struct snd_wm8766 *wm, int num) { struct snd_kcontrol_new cont; struct snd_kcontrol *ctl; memset(&cont, 0, sizeof(cont)); cont.iface = SNDRV_CTL_ELEM_IFACE_MIXER; cont.private_value = num; cont.name = wm->ctl[num].name; cont.access = SNDRV_CTL_ELEM_ACCESS_READWRITE; if (wm->ctl[num].flags & WM8766_FLAG_LIM || wm->ctl[num].flags & WM8766_FLAG_ALC) cont.access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE; cont.tlv.p = NULL; cont.get = snd_wm8766_ctl_get; cont.put = snd_wm8766_ctl_put; switch (wm->ctl[num].type) { case SNDRV_CTL_ELEM_TYPE_INTEGER: cont.info = snd_wm8766_volume_info; cont.access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ; cont.tlv.p = wm->ctl[num].tlv; break; case SNDRV_CTL_ELEM_TYPE_BOOLEAN: wm->ctl[num].max = 1; if (wm->ctl[num].flags & WM8766_FLAG_STEREO) cont.info = snd_ctl_boolean_stereo_info; else cont.info = snd_ctl_boolean_mono_info; break; case SNDRV_CTL_ELEM_TYPE_ENUMERATED: cont.info = snd_wm8766_enum_info; break; default: return -EINVAL; } ctl = snd_ctl_new1(&cont, wm); if (!ctl) return -ENOMEM; wm->ctl[num].kctl = ctl; return snd_ctl_add(wm->card, ctl); } int snd_wm8766_build_controls(struct snd_wm8766 *wm) { int err, i; for (i = 0; i < WM8766_CTL_COUNT; i++) if (wm->ctl[i].name) { err = snd_wm8766_add_control(wm, i); if (err < 0) return err; } return 0; }
gpl-2.0
chaubeyprateek/Hyper_Kernel_Redmi2
arch/m68k/bvme6000/config.c
2209
9925
/* * arch/m68k/bvme6000/config.c * * Copyright (C) 1997 Richard Hirst [richard@sleepie.demon.co.uk] * * Based on: * * linux/amiga/config.c * * Copyright (C) 1993 Hamish Macdonald * * This file is subject to the terms and conditions of the GNU General Public * License. See the file README.legal in the main directory of this archive * for more details. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/tty.h> #include <linux/console.h> #include <linux/linkage.h> #include <linux/init.h> #include <linux/major.h> #include <linux/genhd.h> #include <linux/rtc.h> #include <linux/interrupt.h> #include <linux/bcd.h> #include <asm/bootinfo.h> #include <asm/pgtable.h> #include <asm/setup.h> #include <asm/irq.h> #include <asm/traps.h> #include <asm/rtc.h> #include <asm/machdep.h> #include <asm/bvme6000hw.h> static void bvme6000_get_model(char *model); extern void bvme6000_sched_init(irq_handler_t handler); extern u32 bvme6000_gettimeoffset(void); extern int bvme6000_hwclk (int, struct rtc_time *); extern int bvme6000_set_clock_mmss (unsigned long); extern void bvme6000_reset (void); void bvme6000_set_vectors (void); /* Save tick handler routine pointer, will point to xtime_update() in * kernel/timer/timekeeping.c, called via bvme6000_process_int() */ static irq_handler_t tick_handler; int bvme6000_parse_bootinfo(const struct bi_record *bi) { if (bi->tag == BI_VME_TYPE) return 0; else return 1; } void bvme6000_reset(void) { volatile PitRegsPtr pit = (PitRegsPtr)BVME_PIT_BASE; printk ("\r\n\nCalled bvme6000_reset\r\n" "\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r"); /* The string of returns is to delay the reset until the whole * message is output. */ /* Enable the watchdog, via PIT port C bit 4 */ pit->pcddr |= 0x10; /* WDOG enable */ while(1) ; } static void bvme6000_get_model(char *model) { sprintf(model, "BVME%d000", m68k_cputype == CPU_68060 ? 
6 : 4); } /* * This function is called during kernel startup to initialize * the bvme6000 IRQ handling routines. */ static void __init bvme6000_init_IRQ(void) { m68k_setup_user_interrupt(VEC_USER, 192); } void __init config_bvme6000(void) { volatile PitRegsPtr pit = (PitRegsPtr)BVME_PIT_BASE; /* Board type is only set by newer versions of vmelilo/tftplilo */ if (!vme_brdtype) { if (m68k_cputype == CPU_68060) vme_brdtype = VME_TYPE_BVME6000; else vme_brdtype = VME_TYPE_BVME4000; } #if 0 /* Call bvme6000_set_vectors() so ABORT will work, along with BVMBug * debugger. Note trap_init() will splat the abort vector, but * bvme6000_init_IRQ() will put it back again. Hopefully. */ bvme6000_set_vectors(); #endif mach_max_dma_address = 0xffffffff; mach_sched_init = bvme6000_sched_init; mach_init_IRQ = bvme6000_init_IRQ; arch_gettimeoffset = bvme6000_gettimeoffset; mach_hwclk = bvme6000_hwclk; mach_set_clock_mmss = bvme6000_set_clock_mmss; mach_reset = bvme6000_reset; mach_get_model = bvme6000_get_model; printk ("Board is %sconfigured as a System Controller\n", *config_reg_ptr & BVME_CONFIG_SW1 ? "" : "not "); /* Now do the PIT configuration */ pit->pgcr = 0x00; /* Unidirectional 8 bit, no handshake for now */ pit->psrr = 0x18; /* PIACK and PIRQ functions enabled */ pit->pacr = 0x00; /* Sub Mode 00, H2 i/p, no DMA */ pit->padr = 0x00; /* Just to be tidy! */ pit->paddr = 0x00; /* All inputs for now (safest) */ pit->pbcr = 0x80; /* Sub Mode 1x, H4 i/p, no DMA */ pit->pbdr = 0xbc | (*config_reg_ptr & BVME_CONFIG_SW1 ? 
0 : 0x40); /* PRI, SYSCON?, Level3, SCC clks from xtal */ pit->pbddr = 0xf3; /* Mostly outputs */ pit->pcdr = 0x01; /* PA transceiver disabled */ pit->pcddr = 0x03; /* WDOG disable */ /* Disable snooping for Ethernet and VME accesses */ bvme_acr_addrctl = 0; } irqreturn_t bvme6000_abort_int (int irq, void *dev_id) { unsigned long *new = (unsigned long *)vectors; unsigned long *old = (unsigned long *)0xf8000000; /* Wait for button release */ while (*(volatile unsigned char *)BVME_LOCAL_IRQ_STAT & BVME_ABORT_STATUS) ; *(new+4) = *(old+4); /* Illegal instruction */ *(new+9) = *(old+9); /* Trace */ *(new+47) = *(old+47); /* Trap #15 */ *(new+0x1f) = *(old+0x1f); /* ABORT switch */ return IRQ_HANDLED; } static irqreturn_t bvme6000_timer_int (int irq, void *dev_id) { volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE; unsigned char msr = rtc->msr & 0xc0; rtc->msr = msr | 0x20; /* Ack the interrupt */ return tick_handler(irq, dev_id); } /* * Set up the RTC timer 1 to mode 2, so T1 output toggles every 5ms * (40000 x 125ns). It will interrupt every 10ms, when T1 goes low. * So, when reading the elapsed time, you should read timer1, * subtract it from 39999, and then add 40000 if T1 is high. * That gives you the number of 125ns ticks in to the 10ms period, * so divide by 8 to get the microsecond result. 
*/ void bvme6000_sched_init (irq_handler_t timer_routine) { volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE; unsigned char msr = rtc->msr & 0xc0; rtc->msr = 0; /* Ensure timer registers accessible */ tick_handler = timer_routine; if (request_irq(BVME_IRQ_RTC, bvme6000_timer_int, 0, "timer", bvme6000_timer_int)) panic ("Couldn't register timer int"); rtc->t1cr_omr = 0x04; /* Mode 2, ext clk */ rtc->t1msb = 39999 >> 8; rtc->t1lsb = 39999 & 0xff; rtc->irr_icr1 &= 0xef; /* Route timer 1 to INTR pin */ rtc->msr = 0x40; /* Access int.cntrl, etc */ rtc->pfr_icr0 = 0x80; /* Just timer 1 ints enabled */ rtc->irr_icr1 = 0; rtc->t1cr_omr = 0x0a; /* INTR+T1 active lo, push-pull */ rtc->t0cr_rtmr &= 0xdf; /* Stop timers in standby */ rtc->msr = 0; /* Access timer 1 control */ rtc->t1cr_omr = 0x05; /* Mode 2, ext clk, GO */ rtc->msr = msr; if (request_irq(BVME_IRQ_ABORT, bvme6000_abort_int, 0, "abort", bvme6000_abort_int)) panic ("Couldn't register abort int"); } /* This is always executed with interrupts disabled. */ /* * NOTE: Don't accept any readings within 5us of rollover, as * the T1INT bit may be a little slow getting set. There is also * a fault in the chip, meaning that reads may produce invalid * results... */ u32 bvme6000_gettimeoffset(void) { volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE; volatile PitRegsPtr pit = (PitRegsPtr)BVME_PIT_BASE; unsigned char msr = rtc->msr & 0xc0; unsigned char t1int, t1op; u32 v = 800000, ov; rtc->msr = 0; /* Ensure timer registers accessible */ do { ov = v; t1int = rtc->msr & 0x20; t1op = pit->pcdr & 0x04; rtc->t1cr_omr |= 0x40; /* Latch timer1 */ v = rtc->t1msb << 8; /* Read timer1 */ v |= rtc->t1lsb; /* Read timer1 */ } while (t1int != (rtc->msr & 0x20) || t1op != (pit->pcdr & 0x04) || abs(ov-v) > 80 || v > 39960); v = 39999 - v; if (!t1op) /* If in second half cycle.. 
*/ v += 40000; v /= 8; /* Convert ticks to microseconds */ if (t1int) v += 10000; /* Int pending, + 10ms */ rtc->msr = msr; return v * 1000; } /* * Looks like op is non-zero for setting the clock, and zero for * reading the clock. * * struct hwclk_time { * unsigned sec; 0..59 * unsigned min; 0..59 * unsigned hour; 0..23 * unsigned day; 1..31 * unsigned mon; 0..11 * unsigned year; 00... * int wday; 0..6, 0 is Sunday, -1 means unknown/don't set * }; */ int bvme6000_hwclk(int op, struct rtc_time *t) { volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE; unsigned char msr = rtc->msr & 0xc0; rtc->msr = 0x40; /* Ensure clock and real-time-mode-register * are accessible */ if (op) { /* Write.... */ rtc->t0cr_rtmr = t->tm_year%4; rtc->bcd_tenms = 0; rtc->bcd_sec = bin2bcd(t->tm_sec); rtc->bcd_min = bin2bcd(t->tm_min); rtc->bcd_hr = bin2bcd(t->tm_hour); rtc->bcd_dom = bin2bcd(t->tm_mday); rtc->bcd_mth = bin2bcd(t->tm_mon + 1); rtc->bcd_year = bin2bcd(t->tm_year%100); if (t->tm_wday >= 0) rtc->bcd_dow = bin2bcd(t->tm_wday+1); rtc->t0cr_rtmr = t->tm_year%4 | 0x08; } else { /* Read.... */ do { t->tm_sec = bcd2bin(rtc->bcd_sec); t->tm_min = bcd2bin(rtc->bcd_min); t->tm_hour = bcd2bin(rtc->bcd_hr); t->tm_mday = bcd2bin(rtc->bcd_dom); t->tm_mon = bcd2bin(rtc->bcd_mth)-1; t->tm_year = bcd2bin(rtc->bcd_year); if (t->tm_year < 70) t->tm_year += 100; t->tm_wday = bcd2bin(rtc->bcd_dow)-1; } while (t->tm_sec != bcd2bin(rtc->bcd_sec)); } rtc->msr = msr; return 0; } /* * Set the minutes and seconds from seconds value 'nowtime'. Fail if * clock is out by > 30 minutes. Logic lifted from atari code. * Algorithm is to wait for the 10ms register to change, and then to * wait a short while, and then set it. 
*/ int bvme6000_set_clock_mmss (unsigned long nowtime) { int retval = 0; short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60; unsigned char rtc_minutes, rtc_tenms; volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE; unsigned char msr = rtc->msr & 0xc0; unsigned long flags; volatile int i; rtc->msr = 0; /* Ensure clock accessible */ rtc_minutes = bcd2bin (rtc->bcd_min); if ((rtc_minutes < real_minutes ? real_minutes - rtc_minutes : rtc_minutes - real_minutes) < 30) { local_irq_save(flags); rtc_tenms = rtc->bcd_tenms; while (rtc_tenms == rtc->bcd_tenms) ; for (i = 0; i < 1000; i++) ; rtc->bcd_min = bin2bcd(real_minutes); rtc->bcd_sec = bin2bcd(real_seconds); local_irq_restore(flags); } else retval = -1; rtc->msr = msr; return retval; }
gpl-2.0
pio-masaki/kernel_at300se
fs/afs/inode.c
2721
12514
/* * Copyright (c) 2002 Red Hat, Inc. All rights reserved. * * This software may be freely redistributed under the terms of the * GNU General Public License. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Authors: David Woodhouse <dwmw2@infradead.org> * David Howells <dhowells@redhat.com> * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/sched.h> #include <linux/mount.h> #include <linux/namei.h> #include "internal.h" struct afs_iget_data { struct afs_fid fid; struct afs_volume *volume; /* volume on which resides */ }; /* * map the AFS file status to the inode member variables */ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key) { struct inode *inode = AFS_VNODE_TO_I(vnode); _debug("FS: ft=%d lk=%d sz=%llu ver=%Lu mod=%hu", vnode->status.type, vnode->status.nlink, (unsigned long long) vnode->status.size, vnode->status.data_version, vnode->status.mode); switch (vnode->status.type) { case AFS_FTYPE_FILE: inode->i_mode = S_IFREG | vnode->status.mode; inode->i_op = &afs_file_inode_operations; inode->i_fop = &afs_file_operations; break; case AFS_FTYPE_DIR: inode->i_mode = S_IFDIR | vnode->status.mode; inode->i_op = &afs_dir_inode_operations; inode->i_fop = &afs_dir_file_operations; break; case AFS_FTYPE_SYMLINK: inode->i_mode = S_IFLNK | vnode->status.mode; inode->i_op = &page_symlink_inode_operations; break; default: printk("kAFS: AFS vnode with undefined type\n"); return -EBADMSG; } #ifdef CONFIG_AFS_FSCACHE if (vnode->status.size != inode->i_size) fscache_attr_changed(vnode->cache); #endif inode->i_nlink = vnode->status.nlink; inode->i_uid = vnode->status.owner; inode->i_gid = 0; inode->i_size = vnode->status.size; inode->i_ctime.tv_sec = vnode->status.mtime_server; inode->i_ctime.tv_nsec = 0; 
inode->i_atime = inode->i_mtime = inode->i_ctime; inode->i_blocks = 0; inode->i_generation = vnode->fid.unique; inode->i_version = vnode->status.data_version; inode->i_mapping->a_ops = &afs_fs_aops; /* check to see whether a symbolic link is really a mountpoint */ if (vnode->status.type == AFS_FTYPE_SYMLINK) { afs_mntpt_check_symlink(vnode, key); if (test_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags)) { inode->i_mode = S_IFDIR | vnode->status.mode; inode->i_op = &afs_mntpt_inode_operations; inode->i_fop = &afs_mntpt_file_operations; } } return 0; } /* * iget5() comparator */ static int afs_iget5_test(struct inode *inode, void *opaque) { struct afs_iget_data *data = opaque; return inode->i_ino == data->fid.vnode && inode->i_generation == data->fid.unique; } /* * iget5() comparator for inode created by autocell operations * * These pseudo inodes don't match anything. */ static int afs_iget5_autocell_test(struct inode *inode, void *opaque) { return 0; } /* * iget5() inode initialiser */ static int afs_iget5_set(struct inode *inode, void *opaque) { struct afs_iget_data *data = opaque; struct afs_vnode *vnode = AFS_FS_I(inode); inode->i_ino = data->fid.vnode; inode->i_generation = data->fid.unique; vnode->fid = data->fid; vnode->volume = data->volume; return 0; } /* * inode retrieval for autocell */ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name, int namesz, struct key *key) { struct afs_iget_data data; struct afs_super_info *as; struct afs_vnode *vnode; struct super_block *sb; struct inode *inode; static atomic_t afs_autocell_ino; _enter("{%x:%u},%*.*s,", AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode, namesz, namesz, dev_name ?: ""); sb = dir->i_sb; as = sb->s_fs_info; data.volume = as->volume; data.fid.vid = as->volume->vid; data.fid.unique = 0; data.fid.vnode = 0; inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino), afs_iget5_autocell_test, afs_iget5_set, &data); if (!inode) { _leave(" = -ENOMEM"); return ERR_PTR(-ENOMEM); } 
_debug("GOT INODE %p { ino=%lu, vl=%x, vn=%x, u=%x }", inode, inode->i_ino, data.fid.vid, data.fid.vnode, data.fid.unique); vnode = AFS_FS_I(inode); /* there shouldn't be an existing inode */ BUG_ON(!(inode->i_state & I_NEW)); inode->i_size = 0; inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO; inode->i_op = &afs_autocell_inode_operations; inode->i_nlink = 2; inode->i_uid = 0; inode->i_gid = 0; inode->i_ctime.tv_sec = get_seconds(); inode->i_ctime.tv_nsec = 0; inode->i_atime = inode->i_mtime = inode->i_ctime; inode->i_blocks = 0; inode->i_version = 0; inode->i_generation = 0; set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags); set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags); inode->i_flags |= S_AUTOMOUNT | S_NOATIME; unlock_new_inode(inode); _leave(" = %p", inode); return inode; } /* * inode retrieval */ struct inode *afs_iget(struct super_block *sb, struct key *key, struct afs_fid *fid, struct afs_file_status *status, struct afs_callback *cb) { struct afs_iget_data data = { .fid = *fid }; struct afs_super_info *as; struct afs_vnode *vnode; struct inode *inode; int ret; _enter(",{%x:%u.%u},,", fid->vid, fid->vnode, fid->unique); as = sb->s_fs_info; data.volume = as->volume; inode = iget5_locked(sb, fid->vnode, afs_iget5_test, afs_iget5_set, &data); if (!inode) { _leave(" = -ENOMEM"); return ERR_PTR(-ENOMEM); } _debug("GOT INODE %p { vl=%x vn=%x, u=%x }", inode, fid->vid, fid->vnode, fid->unique); vnode = AFS_FS_I(inode); /* deal with an existing inode */ if (!(inode->i_state & I_NEW)) { _leave(" = %p", inode); return inode; } if (!status) { /* it's a remotely extant inode */ set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags); ret = afs_vnode_fetch_status(vnode, NULL, key); if (ret < 0) goto bad_inode; } else { /* it's an inode we just created */ memcpy(&vnode->status, status, sizeof(vnode->status)); if (!cb) { /* it's a symlink we just created (the fileserver * didn't give us a callback) */ vnode->cb_version = 0; vnode->cb_expiry = 0; vnode->cb_type = 0; vnode->cb_expires = 
get_seconds(); } else { vnode->cb_version = cb->version; vnode->cb_expiry = cb->expiry; vnode->cb_type = cb->type; vnode->cb_expires = vnode->cb_expiry + get_seconds(); } } /* set up caching before mapping the status, as map-status reads the * first page of symlinks to see if they're really mountpoints */ inode->i_size = vnode->status.size; #ifdef CONFIG_AFS_FSCACHE vnode->cache = fscache_acquire_cookie(vnode->volume->cache, &afs_vnode_cache_index_def, vnode); #endif ret = afs_inode_map_status(vnode, key); if (ret < 0) goto bad_inode; /* success */ clear_bit(AFS_VNODE_UNSET, &vnode->flags); inode->i_flags |= S_NOATIME; unlock_new_inode(inode); _leave(" = %p [CB { v=%u t=%u }]", inode, vnode->cb_version, vnode->cb_type); return inode; /* failure */ bad_inode: #ifdef CONFIG_AFS_FSCACHE fscache_relinquish_cookie(vnode->cache, 0); vnode->cache = NULL; #endif iget_failed(inode); _leave(" = %d [bad]", ret); return ERR_PTR(ret); } /* * mark the data attached to an inode as obsolete due to a write on the server * - might also want to ditch all the outstanding writes and dirty pages */ void afs_zap_data(struct afs_vnode *vnode) { _enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode); /* nuke all the non-dirty pages that aren't locked, mapped or being * written back in a regular file and completely discard the pages in a * directory or symlink */ if (S_ISREG(vnode->vfs_inode.i_mode)) invalidate_remote_inode(&vnode->vfs_inode); else invalidate_inode_pages2(vnode->vfs_inode.i_mapping); } /* * validate a vnode/inode * - there are several things we need to check * - parent dir data changes (rm, rmdir, rename, mkdir, create, link, * symlink) * - parent dir metadata changed (security changes) * - dentry data changed (write, truncate) * - dentry metadata changed (security changes) */ int afs_validate(struct afs_vnode *vnode, struct key *key) { int ret; _enter("{v={%x:%u} fl=%lx},%x", vnode->fid.vid, vnode->fid.vnode, vnode->flags, key_serial(key)); if (vnode->cb_promised && 
!test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) && !test_bit(AFS_VNODE_MODIFIED, &vnode->flags) && !test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) { if (vnode->cb_expires < get_seconds() + 10) { _debug("callback expired"); set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags); } else { goto valid; } } if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) goto valid; mutex_lock(&vnode->validate_lock); /* if the promise has expired, we need to check the server again to get * a new promise - note that if the (parent) directory's metadata was * changed then the security may be different and we may no longer have * access */ if (!vnode->cb_promised || test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) { _debug("not promised"); ret = afs_vnode_fetch_status(vnode, NULL, key); if (ret < 0) goto error_unlock; _debug("new promise [fl=%lx]", vnode->flags); } if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { _debug("file already deleted"); ret = -ESTALE; goto error_unlock; } /* if the vnode's data version number changed then its contents are * different */ if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) afs_zap_data(vnode); clear_bit(AFS_VNODE_MODIFIED, &vnode->flags); mutex_unlock(&vnode->validate_lock); valid: _leave(" = 0"); return 0; error_unlock: mutex_unlock(&vnode->validate_lock); _leave(" = %d", ret); return ret; } /* * read the attributes of an inode */ int afs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode; inode = dentry->d_inode; _enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation); generic_fillattr(inode, stat); return 0; } /* * discard an AFS inode */ int afs_drop_inode(struct inode *inode) { _enter(""); if (test_bit(AFS_VNODE_PSEUDODIR, &AFS_FS_I(inode)->flags)) return generic_delete_inode(inode); else return generic_drop_inode(inode); } /* * clear an AFS inode */ void afs_evict_inode(struct inode *inode) { struct afs_permits *permits; struct afs_vnode *vnode; vnode = AFS_FS_I(inode); _enter("{%x:%u.%d} v=%u x=%u t=%u 
}", vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique, vnode->cb_version, vnode->cb_expiry, vnode->cb_type); _debug("CLEAR INODE %p", inode); ASSERTCMP(inode->i_ino, ==, vnode->fid.vnode); truncate_inode_pages(&inode->i_data, 0); end_writeback(inode); afs_give_up_callback(vnode); if (vnode->server) { spin_lock(&vnode->server->fs_lock); rb_erase(&vnode->server_rb, &vnode->server->fs_vnodes); spin_unlock(&vnode->server->fs_lock); afs_put_server(vnode->server); vnode->server = NULL; } ASSERT(list_empty(&vnode->writebacks)); ASSERT(!vnode->cb_promised); #ifdef CONFIG_AFS_FSCACHE fscache_relinquish_cookie(vnode->cache, 0); vnode->cache = NULL; #endif mutex_lock(&vnode->permits_lock); permits = vnode->permits; rcu_assign_pointer(vnode->permits, NULL); mutex_unlock(&vnode->permits_lock); if (permits) call_rcu(&permits->rcu, afs_zap_permits); _leave(""); } /* * set the attributes of an inode */ int afs_setattr(struct dentry *dentry, struct iattr *attr) { struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode); struct key *key; int ret; _enter("{%x:%u},{n=%s},%x", vnode->fid.vid, vnode->fid.vnode, dentry->d_name.name, attr->ia_valid); if (!(attr->ia_valid & (ATTR_SIZE | ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_MTIME))) { _leave(" = 0 [unsupported]"); return 0; } /* flush any dirty data outstanding on a regular file */ if (S_ISREG(vnode->vfs_inode.i_mode)) { filemap_write_and_wait(vnode->vfs_inode.i_mapping); afs_writeback_all(vnode); } if (attr->ia_valid & ATTR_FILE) { key = attr->ia_file->private_data; } else { key = afs_request_key(vnode->volume->cell); if (IS_ERR(key)) { ret = PTR_ERR(key); goto error; } } ret = afs_vnode_setattr(vnode, key, attr); if (!(attr->ia_valid & ATTR_FILE)) key_put(key); error: _leave(" = %d", ret); return ret; }
gpl-2.0
FennyFatal/i747_kernel_ics
fs/logfs/readwrite.c
2721
56976
/* * fs/logfs/readwrite.c * * As should be obvious for Linux kernel code, license is GPLv2 * * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org> * * * Actually contains five sets of very similar functions: * read read blocks from a file * seek_hole find next hole * seek_data find next data block * valid check whether a block still belongs to a file * write write blocks to a file * delete delete a block (for directories and ifile) * rewrite move existing blocks of a file to a new location (gc helper) * truncate truncate a file */ #include "logfs.h" #include <linux/sched.h> #include <linux/slab.h> static u64 adjust_bix(u64 bix, level_t level) { switch (level) { case 0: return bix; case LEVEL(1): return max_t(u64, bix, I0_BLOCKS); case LEVEL(2): return max_t(u64, bix, I1_BLOCKS); case LEVEL(3): return max_t(u64, bix, I2_BLOCKS); case LEVEL(4): return max_t(u64, bix, I3_BLOCKS); case LEVEL(5): return max_t(u64, bix, I4_BLOCKS); default: WARN_ON(1); return bix; } } static inline u64 maxbix(u8 height) { return 1ULL << (LOGFS_BLOCK_BITS * height); } /** * The inode address space is cut in two halves. Lower half belongs to data * pages, upper half to indirect blocks. If the high bit (INDIRECT_BIT) is * set, the actual block index (bix) and level can be derived from the page * index. * * The lowest three bits of the block index are set to 0 after packing and * unpacking. Since the lowest n bits (9 for 4KiB blocksize) are ignored * anyway this is harmless. 
*/ #define ARCH_SHIFT (BITS_PER_LONG - 32) #define INDIRECT_BIT (0x80000000UL << ARCH_SHIFT) #define LEVEL_SHIFT (28 + ARCH_SHIFT) static inline pgoff_t first_indirect_block(void) { return INDIRECT_BIT | (1ULL << LEVEL_SHIFT); } pgoff_t logfs_pack_index(u64 bix, level_t level) { pgoff_t index; BUG_ON(bix >= INDIRECT_BIT); if (level == 0) return bix; index = INDIRECT_BIT; index |= (__force long)level << LEVEL_SHIFT; index |= bix >> ((__force u8)level * LOGFS_BLOCK_BITS); return index; } void logfs_unpack_index(pgoff_t index, u64 *bix, level_t *level) { u8 __level; if (!(index & INDIRECT_BIT)) { *bix = index; *level = 0; return; } __level = (index & ~INDIRECT_BIT) >> LEVEL_SHIFT; *level = LEVEL(__level); *bix = (index << (__level * LOGFS_BLOCK_BITS)) & ~INDIRECT_BIT; *bix = adjust_bix(*bix, *level); return; } #undef ARCH_SHIFT #undef INDIRECT_BIT #undef LEVEL_SHIFT /* * Time is stored as nanoseconds since the epoch. */ static struct timespec be64_to_timespec(__be64 betime) { return ns_to_timespec(be64_to_cpu(betime)); } static __be64 timespec_to_be64(struct timespec tsp) { return cpu_to_be64((u64)tsp.tv_sec * NSEC_PER_SEC + tsp.tv_nsec); } static void logfs_disk_to_inode(struct logfs_disk_inode *di, struct inode*inode) { struct logfs_inode *li = logfs_inode(inode); int i; inode->i_mode = be16_to_cpu(di->di_mode); li->li_height = di->di_height; li->li_flags = be32_to_cpu(di->di_flags); inode->i_uid = be32_to_cpu(di->di_uid); inode->i_gid = be32_to_cpu(di->di_gid); inode->i_size = be64_to_cpu(di->di_size); logfs_set_blocks(inode, be64_to_cpu(di->di_used_bytes)); inode->i_atime = be64_to_timespec(di->di_atime); inode->i_ctime = be64_to_timespec(di->di_ctime); inode->i_mtime = be64_to_timespec(di->di_mtime); inode->i_nlink = be32_to_cpu(di->di_refcount); inode->i_generation = be32_to_cpu(di->di_generation); switch (inode->i_mode & S_IFMT) { case S_IFSOCK: /* fall through */ case S_IFBLK: /* fall through */ case S_IFCHR: /* fall through */ case S_IFIFO: inode->i_rdev = 
be64_to_cpu(di->di_data[0]); break; case S_IFDIR: /* fall through */ case S_IFREG: /* fall through */ case S_IFLNK: for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++) li->li_data[i] = be64_to_cpu(di->di_data[i]); break; default: BUG(); } } static void logfs_inode_to_disk(struct inode *inode, struct logfs_disk_inode*di) { struct logfs_inode *li = logfs_inode(inode); int i; di->di_mode = cpu_to_be16(inode->i_mode); di->di_height = li->li_height; di->di_pad = 0; di->di_flags = cpu_to_be32(li->li_flags); di->di_uid = cpu_to_be32(inode->i_uid); di->di_gid = cpu_to_be32(inode->i_gid); di->di_size = cpu_to_be64(i_size_read(inode)); di->di_used_bytes = cpu_to_be64(li->li_used_bytes); di->di_atime = timespec_to_be64(inode->i_atime); di->di_ctime = timespec_to_be64(inode->i_ctime); di->di_mtime = timespec_to_be64(inode->i_mtime); di->di_refcount = cpu_to_be32(inode->i_nlink); di->di_generation = cpu_to_be32(inode->i_generation); switch (inode->i_mode & S_IFMT) { case S_IFSOCK: /* fall through */ case S_IFBLK: /* fall through */ case S_IFCHR: /* fall through */ case S_IFIFO: di->di_data[0] = cpu_to_be64(inode->i_rdev); break; case S_IFDIR: /* fall through */ case S_IFREG: /* fall through */ case S_IFLNK: for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++) di->di_data[i] = cpu_to_be64(li->li_data[i]); break; default: BUG(); } } static void __logfs_set_blocks(struct inode *inode) { struct super_block *sb = inode->i_sb; struct logfs_inode *li = logfs_inode(inode); inode->i_blocks = ULONG_MAX; if (li->li_used_bytes >> sb->s_blocksize_bits < ULONG_MAX) inode->i_blocks = ALIGN(li->li_used_bytes, 512) >> 9; } void logfs_set_blocks(struct inode *inode, u64 bytes) { struct logfs_inode *li = logfs_inode(inode); li->li_used_bytes = bytes; __logfs_set_blocks(inode); } static void prelock_page(struct super_block *sb, struct page *page, int lock) { struct logfs_super *super = logfs_super(sb); BUG_ON(!PageLocked(page)); if (lock) { BUG_ON(PagePreLocked(page)); SetPagePreLocked(page); } else { /* We are in 
GC path. */
		if (PagePreLocked(page))
			super->s_lock_count++;
		else
			SetPagePreLocked(page);
	}
}

/*
 * Undo prelock_page(): drop the pre-lock annotation, or (on the GC
 * path) decrement the nesting count first if it was nested.
 */
static void preunlock_page(struct super_block *sb, struct page *page, int lock)
{
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(!PageLocked(page));
	if (lock)
		ClearPagePreLocked(page);
	else {
		/* We are in GC path. */
		BUG_ON(!PagePreLocked(page));
		if (super->s_lock_count)
			super->s_lock_count--;
		else
			ClearPagePreLocked(page);
	}
}

/*
 * Logfs is prone to an AB-BA deadlock where one task tries to acquire
 * s_write_mutex with a locked page and GC tries to get that page while holding
 * s_write_mutex.
 * To solve this issue logfs will ignore the page lock iff the page in question
 * is waiting for s_write_mutex.  We annotate this fact by setting PG_pre_locked
 * in addition to PG_locked.
 */
static void logfs_get_wblocks(struct super_block *sb, struct page *page,
		int lock)
{
	struct logfs_super *super = logfs_super(sb);

	if (page)
		prelock_page(sb, page, lock);

	if (lock) {
		mutex_lock(&super->s_write_mutex);
		logfs_gc_pass(sb);
		/* FIXME: We also have to check for shadowed space
		 * and mempool fill grade */
	}
}

static void logfs_put_wblocks(struct super_block *sb, struct page *page,
		int lock)
{
	struct logfs_super *super = logfs_super(sb);

	if (page)
		preunlock_page(sb, page, lock);
	/* Order matters - we must clear PG_pre_locked before releasing
	 * s_write_mutex or we could race against another task. */
	if (lock)
		mutex_unlock(&super->s_write_mutex);
}

/*
 * Look up or create the page cache page for (bix, level); the page is
 * returned locked and with an elevated refcount.
 */
static struct page *logfs_get_read_page(struct inode *inode, u64 bix,
		level_t level)
{
	return find_or_create_page(inode->i_mapping,
			logfs_pack_index(bix, level), GFP_NOFS);
}

static void logfs_put_read_page(struct page *page)
{
	unlock_page(page);
	page_cache_release(page);
}

/*
 * Lock @page for writing.  If the current holder of the page lock is
 * itself blocked on s_write_mutex (PG_pre_locked set), it is safe to
 * proceed without taking the lock; otherwise spin via schedule() until
 * the holder releases it.
 */
static void logfs_lock_write_page(struct page *page)
{
	int loop = 0;

	while (unlikely(!trylock_page(page))) {
		if (loop++ > 0x1000) {
			/* Has been observed once so far... */
			printk(KERN_ERR "stack at %p\n", &loop);
			BUG();
		}
		if (PagePreLocked(page)) {
			/* Holder of page lock is waiting for us, it
			 * is safe to use this page. */
			break;
		}
		/* Some other process has this page locked and has
		 * nothing to do with us.  Wait for it to finish.
		 */
		schedule();
	}
	BUG_ON(!PageLocked(page));
}

/*
 * As logfs_get_read_page(), but the lock is acquired under the
 * pre-lock rules of logfs_lock_write_page().
 */
static struct page *logfs_get_write_page(struct inode *inode, u64 bix,
		level_t level)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index = logfs_pack_index(bix, level);
	struct page *page;
	int err;

repeat:
	page = find_get_page(mapping, index);
	if (!page) {
		page = __page_cache_alloc(GFP_NOFS);
		if (!page)
			return NULL;
		err = add_to_page_cache_lru(page, mapping, index, GFP_NOFS);
		if (unlikely(err)) {
			page_cache_release(page);
			/* -EEXIST means someone beat us to it; retry. */
			if (err == -EEXIST)
				goto repeat;
			return NULL;
		}
	} else
		logfs_lock_write_page(page);
	BUG_ON(!PageLocked(page));
	return page;
}

/* Only unlock if we actually own the lock (page not pre-locked by another). */
static void logfs_unlock_write_page(struct page *page)
{
	if (!PagePreLocked(page))
		unlock_page(page);
}

static void logfs_put_write_page(struct page *page)
{
	logfs_unlock_write_page(page);
	page_cache_release(page);
}

static struct page *logfs_get_page(struct inode *inode, u64 bix, level_t level,
		int rw)
{
	if (rw == READ)
		return logfs_get_read_page(inode, bix, level);
	else
		return logfs_get_write_page(inode, bix, level);
}

static void logfs_put_page(struct page *page, int rw)
{
	if (rw == READ)
		logfs_put_read_page(page);
	else
		logfs_put_write_page(page);
}

/* Extract field number @skip of width @no bits from @val. */
static unsigned long __get_bits(u64 val, int skip, int no)
{
	u64 ret = val;

	ret >>= skip * no;
	/* Mask off everything above the low @no bits. */
	ret <<= 64 - no;
	ret >>= 64 - no;
	return ret;
}

/* Slot index of @val within the indirect block at level @skip. */
static unsigned long get_bits(u64 val, level_t skip)
{
	return __get_bits(val, (__force int)skip, LOGFS_BLOCK_BITS);
}

static inline void init_shadow_tree(struct super_block *sb,
		struct shadow_tree *tree)
{
	struct logfs_super *super = logfs_super(sb);

	btree_init_mempool64(&tree->new, super->s_btree_pool);
	btree_init_mempool64(&tree->old, super->s_btree_pool);
}

/* Write back a dirty indirect block through the regular write path. */
static void indirect_write_block(struct logfs_block *block)
{
	struct page *page;
	struct inode *inode;
	int ret;

	page = block->page;
	inode = page->mapping->host;
	logfs_lock_write_page(page);
	ret = logfs_write_buf(inode, page, 0);
	logfs_unlock_write_page(page);
	/*
	 * This needs some rework.  Unless you want your filesystem to run
	 * completely synchronously (you don't), the filesystem will always
	 * report writes as 'successful' before the actual work has been
	 * done.  The actual work gets done here and this is where any errors
	 * will show up.  And there isn't much we can do about it, really.
	 *
	 * Some attempts to fix the errors (move from bad blocks, retry io,...)
	 * have already been done, so anything left should be either a broken
	 * device or a bug somewhere in logfs itself.  Being relatively new,
	 * the odds currently favor a bug, so for now the line below isn't
	 * entirely tasteles.
	 */
	BUG_ON(ret);
}

/* Write back a dirty inode; the master inode goes through the anchor. */
static void inode_write_block(struct logfs_block *block)
{
	struct inode *inode;
	int ret;

	inode = block->inode;
	if (inode->i_ino == LOGFS_INO_MASTER)
		logfs_write_anchor(inode->i_sb);
	else {
		ret = __logfs_write_inode(inode, 0);
		/* see indirect_write_block comment */
		BUG_ON(ret);
	}
}

/*
 * This silences a false, yet annoying gcc warning.  I hate it when my editor
 * jumps into bitops.h each time I recompile this file.
 * TODO: Complain to gcc folks about this and upgrade compiler.
 */
static unsigned long fnb(const unsigned long *addr,
		unsigned long size, unsigned long offset)
{
	return find_next_bit(addr, size, offset);
}

/* Pack mode, height and flags into the first 64-bit word of the disk inode. */
static __be64 inode_val0(struct inode *inode)
{
	struct logfs_inode *li = logfs_inode(inode);
	u64 val;

	/*
	 * Explicit shifting generates good code, but must match the format
	 * of the structure.  Add some paranoia just in case.
	 */
	BUILD_BUG_ON(offsetof(struct logfs_disk_inode, di_mode) != 0);
	BUILD_BUG_ON(offsetof(struct logfs_disk_inode, di_height) != 2);
	BUILD_BUG_ON(offsetof(struct logfs_disk_inode, di_flags) != 4);

	val =	(u64)inode->i_mode << 48 |
		(u64)li->li_height << 40 |
		(u64)li->li_flags;
	return cpu_to_be64(val);
}

/*
 * Walk the alias bitmap of an inode block and hand every aliased field
 * (height/used/size/embedded pointers) to @write_one_alias for the journal.
 * Returns 0 when the bitmap is exhausted, or the first error.
 */
static int inode_write_alias(struct super_block *sb,
		struct logfs_block *block, write_alias_t *write_one_alias)
{
	struct inode *inode = block->inode;
	struct logfs_inode *li = logfs_inode(inode);
	unsigned long pos;
	u64 ino , bix;
	__be64 val;
	level_t level;
	int err;

	for (pos = 0; ; pos++) {
		pos = fnb(block->alias_map, LOGFS_BLOCK_FACTOR, pos);
		if (pos >= LOGFS_EMBEDDED_FIELDS + INODE_POINTER_OFS)
			return 0;

		switch (pos) {
		case INODE_HEIGHT_OFS:
			val = inode_val0(inode);
			break;
		case INODE_USED_OFS:
			val = cpu_to_be64(li->li_used_bytes);
			break;
		case INODE_SIZE_OFS:
			val = cpu_to_be64(i_size_read(inode));
			break;
		case INODE_POINTER_OFS ...
				INODE_POINTER_OFS + LOGFS_EMBEDDED_FIELDS - 1:
			val = cpu_to_be64(li->li_data[pos - INODE_POINTER_OFS]);
			break;
		default:
			BUG();
		}

		/* Inodes live as blocks of the master inode. */
		ino = LOGFS_INO_MASTER;
		bix = inode->i_ino;
		level = LEVEL(0);
		err = write_one_alias(sb, ino, bix, level, pos, val);
		if (err)
			return err;
	}
}

/*
 * Same as inode_write_alias(), but for an indirect block: each aliased
 * slot's pointer is read from the page and journaled.
 */
static int indirect_write_alias(struct super_block *sb,
		struct logfs_block *block, write_alias_t *write_one_alias)
{
	unsigned long pos;
	struct page *page = block->page;
	u64 ino , bix;
	__be64 *child, val;
	level_t level;
	int err;

	for (pos = 0; ; pos++) {
		pos = fnb(block->alias_map, LOGFS_BLOCK_FACTOR, pos);
		if (pos >= LOGFS_BLOCK_FACTOR)
			return 0;

		ino = page->mapping->host->i_ino;
		logfs_unpack_index(page->index, &bix, &level);
		child = kmap_atomic(page, KM_USER0);
		val = child[pos];
		kunmap_atomic(child, KM_USER0);
		err = write_one_alias(sb, ino, bix, level, pos, val);
		if (err)
			return err;
	}
}

/* Journal the aliases of every block on the superblock's alias list. */
int logfs_write_obj_aliases_pagecache(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);
	struct logfs_block *block;
	int err;

	list_for_each_entry(block, &super->s_object_alias,
alias_list) {
		err = block->ops->write_alias(sb, block, write_alias_journal);
		if (err)
			return err;
	}
	return 0;
}

/* Return a block descriptor to the mempool; must hold no queued items. */
void __free_block(struct super_block *sb, struct logfs_block *block)
{
	BUG_ON(!list_empty(&block->item_list));
	list_del(&block->alias_list);
	mempool_free(block, logfs_super(sb)->s_block_pool);
}

static void inode_free_block(struct super_block *sb, struct logfs_block *block)
{
	struct inode *inode = block->inode;

	logfs_inode(inode)->li_block = NULL;
	__free_block(sb, block);
}

static void indirect_free_block(struct super_block *sb,
		struct logfs_block *block)
{
	/* Detach the descriptor from its page before freeing. */
	ClearPagePrivate(block->page);
	block->page->private = 0;
	__free_block(sb, block);
}

static struct logfs_block_ops inode_block_ops = {
	.write_block	= inode_write_block,
	.free_block	= inode_free_block,
	.write_alias	= inode_write_alias,
};

struct logfs_block_ops indirect_block_ops = {
	.write_block	= indirect_write_block,
	.free_block	= indirect_free_block,
	.write_alias	= indirect_write_alias,
};

/* Allocate and zero-initialize a block descriptor from the mempool. */
struct logfs_block *__alloc_block(struct super_block *sb,
		u64 ino, u64 bix, level_t level)
{
	struct logfs_super *super = logfs_super(sb);
	struct logfs_block *block;

	block = mempool_alloc(super->s_block_pool, GFP_NOFS);
	memset(block, 0, sizeof(*block));
	INIT_LIST_HEAD(&block->alias_list);
	INIT_LIST_HEAD(&block->item_list);
	block->sb = sb;
	block->ino = ino;
	block->bix = bix;
	block->level = level;
	return block;
}

/* Attach a block descriptor to @inode's logfs_inode, if not already set. */
static void alloc_inode_block(struct inode *inode)
{
	struct logfs_inode *li = logfs_inode(inode);
	struct logfs_block *block;

	if (li->li_block)
		return;

	block = __alloc_block(inode->i_sb, LOGFS_INO_MASTER, inode->i_ino, 0);
	block->inode = inode;
	li->li_block = block;
	block->ops = &inode_block_ops;
}

/*
 * Count how many pointers of an indirect block are populated (partial)
 * and how many are marked fully populated (full).
 */
void initialize_block_counters(struct page *page, struct logfs_block *block,
		__be64 *array, int page_is_empty)
{
	u64 ptr;
	int i, start;

	block->partial = 0;
	block->full = 0;
	start = 0;
	if (page->index < first_indirect_block()) {
		/* Counters are pointless on level 0 */
		return;
	}
	if (page->index == first_indirect_block()) {
		/* Skip unused pointers */
		start = I0_BLOCKS;
		block->full = I0_BLOCKS;
	}
	if (!page_is_empty) {
		for (i = start; i < LOGFS_BLOCK_FACTOR; i++) {
			ptr = be64_to_cpu(array[i]);
			if (ptr)
				block->partial++;
			if (ptr & LOGFS_FULLY_POPULATED)
				block->full++;
		}
	}
}

/* Attach a block descriptor to a data page via page->private. */
static void alloc_data_block(struct inode *inode, struct page *page)
{
	struct logfs_block *block;
	u64 bix;
	level_t level;

	if (PagePrivate(page))
		return;

	logfs_unpack_index(page->index, &bix, &level);
	block = __alloc_block(inode->i_sb, inode->i_ino, bix, level);
	block->page = page;
	SetPagePrivate(page);
	page->private = (unsigned long)block;
	block->ops = &indirect_block_ops;
}

/* As alloc_data_block(), but additionally initialize population counters. */
static void alloc_indirect_block(struct inode *inode, struct page *page,
		int page_is_empty)
{
	struct logfs_block *block;
	__be64 *array;

	if (PagePrivate(page))
		return;

	alloc_data_block(inode, page);

	block = logfs_block(page);
	array = kmap_atomic(page, KM_USER0);
	initialize_block_counters(page, block, array, page_is_empty);
	kunmap_atomic(array, KM_USER0);
}

/* Store @ptr in slot @index and keep partial/full counters in sync. */
static void block_set_pointer(struct page *page, int index, u64 ptr)
{
	struct logfs_block *block = logfs_block(page);
	__be64 *array;
	u64 oldptr;

	BUG_ON(!block);
	array = kmap_atomic(page, KM_USER0);
	oldptr = be64_to_cpu(array[index]);
	array[index] = cpu_to_be64(ptr);
	kunmap_atomic(array, KM_USER0);
	SetPageUptodate(page);

	block->full += !!(ptr & LOGFS_FULLY_POPULATED)
		- !!(oldptr & LOGFS_FULLY_POPULATED);
	block->partial += !!ptr - !!oldptr;
}

static u64 block_get_pointer(struct page *page, int index)
{
	__be64 *block;
	u64 ptr;

	block = kmap_atomic(page, KM_USER0);
	ptr = be64_to_cpu(block[index]);
	kunmap_atomic(block, KM_USER0);
	return ptr;
}

/* A hole reads as zeroes. */
static int logfs_read_empty(struct page *page)
{
	zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	return 0;
}

/* Read a block addressed by one of the embedded (direct) inode pointers. */
static int logfs_read_direct(struct inode *inode, struct page *page)
{
	struct logfs_inode *li = logfs_inode(inode);
	pgoff_t index = page->index;
	u64 block;

	block = li->li_data[index];
	if (!block)
		return logfs_read_empty(page);

	return logfs_segment_read(inode, page, block, index, 0);
}

/*
 * Walk the indirect block chain from the tree root down to
 * @target_level and read the addressed block into @page.  Any missing
 * pointer on the way means the block is a hole.
 */
static int logfs_read_loop(struct inode *inode, struct page *page,
		int rw_context)
{
	struct logfs_inode *li = logfs_inode(inode);
	u64 bix, bofs = li->li_data[INDIRECT_INDEX];
	level_t level, target_level;
	int ret;
	struct page *ipage;

	logfs_unpack_index(page->index, &bix, &target_level);
	if (!bofs)
		return logfs_read_empty(page);

	if (bix >= maxbix(li->li_height))
		return logfs_read_empty(page);

	for (level = LEVEL(li->li_height);
			(__force u8)level > (__force u8)target_level;
			level = SUBLEVEL(level)){
		ipage = logfs_get_page(inode, bix, level, rw_context);
		if (!ipage)
			return -ENOMEM;

		ret = logfs_segment_read(inode, ipage, bofs, bix, level);
		if (ret) {
			logfs_put_read_page(ipage);
			return ret;
		}

		bofs = block_get_pointer(ipage, get_bits(bix, SUBLEVEL(level)));
		logfs_put_page(ipage, rw_context);
		if (!bofs)
			return logfs_read_empty(page);
	}

	return logfs_segment_read(inode, page, bofs, bix, 0);
}

/* Dispatch a read to the direct or indirect path based on the index. */
static int logfs_read_block(struct inode *inode, struct page *page,
		int rw_context)
{
	pgoff_t index = page->index;

	if (index < I0_BLOCKS)
		return logfs_read_direct(inode, page);
	return logfs_read_loop(inode, page, rw_context);
}

/*
 * Tree walk testing whether block @bix exists.  Returns 1 if present,
 * 0 for a hole, negative errno on read failure.
 */
static int logfs_exist_loop(struct inode *inode, u64 bix)
{
	struct logfs_inode *li = logfs_inode(inode);
	u64 bofs = li->li_data[INDIRECT_INDEX];
	level_t level;
	int ret;
	struct page *ipage;

	if (!bofs)
		return 0;
	if (bix >= maxbix(li->li_height))
		return 0;

	for (level = LEVEL(li->li_height); level != 0; level = SUBLEVEL(level)) {
		ipage = logfs_get_read_page(inode, bix, level);
		if (!ipage)
			return -ENOMEM;

		ret = logfs_segment_read(inode, ipage, bofs, bix, level);
		if (ret) {
			logfs_put_read_page(ipage);
			return ret;
		}

		bofs = block_get_pointer(ipage, get_bits(bix, SUBLEVEL(level)));
		logfs_put_read_page(ipage);
		if (!bofs)
			return 0;
	}

	return 1;
}

int logfs_exist_block(struct inode *inode, u64 bix)
{
	struct logfs_inode *li = logfs_inode(inode);

	if (bix < I0_BLOCKS)
		return !!li->li_data[bix];
	return logfs_exist_loop(inode,
bix);
}

/*
 * Scan the embedded pointers for the next hole (@data == 0) or the
 * next data block (@data == 1) starting at @bix.
 */
static u64 seek_holedata_direct(struct inode *inode, u64 bix, int data)
{
	struct logfs_inode *li = logfs_inode(inode);

	for (; bix < I0_BLOCKS; bix++)
		if (data ^ (li->li_data[bix] == 0))
			return bix;
	return I0_BLOCKS;
}

/*
 * Tree walk looking for the next hole (@data == 0) or the next data
 * block (@data == 1), skipping whole subtrees via the
 * LOGFS_FULLY_POPULATED bit where possible.
 */
static u64 seek_holedata_loop(struct inode *inode, u64 bix, int data)
{
	struct logfs_inode *li = logfs_inode(inode);
	__be64 *rblock;
	u64 increment, bofs = li->li_data[INDIRECT_INDEX];
	level_t level;
	int ret, slot;
	struct page *page;

	BUG_ON(!bofs);

	for (level = LEVEL(li->li_height); level != 0; level = SUBLEVEL(level)) {
		/* Block indices covered by one pointer at this level. */
		increment = 1 << (LOGFS_BLOCK_BITS * ((__force u8)level-1));
		page = logfs_get_read_page(inode, bix, level);
		if (!page)
			return bix;

		ret = logfs_segment_read(inode, page, bofs, bix, level);
		if (ret) {
			logfs_put_read_page(page);
			return bix;
		}

		slot = get_bits(bix, SUBLEVEL(level));
		rblock = kmap_atomic(page, KM_USER0);
		while (slot < LOGFS_BLOCK_FACTOR) {
			if (data && (rblock[slot] != 0))
				break;
			if (!data && !(be64_to_cpu(rblock[slot]) & LOGFS_FULLY_POPULATED))
				break;
			slot++;
			bix += increment;
			bix &= ~(increment - 1);
		}
		if (slot >= LOGFS_BLOCK_FACTOR) {
			kunmap_atomic(rblock, KM_USER0);
			logfs_put_read_page(page);
			return bix;
		}
		bofs = be64_to_cpu(rblock[slot]);
		kunmap_atomic(rblock, KM_USER0);
		logfs_put_read_page(page);
		if (!bofs) {
			BUG_ON(data);
			return bix;
		}
	}
	return bix;
}

/**
 * logfs_seek_hole - find next hole starting at a given block index
 * @inode:	inode to search in
 * @bix:	block index to start searching
 *
 * Returns next hole.  If the file doesn't contain any further holes, the
 * block address next to eof is returned instead.
 */
u64 logfs_seek_hole(struct inode *inode, u64 bix)
{
	struct logfs_inode *li = logfs_inode(inode);

	if (bix < I0_BLOCKS) {
		bix = seek_holedata_direct(inode, bix, 0);
		if (bix < I0_BLOCKS)
			return bix;
	}

	if (!li->li_data[INDIRECT_INDEX])
		return bix;
	else if (li->li_data[INDIRECT_INDEX] & LOGFS_FULLY_POPULATED)
		bix = maxbix(li->li_height);
	else if (bix >= maxbix(li->li_height))
		return bix;
	else {
		bix = seek_holedata_loop(inode, bix, 0);
		if (bix < maxbix(li->li_height))
			return bix;
		/* Should not happen anymore.  But if some port writes semi-
		 * corrupt images (as this one used to) we might run into it.
		 */
		WARN_ON_ONCE(bix == maxbix(li->li_height));
	}

	return bix;
}

static u64 __logfs_seek_data(struct inode *inode, u64 bix)
{
	struct logfs_inode *li = logfs_inode(inode);

	if (bix < I0_BLOCKS) {
		bix = seek_holedata_direct(inode, bix, 1);
		if (bix < I0_BLOCKS)
			return bix;
	}

	if (bix < maxbix(li->li_height)) {
		if (!li->li_data[INDIRECT_INDEX])
			bix = maxbix(li->li_height);
		else
			return seek_holedata_loop(inode, bix, 1);
	}

	return bix;
}

/**
 * logfs_seek_data - find next data block after a given block index
 * @inode:	inode to search in
 * @bix:	block index to start searching
 *
 * Returns next data block.  If the file doesn't contain any further data
 * blocks, the last block in the file is returned instead.
 */
u64 logfs_seek_data(struct inode *inode, u64 bix)
{
	struct super_block *sb = inode->i_sb;
	u64 ret, end;

	ret = __logfs_seek_data(inode, bix);
	end = i_size_read(inode) >> sb->s_blocksize_bits;
	if (ret >= end)
		ret = max(bix, end);
	return ret;
}

/* Does the direct pointer for @bix (sans flag bits) still point at @ofs? */
static int logfs_is_valid_direct(struct logfs_inode *li, u64 bix, u64 ofs)
{
	return pure_ofs(li->li_data[bix]) == ofs;
}

/*
 * Walk the tree towards @bix; the block at @ofs is valid iff some
 * pointer on the path (sans flag bits) equals it.
 */
static int __logfs_is_valid_loop(struct inode *inode, u64 bix,
		u64 ofs, u64 bofs)
{
	struct logfs_inode *li = logfs_inode(inode);
	level_t level;
	int ret;
	struct page *page;

	for (level = LEVEL(li->li_height); level != 0; level = SUBLEVEL(level)){
		page = logfs_get_write_page(inode, bix, level);
		BUG_ON(!page);

		ret = logfs_segment_read(inode, page, bofs, bix, level);
		if (ret) {
			logfs_put_write_page(page);
			return 0;
		}

		bofs = block_get_pointer(page, get_bits(bix, SUBLEVEL(level)));
		logfs_put_write_page(page);
		if (!bofs)
			return 0;

		if (pure_ofs(bofs) == ofs)
			return 1;
	}
	return 0;
}

static int logfs_is_valid_loop(struct inode *inode, u64 bix, u64 ofs)
{
	struct logfs_inode *li = logfs_inode(inode);
	u64 bofs = li->li_data[INDIRECT_INDEX];

	if (!bofs)
		return 0;

	if (bix >= maxbix(li->li_height))
		return 0;

	if (pure_ofs(bofs) == ofs)
		return 1;

	return __logfs_is_valid_loop(inode, bix, ofs, bofs);
}

static int __logfs_is_valid_block(struct inode *inode, u64 bix, u64 ofs)
{
	struct logfs_inode *li = logfs_inode(inode);

	/* Unlinked inode with no other users: all its blocks are dead. */
	if ((inode->i_nlink == 0) && atomic_read(&inode->i_count) == 1)
		return 0;

	if (bix < I0_BLOCKS)
		return logfs_is_valid_direct(li, bix, ofs);
	return logfs_is_valid_loop(inode, bix, ofs);
}

/**
 * logfs_is_valid_block - check whether this block is still valid
 *
 * @sb	- superblock
 * @ofs	- block physical offset
 * @ino	- block inode number
 * @bix	- block index
 * @level - block level
 *
 * Returns 0 if the block is invalid, 1 if it is valid and 2 if it will
 * become invalid once the journal is written.
 */
int logfs_is_valid_block(struct super_block *sb, u64 ofs, u64 ino, u64 bix,
		gc_level_t gc_level)
{
	struct logfs_super *super = logfs_super(sb);
	struct inode *inode;
	int ret, cookie;

	/* Umount closes a segment with free blocks remaining.  Those
	 * blocks are by definition invalid. */
	if (ino == -1)
		return 0;

	LOGFS_BUG_ON((u64)(u_long)ino != ino, sb);

	inode = logfs_safe_iget(sb, ino, &cookie);
	if (IS_ERR(inode))
		goto invalid;

	ret = __logfs_is_valid_block(inode, bix, ofs);
	logfs_safe_iput(inode, cookie);
	if (ret)
		return ret;

invalid:
	/* Block is nominally invalid, but may still sit in the shadow tree,
	 * waiting for a journal commit.
	 */
	if (btree_lookup64(&super->s_shadow_tree.old, ofs))
		return 2;
	return 0;
}

/*
 * Read a page without taking the page lock; caller must hold it.
 * Sets the Uptodate/Error page bits according to the result.
 */
int logfs_readpage_nolock(struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EIO;

	ret = logfs_read_block(inode, page, READ);
	if (ret) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		ClearPageError(page);
	}
	flush_dcache_page(page);

	return ret;
}

/*
 * Check that @bytes of space are still available, accounting for dirty
 * state and keeping a reserve only CAP_SYS_RESOURCE may dip into.
 */
static int logfs_reserve_bytes(struct inode *inode, int bytes)
{
	struct logfs_super *super = logfs_super(inode->i_sb);
	u64 available = super->s_free_bytes + super->s_dirty_free_bytes
			- super->s_dirty_used_bytes - super->s_dirty_pages;

	if (!bytes)
		return 0;

	if (available < bytes)
		return -ENOSPC;

	if (available < bytes + super->s_root_reserve &&
			!capable(CAP_SYS_RESOURCE))
		return -ENOSPC;

	return 0;
}

/*
 * Reserve worst-case space (6 * LOGFS_MAX_OBJECTSIZE) for writing
 * @page, flushing blocks from the writeback list until enough space is
 * free or the list is empty.
 */
int get_page_reserve(struct inode *inode, struct page *page)
{
	struct logfs_super *super = logfs_super(inode->i_sb);
	struct logfs_block *block = logfs_block(page);
	int ret;

	if (block && block->reserved_bytes)
		return 0;

	logfs_get_wblocks(inode->i_sb, page, WF_LOCK);
	while ((ret = logfs_reserve_bytes(inode, 6 * LOGFS_MAX_OBJECTSIZE)) &&
			!list_empty(&super->s_writeback_list)) {
		block = list_entry(super->s_writeback_list.next,
				struct logfs_block, alias_list);
		block->ops->write_block(block);
	}
	if (!ret) {
		alloc_data_block(inode, page);
		block = logfs_block(page);
		block->reserved_bytes += 6 * LOGFS_MAX_OBJECTSIZE;
		super->s_dirty_pages += 6 * LOGFS_MAX_OBJECTSIZE;
		list_move_tail(&block->alias_list,
				&super->s_writeback_list);
	}
	logfs_put_wblocks(inode->i_sb, page, WF_LOCK);
	return ret;
}

/*
 * We are protected by write lock.  Push victims up to superblock level
 * and release transaction when appropriate.
 */
/* FIXME: This is currently called from the wrong spots. */
static void logfs_handle_transaction(struct inode *inode,
		struct logfs_transaction *ta)
{
	struct logfs_super *super = logfs_super(inode->i_sb);

	if (!ta)
		return;
	logfs_inode(inode)->li_block->ta = NULL;

	if (inode->i_ino != LOGFS_INO_MASTER) {
		BUG(); /* FIXME: Yes, this needs more thought */
		/* just remember the transaction until inode is written */
		//BUG_ON(logfs_inode(inode)->li_transaction);
		//logfs_inode(inode)->li_transaction = ta;
		return;
	}

	switch (ta->state) {
	case CREATE_1: /* fall through */
	case UNLINK_1:
		BUG_ON(super->s_victim_ino);
		super->s_victim_ino = ta->ino;
		break;
	case CREATE_2: /* fall through */
	case UNLINK_2:
		BUG_ON(super->s_victim_ino != ta->ino);
		super->s_victim_ino = 0;
		/* transaction ends here - free it */
		kfree(ta);
		break;
	case CROSS_RENAME_1:
		BUG_ON(super->s_rename_dir);
		BUG_ON(super->s_rename_pos);
		super->s_rename_dir = ta->dir;
		super->s_rename_pos = ta->pos;
		break;
	case CROSS_RENAME_2:
		BUG_ON(super->s_rename_dir != ta->dir);
		BUG_ON(super->s_rename_pos != ta->pos);
		super->s_rename_dir = 0;
		super->s_rename_pos = 0;
		kfree(ta);
		break;
	case TARGET_RENAME_1:
		BUG_ON(super->s_rename_dir);
		BUG_ON(super->s_rename_pos);
		BUG_ON(super->s_victim_ino);
		super->s_rename_dir = ta->dir;
		super->s_rename_pos = ta->pos;
		super->s_victim_ino = ta->ino;
		break;
	case TARGET_RENAME_2:
		BUG_ON(super->s_rename_dir != ta->dir);
		BUG_ON(super->s_rename_pos != ta->pos);
		BUG_ON(super->s_victim_ino != ta->ino);
		super->s_rename_dir = 0;
		super->s_rename_pos = 0;
		break;
	case TARGET_RENAME_3:
		BUG_ON(super->s_rename_dir);
		BUG_ON(super->s_rename_pos);
		BUG_ON(super->s_victim_ino != ta->ino);
		super->s_victim_ino = 0;
		kfree(ta);
		break;
	default:
		BUG();
	}
}

/*
 * Not strictly a reservation, but rather a check that we still have enough
 * space to satisfy the write.
 */
static int logfs_reserve_blocks(struct inode *inode, int blocks)
{
	return logfs_reserve_bytes(inode, blocks * LOGFS_MAX_OBJECTSIZE);
}

/* Per-write state passed down the recursive write path. */
struct write_control {
	u64 ofs;	/* on-medium offset of the block, 0 if none yet */
	long flags;	/* WF_* flags for this write */
};

/* Allocate a shadow describing a pending rewrite of (bix, level). */
static struct logfs_shadow *alloc_shadow(struct inode *inode, u64 bix,
		level_t level, u64 old_ofs)
{
	struct logfs_super *super = logfs_super(inode->i_sb);
	struct logfs_shadow *shadow;

	shadow = mempool_alloc(super->s_shadow_pool, GFP_NOFS);
	memset(shadow, 0, sizeof(*shadow));
	shadow->ino = inode->i_ino;
	shadow->bix = bix;
	shadow->gc_level = expand_level(inode->i_ino, level);
	shadow->old_ofs = old_ofs & ~LOGFS_FULLY_POPULATED;
	return shadow;
}

static void free_shadow(struct inode *inode, struct logfs_shadow *shadow)
{
	struct logfs_super *super = logfs_super(inode->i_sb);

	mempool_free(shadow, super->s_shadow_pool);
}

/* Remember that segment @segno contains shadowed objects. */
static void mark_segment(struct shadow_tree *tree, u32 segno)
{
	int err;

	if (!btree_lookup32(&tree->segment_map, segno)) {
		err = btree_insert32(&tree->segment_map, segno, (void *)1,
				GFP_NOFS);
		BUG_ON(err);
		tree->no_shadowed_segments++;
	}
}

/**
 * fill_shadow_tree - Propagate shadow tree changes due to a write
 * @inode:	Inode owning the page
 * @page:	Struct page that was written
 * @shadow:	Shadow for the current write
 *
 * Writes in logfs can result in two semi-valid objects.  The old object
 * is still valid as long as it can be reached by following pointers on
 * the medium.  Only when writes propagate all the way up to the journal
 * has the new object safely replaced the old one.
 *
 * To handle this problem, a struct logfs_shadow is used to represent
 * every single write.  It is attached to the indirect block, which is
 * marked dirty.  When the indirect block is written, its shadows are
 * handed up to the next indirect block (or inode).
 Untimately they
 * will reach the master inode and be freed upon journal commit.
 *
 * This function handles a single step in the propagation.  It adds the
 * shadow for the current write to the tree, along with any shadows in
 * the page's tree, in case it was an indirect block.  If a page is
 * written, the inode parameter is left NULL, if an inode is written,
 * the page parameter is left NULL.
 */
static void fill_shadow_tree(struct inode *inode, struct page *page,
		struct logfs_shadow *shadow)
{
	struct logfs_super *super = logfs_super(inode->i_sb);
	struct logfs_block *block = logfs_block(page);
	struct shadow_tree *tree = &super->s_shadow_tree;

	if (PagePrivate(page)) {
		if (block->alias_map)
			super->s_no_object_aliases -= bitmap_weight(
					block->alias_map, LOGFS_BLOCK_FACTOR);
		logfs_handle_transaction(inode, block->ta);
		block->ops->free_block(inode->i_sb, block);
	}
	if (shadow) {
		/* Rewrites go in the old tree, fresh writes in the new. */
		if (shadow->old_ofs)
			btree_insert64(&tree->old, shadow->old_ofs, shadow,
					GFP_NOFS);
		else
			btree_insert64(&tree->new, shadow->new_ofs, shadow,
					GFP_NOFS);

		super->s_dirty_used_bytes += shadow->new_len;
		super->s_dirty_free_bytes += shadow->old_len;
		mark_segment(tree, shadow->old_ofs >> super->s_segshift);
		mark_segment(tree, shadow->new_ofs >> super->s_segshift);
	}
}

/*
 * Record that slot @child_no of @block differs from the on-medium copy
 * and queue the block on the object alias list.
 */
static void logfs_set_alias(struct super_block *sb, struct logfs_block *block,
		long child_no)
{
	struct logfs_super *super = logfs_super(sb);

	if (block->inode && block->inode->i_ino == LOGFS_INO_MASTER) {
		/* Aliases in the master inode are pointless. */
		return;
	}

	if (!test_bit(child_no, block->alias_map)) {
		set_bit(child_no, block->alias_map);
		super->s_no_object_aliases++;
	}
	list_move_tail(&block->alias_list, &super->s_object_alias);
}

/*
 * Object aliases can and often do change the size and occupied space of a
 * file.  So not only do we have to change the pointers, we also have to
 * change inode->i_size and li->li_used_bytes.  Which is done by setting
 * another two object aliases for the inode itself.
 */
static void set_iused(struct inode *inode, struct logfs_shadow *shadow)
{
	struct logfs_inode *li = logfs_inode(inode);

	if (shadow->new_len == shadow->old_len)
		return;

	alloc_inode_block(inode);
	li->li_used_bytes += shadow->new_len - shadow->old_len;
	__logfs_set_blocks(inode);
	logfs_set_alias(inode->i_sb, li->li_block, INODE_USED_OFS);
	logfs_set_alias(inode->i_sb, li->li_block, INODE_SIZE_OFS);
}

/*
 * Write and/or delete a single block at the bottom of a recursion
 * step.  On success wc->ofs holds the block's new offset, with
 * LOGFS_FULLY_POPULATED or'ed in for full indirect blocks.
 */
static int logfs_write_i0(struct inode *inode, struct page *page,
		struct write_control *wc)
{
	struct logfs_shadow *shadow;
	u64 bix;
	level_t level;
	int full, err = 0;

	logfs_unpack_index(page->index, &bix, &level);
	/* A block without an old offset consumes new space; reserve it. */
	if (wc->ofs == 0)
		if (logfs_reserve_blocks(inode, 1))
			return -ENOSPC;

	shadow = alloc_shadow(inode, bix, level, wc->ofs);
	if (wc->flags & WF_WRITE)
		err = logfs_segment_write(inode, page, shadow);
	if (wc->flags & WF_DELETE)
		logfs_segment_delete(inode, shadow);
	if (err) {
		free_shadow(inode, shadow);
		return err;
	}

	set_iused(inode, shadow);
	full = 1;
	if (level != 0) {
		alloc_indirect_block(inode, page, 0);
		full = logfs_block(page)->full == LOGFS_BLOCK_FACTOR;
	}
	fill_shadow_tree(inode, page, shadow);
	wc->ofs = shadow->new_ofs;
	if (wc->ofs && full)
		wc->ofs |= LOGFS_FULLY_POPULATED;
	return 0;
}

/* Write a block addressed by one of the embedded (direct) pointers. */
static int logfs_write_direct(struct inode *inode, struct page *page,
		long flags)
{
	struct logfs_inode *li = logfs_inode(inode);
	struct write_control wc = {
		.ofs = li->li_data[page->index],
		.flags = flags,
	};
	int err;

	alloc_inode_block(inode);

	err = logfs_write_i0(inode, page, &wc);
	if (err)
		return err;

	li->li_data[page->index] = wc.ofs;
	logfs_set_alias(inode->i_sb, li->li_block,
			page->index + INODE_POINTER_OFS);
	return 0;
}

/* Did the pointer's empty/full classification change after the write? */
static int ptr_change(u64 ofs, struct page *page)
{
	struct logfs_block *block = logfs_block(page);
	int empty0, empty1, full0, full1;

	empty0 = ofs == 0;
	empty1 = block->partial == 0;
	if (empty0 != empty1)
		return 1;

	/* The !! is necessary to shrink result to int */
	full0 = !!(ofs & LOGFS_FULLY_POPULATED);
	full1 = block->full == LOGFS_BLOCK_FACTOR;
	if (full0 != full1)
		return 1;
	return 0;
}

/*
 * One recursion step of a write: read (or zero) the indirect block at
 * this level, recurse (or write the leaf), store the child's new
 * pointer and either rewrite this block or just record an alias.
 */
static int __logfs_write_rec(struct inode *inode, struct page *page,
		struct write_control *this_wc,
		pgoff_t bix, level_t target_level, level_t level)
{
	int ret, page_empty = 0;
	int child_no = get_bits(bix, SUBLEVEL(level));
	struct page *ipage;
	struct write_control child_wc = {
		.flags = this_wc->flags,
	};

	ipage = logfs_get_write_page(inode, bix, level);
	if (!ipage)
		return -ENOMEM;

	if (this_wc->ofs) {
		ret = logfs_segment_read(inode, ipage, this_wc->ofs, bix, level);
		if (ret)
			goto out;
	} else if (!PageUptodate(ipage)) {
		page_empty = 1;
		logfs_read_empty(ipage);
	}

	child_wc.ofs = block_get_pointer(ipage, child_no);

	if ((__force u8)level-1 > (__force u8)target_level)
		ret = __logfs_write_rec(inode, page, &child_wc, bix,
				target_level, SUBLEVEL(level));
	else
		ret = logfs_write_i0(inode, page, &child_wc);

	if (ret)
		goto out;

	alloc_indirect_block(inode, ipage, page_empty);
	block_set_pointer(ipage, child_no, child_wc.ofs);
	/* FIXME: first condition seems superfluous */
	if (child_wc.ofs || logfs_block(ipage)->partial)
		this_wc->flags |= WF_WRITE;
	/* the condition on this_wc->ofs ensures that we won't consume extra
	 * space for indirect blocks in the future, which we cannot reserve */
	if (!this_wc->ofs || ptr_change(this_wc->ofs, ipage))
		ret = logfs_write_i0(inode, ipage, this_wc);
	else
		logfs_set_alias(inode->i_sb, logfs_block(ipage), child_no);
out:
	logfs_put_write_page(ipage);
	return ret;
}

/* Top of the recursive write: start at the root and update it afterwards. */
static int logfs_write_rec(struct inode *inode, struct page *page,
		pgoff_t bix, level_t target_level, long flags)
{
	struct logfs_inode *li = logfs_inode(inode);
	struct write_control wc = {
		.ofs = li->li_data[INDIRECT_INDEX],
		.flags = flags,
	};
	int ret;

	alloc_inode_block(inode);

	if (li->li_height > (__force u8)target_level)
		ret = __logfs_write_rec(inode, page, &wc, bix, target_level,
				LEVEL(li->li_height));
	else
		ret =
logfs_write_i0(inode, page, &wc);
	if (ret)
		return ret;

	if (li->li_data[INDIRECT_INDEX] != wc.ofs) {
		li->li_data[INDIRECT_INDEX] = wc.ofs;
		logfs_set_alias(inode->i_sb, li->li_block,
				INDIRECT_INDEX + INODE_POINTER_OFS);
	}
	return ret;
}

/* Attach @ta to the inode's block; released by logfs_handle_transaction(). */
void logfs_add_transaction(struct inode *inode, struct logfs_transaction *ta)
{
	alloc_inode_block(inode);
	logfs_inode(inode)->li_block->ta = ta;
}

void logfs_del_transaction(struct inode *inode, struct logfs_transaction *ta)
{
	struct logfs_block *block = logfs_inode(inode)->li_block;

	if (block && block->ta)
		block->ta = NULL;
}

/*
 * Grow the indirect tree by adding root levels until it covers height
 * @level and block index @bix.
 */
static int grow_inode(struct inode *inode, u64 bix, level_t level)
{
	struct logfs_inode *li = logfs_inode(inode);
	u8 height = (__force u8)level;
	struct page *page;
	struct write_control wc = {
		.flags = WF_WRITE,
	};
	int err;

	BUG_ON(height > 5 || li->li_height > 5);
	while (height > li->li_height || bix >= maxbix(li->li_height)) {
		/* New root block; the old root becomes its first child. */
		page = logfs_get_write_page(inode, I0_BLOCKS + 1,
				LEVEL(li->li_height + 1));
		if (!page)
			return -ENOMEM;
		logfs_read_empty(page);
		alloc_indirect_block(inode, page, 1);
		block_set_pointer(page, 0, li->li_data[INDIRECT_INDEX]);
		err = logfs_write_i0(inode, page, &wc);
		logfs_put_write_page(page);
		if (err)
			return err;
		li->li_data[INDIRECT_INDEX] = wc.ofs;
		wc.ofs = 0;
		li->li_height++;
		logfs_set_alias(inode->i_sb, li->li_block, INODE_HEIGHT_OFS);
	}
	return 0;
}

static int __logfs_write_buf(struct inode *inode, struct page *page, long flags)
{
	struct logfs_super *super = logfs_super(inode->i_sb);
	pgoff_t index = page->index;
	u64 bix;
	level_t level;
	int err;

	flags |= WF_WRITE | WF_DELETE;
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;

	logfs_unpack_index(index, &bix, &level);
	/* Release any space reserved for this page by get_page_reserve(). */
	if (logfs_block(page) && logfs_block(page)->reserved_bytes)
		super->s_dirty_pages -= logfs_block(page)->reserved_bytes;

	if (index < I0_BLOCKS)
		return logfs_write_direct(inode, page, flags);

	bix = adjust_bix(bix, level);
	err = grow_inode(inode, bix, level);
	if (err)
		return err;
	return logfs_write_rec(inode, page, bix, level, flags);
}

/* Write @page under the s_write_mutex / pre-lock protocol. */
int logfs_write_buf(struct inode *inode, struct page *page, long flags)
{
	struct super_block *sb = inode->i_sb;
	int ret;

	logfs_get_wblocks(sb, page, flags & WF_LOCK);
	ret = __logfs_write_buf(inode, page, flags);
	logfs_put_wblocks(sb, page, flags & WF_LOCK);
	return ret;
}

static int __logfs_delete(struct inode *inode, struct page *page)
{
	long flags = WF_DELETE;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;

	if (page->index < I0_BLOCKS)
		return logfs_write_direct(inode, page, flags);
	return logfs_write_rec(inode, page, page->index, 0, flags);
}

/* Delete the data block at @index from @inode. */
int logfs_delete(struct inode *inode, pgoff_t index,
		struct shadow_tree *shadow_tree)
{
	struct super_block *sb = inode->i_sb;
	struct page *page;
	int ret;

	page = logfs_get_read_page(inode, index, 0);
	if (!page)
		return -ENOMEM;

	logfs_get_wblocks(sb, page, 1);
	ret = __logfs_delete(inode, page);
	logfs_put_wblocks(sb, page, 1);

	logfs_put_read_page(page);

	return ret;
}

/* Move a block to a new on-medium location, e.g. during GC. */
int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs,
		gc_level_t gc_level, long flags)
{
	level_t level = shrink_level(gc_level);
	struct page *page;
	int err;

	page = logfs_get_write_page(inode, bix, level);
	if (!page)
		return -ENOMEM;

	err = logfs_segment_read(inode, page, ofs, bix, level);
	if (!err) {
		if (level != 0)
			alloc_indirect_block(inode, page, 0);
		err = logfs_write_buf(inode, page, flags);
		if (!err && shrink_level(gc_level) == 0) {
			/* Rewrite cannot mark the inode dirty but has to
			 * write it immediately.
			 * Q: Can't we just create an alias for the inode
			 * instead?  And if not, why not?
			 */
			if (inode->i_ino == LOGFS_INO_MASTER)
				logfs_write_anchor(inode->i_sb);
			else {
				err = __logfs_write_inode(inode, flags);
			}
		}
	}
	logfs_put_write_page(page);
	return err;
}

/*
 * If truncation to @size cuts through this page, zero the page's tail
 * and rewrite the shortened block.  Otherwise do nothing.
 */
static int truncate_data_block(struct inode *inode, struct page *page,
		u64 ofs, struct logfs_shadow *shadow, u64 size)
{
	loff_t pageofs = page->index << inode->i_sb->s_blocksize_bits;
	u64 bix;
	level_t level;
	int err;

	/* Does truncation happen within this page? */
	if (size <= pageofs || size - pageofs >= PAGE_SIZE)
		return 0;

	logfs_unpack_index(page->index, &bix, &level);
	BUG_ON(level != 0);
	err = logfs_segment_read(inode, page, ofs, bix, level);
	if (err)
		return err;

	zero_user_segment(page, size - pageofs, PAGE_CACHE_SIZE);
	return logfs_segment_write(inode, page, shadow);
}

/* Truncate (and possibly partially rewrite) a single level-0 block. */
static int logfs_truncate_i0(struct inode *inode, struct page *page,
		struct write_control *wc, u64 size)
{
	struct logfs_shadow *shadow;
	u64 bix;
	level_t level;
	int err = 0;

	logfs_unpack_index(page->index, &bix, &level);
	BUG_ON(level != 0);
	shadow = alloc_shadow(inode, bix, level, wc->ofs);

	err = truncate_data_block(inode, page, wc->ofs, shadow, size);
	if (err) {
		free_shadow(inode, shadow);
		return err;
	}

	logfs_segment_delete(inode, shadow);
	set_iused(inode, shadow);
	fill_shadow_tree(inode, page, shadow);
	wc->ofs = shadow->new_ofs;
	return 0;
}

/* Truncate the blocks addressed by the embedded (direct) pointers. */
static int logfs_truncate_direct(struct inode *inode, u64 size)
{
	struct logfs_inode *li = logfs_inode(inode);
	struct write_control wc;
	struct page *page;
	int e;
	int err;

	alloc_inode_block(inode);

	/* Walk backwards so only blocks past @size are touched. */
	for (e = I0_BLOCKS - 1; e >= 0; e--) {
		if (size > (e+1) * LOGFS_BLOCKSIZE)
			break;

		wc.ofs = li->li_data[e];
		if (!wc.ofs)
			continue;

		page = logfs_get_write_page(inode, e, 0);
		if (!page)
			return -ENOMEM;
		err = logfs_segment_read(inode, page, wc.ofs, e, 0);
		if (err) {
			logfs_put_write_page(page);
			return err;
		}
		err = logfs_truncate_i0(inode, page, &wc, size);
		logfs_put_write_page(page);
		if (err)
			return err;

		li->li_data[e] = wc.ofs;
	}
	return 0;
}

/* FIXME: these need to become per-sb once we support different blocksizes */
static u64 __logfs_step[] = {
	1,
	I1_BLOCKS,
	I2_BLOCKS,
	I3_BLOCKS,
};

static u64 __logfs_start_index[] = {
	I0_BLOCKS,
	I1_BLOCKS,
	I2_BLOCKS,
	I3_BLOCKS
};

/* Number of block indices covered by one pointer at @level. */
static inline u64 logfs_step(level_t level)
{
	return __logfs_step[(__force u8)level];
}

/* Bytes covered by one pointer at @level. */
static inline u64 logfs_factor(u8 level)
{
	return __logfs_step[level] * LOGFS_BLOCKSIZE;
}

static inline u64 logfs_start_index(level_t level)
{
	return
__logfs_start_index[(__force u8)level]; } static void logfs_unpack_raw_index(pgoff_t index, u64 *bix, level_t *level) { logfs_unpack_index(index, bix, level); if (*bix <= logfs_start_index(SUBLEVEL(*level))) *bix = 0; } static int __logfs_truncate_rec(struct inode *inode, struct page *ipage, struct write_control *this_wc, u64 size) { int truncate_happened = 0; int e, err = 0; u64 bix, child_bix, next_bix; level_t level; struct page *page; struct write_control child_wc = { /* FIXME: flags */ }; logfs_unpack_raw_index(ipage->index, &bix, &level); err = logfs_segment_read(inode, ipage, this_wc->ofs, bix, level); if (err) return err; for (e = LOGFS_BLOCK_FACTOR - 1; e >= 0; e--) { child_bix = bix + e * logfs_step(SUBLEVEL(level)); next_bix = child_bix + logfs_step(SUBLEVEL(level)); if (size > next_bix * LOGFS_BLOCKSIZE) break; child_wc.ofs = pure_ofs(block_get_pointer(ipage, e)); if (!child_wc.ofs) continue; page = logfs_get_write_page(inode, child_bix, SUBLEVEL(level)); if (!page) return -ENOMEM; if ((__force u8)level > 1) err = __logfs_truncate_rec(inode, page, &child_wc, size); else err = logfs_truncate_i0(inode, page, &child_wc, size); logfs_put_write_page(page); if (err) return err; truncate_happened = 1; alloc_indirect_block(inode, ipage, 0); block_set_pointer(ipage, e, child_wc.ofs); } if (!truncate_happened) { printk("ineffectual truncate (%lx, %lx, %llx)\n", inode->i_ino, ipage->index, size); return 0; } this_wc->flags = WF_DELETE; if (logfs_block(ipage)->partial) this_wc->flags |= WF_WRITE; return logfs_write_i0(inode, ipage, this_wc); } static int logfs_truncate_rec(struct inode *inode, u64 size) { struct logfs_inode *li = logfs_inode(inode); struct write_control wc = { .ofs = li->li_data[INDIRECT_INDEX], }; struct page *page; int err; alloc_inode_block(inode); if (!wc.ofs) return 0; page = logfs_get_write_page(inode, 0, LEVEL(li->li_height)); if (!page) return -ENOMEM; err = __logfs_truncate_rec(inode, page, &wc, size); logfs_put_write_page(page); if (err) 
return err; if (li->li_data[INDIRECT_INDEX] != wc.ofs) li->li_data[INDIRECT_INDEX] = wc.ofs; return 0; } static int __logfs_truncate(struct inode *inode, u64 size) { int ret; if (size >= logfs_factor(logfs_inode(inode)->li_height)) return 0; ret = logfs_truncate_rec(inode, size); if (ret) return ret; return logfs_truncate_direct(inode, size); } /* * Truncate, by changing the segment file, can consume a fair amount * of resources. So back off from time to time and do some GC. * 8 or 2048 blocks should be well within safety limits even if * every single block resided in a different segment. */ #define TRUNCATE_STEP (8 * 1024 * 1024) int logfs_truncate(struct inode *inode, u64 target) { struct super_block *sb = inode->i_sb; u64 size = i_size_read(inode); int err = 0; size = ALIGN(size, TRUNCATE_STEP); while (size > target) { if (size > TRUNCATE_STEP) size -= TRUNCATE_STEP; else size = 0; if (size < target) size = target; logfs_get_wblocks(sb, NULL, 1); err = __logfs_truncate(inode, size); if (!err) err = __logfs_write_inode(inode, 0); logfs_put_wblocks(sb, NULL, 1); } if (!err) err = vmtruncate(inode, target); /* I don't trust error recovery yet. 
*/ WARN_ON(err); return err; } static void move_page_to_inode(struct inode *inode, struct page *page) { struct logfs_inode *li = logfs_inode(inode); struct logfs_block *block = logfs_block(page); if (!block) return; log_blockmove("move_page_to_inode(%llx, %llx, %x)\n", block->ino, block->bix, block->level); BUG_ON(li->li_block); block->ops = &inode_block_ops; block->inode = inode; li->li_block = block; block->page = NULL; page->private = 0; ClearPagePrivate(page); } static void move_inode_to_page(struct page *page, struct inode *inode) { struct logfs_inode *li = logfs_inode(inode); struct logfs_block *block = li->li_block; if (!block) return; log_blockmove("move_inode_to_page(%llx, %llx, %x)\n", block->ino, block->bix, block->level); BUG_ON(PagePrivate(page)); block->ops = &indirect_block_ops; block->page = page; page->private = (unsigned long)block; SetPagePrivate(page); block->inode = NULL; li->li_block = NULL; } int logfs_read_inode(struct inode *inode) { struct super_block *sb = inode->i_sb; struct logfs_super *super = logfs_super(sb); struct inode *master_inode = super->s_master_inode; struct page *page; struct logfs_disk_inode *di; u64 ino = inode->i_ino; if (ino << sb->s_blocksize_bits > i_size_read(master_inode)) return -ENODATA; if (!logfs_exist_block(master_inode, ino)) return -ENODATA; page = read_cache_page(master_inode->i_mapping, ino, (filler_t *)logfs_readpage, NULL); if (IS_ERR(page)) return PTR_ERR(page); di = kmap_atomic(page, KM_USER0); logfs_disk_to_inode(di, inode); kunmap_atomic(di, KM_USER0); move_page_to_inode(inode, page); page_cache_release(page); return 0; } /* Caller must logfs_put_write_page(page); */ static struct page *inode_to_page(struct inode *inode) { struct inode *master_inode = logfs_super(inode->i_sb)->s_master_inode; struct logfs_disk_inode *di; struct page *page; BUG_ON(inode->i_ino == LOGFS_INO_MASTER); page = logfs_get_write_page(master_inode, inode->i_ino, 0); if (!page) return NULL; di = kmap_atomic(page, KM_USER0); 
logfs_inode_to_disk(inode, di); kunmap_atomic(di, KM_USER0); move_inode_to_page(page, inode); return page; } static int do_write_inode(struct inode *inode) { struct super_block *sb = inode->i_sb; struct inode *master_inode = logfs_super(sb)->s_master_inode; loff_t size = (inode->i_ino + 1) << inode->i_sb->s_blocksize_bits; struct page *page; int err; BUG_ON(inode->i_ino == LOGFS_INO_MASTER); /* FIXME: lock inode */ if (i_size_read(master_inode) < size) i_size_write(master_inode, size); /* TODO: Tell vfs this inode is clean now */ page = inode_to_page(inode); if (!page) return -ENOMEM; /* FIXME: transaction is part of logfs_block now. Is that enough? */ err = logfs_write_buf(master_inode, page, 0); if (err) move_page_to_inode(inode, page); logfs_put_write_page(page); return err; } static void logfs_mod_segment_entry(struct super_block *sb, u32 segno, int write, void (*change_se)(struct logfs_segment_entry *, long), long arg) { struct logfs_super *super = logfs_super(sb); struct inode *inode; struct page *page; struct logfs_segment_entry *se; pgoff_t page_no; int child_no; page_no = segno >> (sb->s_blocksize_bits - 3); child_no = segno & ((sb->s_blocksize >> 3) - 1); inode = super->s_segfile_inode; page = logfs_get_write_page(inode, page_no, 0); BUG_ON(!page); /* FIXME: We need some reserve page for this case */ if (!PageUptodate(page)) logfs_read_block(inode, page, WRITE); if (write) alloc_indirect_block(inode, page, 0); se = kmap_atomic(page, KM_USER0); change_se(se + child_no, arg); if (write) { logfs_set_alias(sb, logfs_block(page), child_no); BUG_ON((int)be32_to_cpu(se[child_no].valid) > super->s_segsize); } kunmap_atomic(se, KM_USER0); logfs_put_write_page(page); } static void __get_segment_entry(struct logfs_segment_entry *se, long _target) { struct logfs_segment_entry *target = (void *)_target; *target = *se; } void logfs_get_segment_entry(struct super_block *sb, u32 segno, struct logfs_segment_entry *se) { logfs_mod_segment_entry(sb, segno, 0, 
__get_segment_entry, (long)se); } static void __set_segment_used(struct logfs_segment_entry *se, long increment) { u32 valid; valid = be32_to_cpu(se->valid); valid += increment; se->valid = cpu_to_be32(valid); } void logfs_set_segment_used(struct super_block *sb, u64 ofs, int increment) { struct logfs_super *super = logfs_super(sb); u32 segno = ofs >> super->s_segshift; if (!increment) return; logfs_mod_segment_entry(sb, segno, 1, __set_segment_used, increment); } static void __set_segment_erased(struct logfs_segment_entry *se, long ec_level) { se->ec_level = cpu_to_be32(ec_level); } void logfs_set_segment_erased(struct super_block *sb, u32 segno, u32 ec, gc_level_t gc_level) { u32 ec_level = ec << 4 | (__force u8)gc_level; logfs_mod_segment_entry(sb, segno, 1, __set_segment_erased, ec_level); } static void __set_segment_reserved(struct logfs_segment_entry *se, long ignore) { se->valid = cpu_to_be32(RESERVED); } void logfs_set_segment_reserved(struct super_block *sb, u32 segno) { logfs_mod_segment_entry(sb, segno, 1, __set_segment_reserved, 0); } static void __set_segment_unreserved(struct logfs_segment_entry *se, long ec_level) { se->valid = 0; se->ec_level = cpu_to_be32(ec_level); } void logfs_set_segment_unreserved(struct super_block *sb, u32 segno, u32 ec) { u32 ec_level = ec << 4; logfs_mod_segment_entry(sb, segno, 1, __set_segment_unreserved, ec_level); } int __logfs_write_inode(struct inode *inode, long flags) { struct super_block *sb = inode->i_sb; int ret; logfs_get_wblocks(sb, NULL, flags & WF_LOCK); ret = do_write_inode(inode); logfs_put_wblocks(sb, NULL, flags & WF_LOCK); return ret; } static int do_delete_inode(struct inode *inode) { struct super_block *sb = inode->i_sb; struct inode *master_inode = logfs_super(sb)->s_master_inode; struct page *page; int ret; page = logfs_get_write_page(master_inode, inode->i_ino, 0); if (!page) return -ENOMEM; move_inode_to_page(page, inode); logfs_get_wblocks(sb, page, 1); ret = __logfs_delete(master_inode, page); 
logfs_put_wblocks(sb, page, 1); logfs_put_write_page(page); return ret; } /* * ZOMBIE inodes have already been deleted before and should remain dead, * if it weren't for valid checking. No need to kill them again here. */ void logfs_evict_inode(struct inode *inode) { struct super_block *sb = inode->i_sb; struct logfs_inode *li = logfs_inode(inode); struct logfs_block *block = li->li_block; struct page *page; if (!inode->i_nlink) { if (!(li->li_flags & LOGFS_IF_ZOMBIE)) { li->li_flags |= LOGFS_IF_ZOMBIE; if (i_size_read(inode) > 0) logfs_truncate(inode, 0); do_delete_inode(inode); } } truncate_inode_pages(&inode->i_data, 0); end_writeback(inode); /* Cheaper version of write_inode. All changes are concealed in * aliases, which are moved back. No write to the medium happens. */ /* Only deleted files may be dirty at this point */ BUG_ON(inode->i_state & I_DIRTY && inode->i_nlink); if (!block) return; if ((logfs_super(sb)->s_flags & LOGFS_SB_FLAG_SHUTDOWN)) { block->ops->free_block(inode->i_sb, block); return; } BUG_ON(inode->i_ino < LOGFS_RESERVED_INOS); page = inode_to_page(inode); BUG_ON(!page); /* FIXME: Use emergency page */ logfs_put_write_page(page); } void btree_write_block(struct logfs_block *block) { struct inode *inode; struct page *page; int err, cookie; inode = logfs_safe_iget(block->sb, block->ino, &cookie); page = logfs_get_write_page(inode, block->bix, block->level); err = logfs_readpage_nolock(page); BUG_ON(err); BUG_ON(!PagePrivate(page)); BUG_ON(logfs_block(page) != block); err = __logfs_write_buf(inode, page, 0); BUG_ON(err); BUG_ON(PagePrivate(page) || page->private); logfs_put_write_page(page); logfs_safe_iput(inode, cookie); } /** * logfs_inode_write - write inode or dentry objects * * @inode: parent inode (ifile or directory) * @buf: object to write (inode or dentry) * @n: object size * @_pos: object number (file position in blocks/objects) * @flags: write flags * @lock: 0 if write lock is already taken, 1 otherwise * @shadow_tree: shadow below 
this inode * * FIXME: All caller of this put a 200-300 byte variable on the stack, * only to call here and do a memcpy from that stack variable. A good * example of wasted performance and stack space. */ int logfs_inode_write(struct inode *inode, const void *buf, size_t count, loff_t bix, long flags, struct shadow_tree *shadow_tree) { loff_t pos = bix << inode->i_sb->s_blocksize_bits; int err; struct page *page; void *pagebuf; BUG_ON(pos & (LOGFS_BLOCKSIZE-1)); BUG_ON(count > LOGFS_BLOCKSIZE); page = logfs_get_write_page(inode, bix, 0); if (!page) return -ENOMEM; pagebuf = kmap_atomic(page, KM_USER0); memcpy(pagebuf, buf, count); flush_dcache_page(page); kunmap_atomic(pagebuf, KM_USER0); if (i_size_read(inode) < pos + LOGFS_BLOCKSIZE) i_size_write(inode, pos + LOGFS_BLOCKSIZE); err = logfs_write_buf(inode, page, flags); logfs_put_write_page(page); return err; } int logfs_open_segfile(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); struct inode *inode; inode = logfs_read_meta_inode(sb, LOGFS_INO_SEGFILE); if (IS_ERR(inode)) return PTR_ERR(inode); super->s_segfile_inode = inode; return 0; } int logfs_init_rw(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); int min_fill = 3 * super->s_no_blocks; INIT_LIST_HEAD(&super->s_object_alias); INIT_LIST_HEAD(&super->s_writeback_list); mutex_init(&super->s_write_mutex); super->s_block_pool = mempool_create_kmalloc_pool(min_fill, sizeof(struct logfs_block)); super->s_shadow_pool = mempool_create_kmalloc_pool(min_fill, sizeof(struct logfs_shadow)); return 0; } void logfs_cleanup_rw(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); logfs_mempool_destroy(super->s_block_pool); logfs_mempool_destroy(super->s_shadow_pool); }
gpl-2.0
Marvellousteam/android_kernel_htc_msm7227
arch/sh/kernel/cpu/sh3/clock-sh7710.c
4001
1886
/* * arch/sh/kernel/cpu/sh3/clock-sh7710.c * * SH7710 support for the clock framework * * Copyright (C) 2005 Paul Mundt * * FRQCR parsing hacked out of arch/sh/kernel/time.c * * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> * Copyright (C) 2002, 2003, 2004 Paul Mundt * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <asm/clock.h> #include <asm/freq.h> #include <asm/io.h> static int md_table[] = { 1, 2, 3, 4, 6, 8, 12 }; static void master_clk_init(struct clk *clk) { clk->rate *= md_table[__raw_readw(FRQCR) & 0x0007]; } static struct clk_ops sh7710_master_clk_ops = { .init = master_clk_init, }; static unsigned long module_clk_recalc(struct clk *clk) { int idx = (__raw_readw(FRQCR) & 0x0007); return clk->parent->rate / md_table[idx]; } static struct clk_ops sh7710_module_clk_ops = { .recalc = module_clk_recalc, }; static unsigned long bus_clk_recalc(struct clk *clk) { int idx = (__raw_readw(FRQCR) & 0x0700) >> 8; return clk->parent->rate / md_table[idx]; } static struct clk_ops sh7710_bus_clk_ops = { .recalc = bus_clk_recalc, }; static unsigned long cpu_clk_recalc(struct clk *clk) { int idx = (__raw_readw(FRQCR) & 0x0070) >> 4; return clk->parent->rate / md_table[idx]; } static struct clk_ops sh7710_cpu_clk_ops = { .recalc = cpu_clk_recalc, }; static struct clk_ops *sh7710_clk_ops[] = { &sh7710_master_clk_ops, &sh7710_module_clk_ops, &sh7710_bus_clk_ops, &sh7710_cpu_clk_ops, }; void __init arch_init_clk_ops(struct clk_ops **ops, int idx) { if (idx < ARRAY_SIZE(sh7710_clk_ops)) *ops = sh7710_clk_ops[idx]; }
gpl-2.0
TheNameIsNigel/kernel_common
drivers/staging/rtl8712/usb_halinit.c
4257
12186
/****************************************************************************** * usb_halinit.c * * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved. * Linux device driver for RTL8192SU * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * Modifications for inclusion into the Linux staging tree are * Copyright(c) 2010 Larry Finger. All rights reserved. * * Contact information: * WLAN FAE <wlanfae@realtek.com> * Larry Finger <Larry.Finger@lwfinger.net> * ******************************************************************************/ #define _HCI_HAL_INIT_C_ #include "osdep_service.h" #include "drv_types.h" #include "usb_ops.h" #include "usb_osintf.h" u8 r8712_usb_hal_bus_init(struct _adapter *padapter) { u8 val8 = 0; u8 ret = _SUCCESS; int PollingCnt = 20; struct registry_priv *pregistrypriv = &padapter->registrypriv; if (pregistrypriv->chip_version == RTL8712_FPGA) { val8 = 0x01; /* switch to 80M clock */ r8712_write8(padapter, SYS_CLKR, val8); val8 = r8712_read8(padapter, SPS1_CTRL); val8 = val8 | 0x01; /* enable VSPS12 LDO Macro block */ r8712_write8(padapter, SPS1_CTRL, val8); val8 = r8712_read8(padapter, AFE_MISC); val8 = val8 | 0x01; /* Enable AFE Macro Block's Bandgap */ r8712_write8(padapter, AFE_MISC, val8); val8 = r8712_read8(padapter, LDOA15_CTRL); val8 = val8 | 0x01; /* enable LDOA15 block */ r8712_write8(padapter, LDOA15_CTRL, val8); val8 = 
r8712_read8(padapter, SPS1_CTRL); val8 = val8 | 0x02; /* Enable VSPS12_SW Macro Block */ r8712_write8(padapter, SPS1_CTRL, val8); val8 = r8712_read8(padapter, AFE_MISC); val8 = val8 | 0x02; /* Enable AFE Macro Block's Mbias */ r8712_write8(padapter, AFE_MISC, val8); val8 = r8712_read8(padapter, SYS_ISO_CTRL + 1); val8 = val8 | 0x08; /* isolate PCIe Analog 1.2V to PCIe 3.3V and PCIE Digital */ r8712_write8(padapter, SYS_ISO_CTRL + 1, val8); val8 = r8712_read8(padapter, SYS_ISO_CTRL + 1); val8 = val8 & 0xEF; /* attatch AFE PLL to MACTOP/BB/PCIe Digital */ r8712_write8(padapter, SYS_ISO_CTRL + 1, val8); val8 = r8712_read8(padapter, AFE_XTAL_CTRL + 1); val8 = val8 & 0xFB; /* enable AFE clock */ r8712_write8(padapter, AFE_XTAL_CTRL + 1, val8); val8 = r8712_read8(padapter, AFE_PLL_CTRL); val8 = val8 | 0x01; /* Enable AFE PLL Macro Block */ r8712_write8(padapter, AFE_PLL_CTRL, val8); val8 = 0xEE; /* release isolation AFE PLL & MD */ r8712_write8(padapter, SYS_ISO_CTRL, val8); val8 = r8712_read8(padapter, SYS_CLKR + 1); val8 = val8 | 0x08; /* enable MAC clock */ r8712_write8(padapter, SYS_CLKR + 1, val8); val8 = r8712_read8(padapter, SYS_FUNC_EN + 1); val8 = val8 | 0x08; /* enable Core digital and enable IOREG R/W */ r8712_write8(padapter, SYS_FUNC_EN + 1, val8); val8 = val8 | 0x80; /* enable REG_EN */ r8712_write8(padapter, SYS_FUNC_EN + 1, val8); val8 = r8712_read8(padapter, SYS_CLKR + 1); val8 = (val8 | 0x80) & 0xBF; /* switch the control path */ r8712_write8(padapter, SYS_CLKR + 1, val8); val8 = 0xFC; r8712_write8(padapter, CR, val8); val8 = 0x37; r8712_write8(padapter, CR + 1, val8); /* reduce EndPoint & init it */ r8712_write8(padapter, 0x102500ab, r8712_read8(padapter, 0x102500ab) | BIT(6) | BIT(7)); /* consideration of power consumption - init */ r8712_write8(padapter, 0x10250008, r8712_read8(padapter, 0x10250008) & 0xfffffffb); } else if (pregistrypriv->chip_version == RTL8712_1stCUT) { /* Initialization for power on sequence, */ r8712_write8(padapter, SPS0_CTRL + 
1, 0x53); r8712_write8(padapter, SPS0_CTRL, 0x57); /* Enable AFE Macro Block's Bandgap and Enable AFE Macro * Block's Mbias */ val8 = r8712_read8(padapter, AFE_MISC); r8712_write8(padapter, AFE_MISC, (val8 | AFE_MISC_BGEN | AFE_MISC_MBEN)); /* Enable LDOA15 block */ val8 = r8712_read8(padapter, LDOA15_CTRL); r8712_write8(padapter, LDOA15_CTRL, (val8 | LDA15_EN)); val8 = r8712_read8(padapter, SPS1_CTRL); r8712_write8(padapter, SPS1_CTRL, (val8 | SPS1_LDEN)); msleep(20); /* Enable Switch Regulator Block */ val8 = r8712_read8(padapter, SPS1_CTRL); r8712_write8(padapter, SPS1_CTRL, (val8 | SPS1_SWEN)); r8712_write32(padapter, SPS1_CTRL, 0x00a7b267); val8 = r8712_read8(padapter, SYS_ISO_CTRL + 1); r8712_write8(padapter, SYS_ISO_CTRL + 1, (val8 | 0x08)); /* Engineer Packet CP test Enable */ val8 = r8712_read8(padapter, SYS_FUNC_EN + 1); r8712_write8(padapter, SYS_FUNC_EN + 1, (val8 | 0x20)); val8 = r8712_read8(padapter, SYS_ISO_CTRL + 1); r8712_write8(padapter, SYS_ISO_CTRL + 1, (val8 & 0x6F)); /* Enable AFE clock */ val8 = r8712_read8(padapter, AFE_XTAL_CTRL + 1); r8712_write8(padapter, AFE_XTAL_CTRL + 1, (val8 & 0xfb)); /* Enable AFE PLL Macro Block */ val8 = r8712_read8(padapter, AFE_PLL_CTRL); r8712_write8(padapter, AFE_PLL_CTRL, (val8 | 0x11)); /* Attach AFE PLL to MACTOP/BB/PCIe Digital */ val8 = r8712_read8(padapter, SYS_ISO_CTRL); r8712_write8(padapter, SYS_ISO_CTRL, (val8 & 0xEE)); /* Switch to 40M clock */ val8 = r8712_read8(padapter, SYS_CLKR); r8712_write8(padapter, SYS_CLKR, val8 & (~SYS_CLKSEL)); /* SSC Disable */ val8 = r8712_read8(padapter, SYS_CLKR); /* Enable MAC clock */ val8 = r8712_read8(padapter, SYS_CLKR + 1); r8712_write8(padapter, SYS_CLKR + 1, (val8 | 0x18)); /* Revised POS, */ r8712_write8(padapter, PMC_FSM, 0x02); /* Enable Core digital and enable IOREG R/W */ val8 = r8712_read8(padapter, SYS_FUNC_EN + 1); r8712_write8(padapter, SYS_FUNC_EN + 1, (val8 | 0x08)); /* Enable REG_EN */ val8 = r8712_read8(padapter, SYS_FUNC_EN + 1); 
r8712_write8(padapter, SYS_FUNC_EN + 1, (val8 | 0x80)); /* Switch the control path to FW */ val8 = r8712_read8(padapter, SYS_CLKR + 1); r8712_write8(padapter, SYS_CLKR + 1, (val8 | 0x80) & 0xBF); r8712_write8(padapter, CR, 0xFC); r8712_write8(padapter, CR + 1, 0x37); /* Fix the RX FIFO issue(usb error), */ val8 = r8712_read8(padapter, 0x1025FE5c); r8712_write8(padapter, 0x1025FE5c, (val8|BIT(7))); val8 = r8712_read8(padapter, 0x102500ab); r8712_write8(padapter, 0x102500ab, (val8|BIT(6)|BIT(7))); /* For power save, used this in the bit file after 970621 */ val8 = r8712_read8(padapter, SYS_CLKR); r8712_write8(padapter, SYS_CLKR, val8&(~CPU_CLKSEL)); } else if (pregistrypriv->chip_version == RTL8712_2ndCUT || pregistrypriv->chip_version == RTL8712_3rdCUT) { /* Initialization for power on sequence, * E-Fuse leakage prevention sequence */ r8712_write8(padapter, 0x37, 0xb0); msleep(20); r8712_write8(padapter, 0x37, 0x30); /* Set control path switch to HW control and reset Digital Core, * CPU Core and MAC I/O to solve FW download fail when system * from resume sate. 
*/ val8 = r8712_read8(padapter, SYS_CLKR + 1); if (val8 & 0x80) { val8 &= 0x3f; r8712_write8(padapter, SYS_CLKR + 1, val8); } val8 = r8712_read8(padapter, SYS_FUNC_EN + 1); val8 &= 0x73; r8712_write8(padapter, SYS_FUNC_EN + 1, val8); msleep(20); /* Revised POS, */ /* Enable AFE Macro Block's Bandgap and Enable AFE Macro * Block's Mbias */ r8712_write8(padapter, SPS0_CTRL + 1, 0x53); r8712_write8(padapter, SPS0_CTRL, 0x57); val8 = r8712_read8(padapter, AFE_MISC); /*Bandgap*/ r8712_write8(padapter, AFE_MISC, (val8 | AFE_MISC_BGEN)); r8712_write8(padapter, AFE_MISC, (val8 | AFE_MISC_BGEN | AFE_MISC_MBEN | AFE_MISC_I32_EN)); /* Enable PLL Power (LDOA15V) */ val8 = r8712_read8(padapter, LDOA15_CTRL); r8712_write8(padapter, LDOA15_CTRL, (val8 | LDA15_EN)); /* Enable LDOV12D block */ val8 = r8712_read8(padapter, LDOV12D_CTRL); r8712_write8(padapter, LDOV12D_CTRL, (val8 | LDV12_EN)); val8 = r8712_read8(padapter, SYS_ISO_CTRL + 1); r8712_write8(padapter, SYS_ISO_CTRL + 1, (val8 | 0x08)); /* Engineer Packet CP test Enable */ val8 = r8712_read8(padapter, SYS_FUNC_EN + 1); r8712_write8(padapter, SYS_FUNC_EN + 1, (val8 | 0x20)); /* Support 64k IMEM */ val8 = r8712_read8(padapter, SYS_ISO_CTRL + 1); r8712_write8(padapter, SYS_ISO_CTRL + 1, (val8 & 0x68)); /* Enable AFE clock */ val8 = r8712_read8(padapter, AFE_XTAL_CTRL + 1); r8712_write8(padapter, AFE_XTAL_CTRL + 1, (val8 & 0xfb)); /* Enable AFE PLL Macro Block */ val8 = r8712_read8(padapter, AFE_PLL_CTRL); r8712_write8(padapter, AFE_PLL_CTRL, (val8 | 0x11)); /* Some sample will download fw failure. 
The clock will be * stable with 500 us delay after reset the PLL * TODO: When usleep is added to kernel, change next 3 * udelay(500) to usleep(500) */ udelay(500); r8712_write8(padapter, AFE_PLL_CTRL, (val8 | 0x51)); udelay(500); r8712_write8(padapter, AFE_PLL_CTRL, (val8 | 0x11)); udelay(500); /* Attach AFE PLL to MACTOP/BB/PCIe Digital */ val8 = r8712_read8(padapter, SYS_ISO_CTRL); r8712_write8(padapter, SYS_ISO_CTRL, (val8 & 0xEE)); /* Switch to 40M clock */ r8712_write8(padapter, SYS_CLKR, 0x00); /* CPU Clock and 80M Clock SSC Disable to overcome FW download * fail timing issue. */ val8 = r8712_read8(padapter, SYS_CLKR); r8712_write8(padapter, SYS_CLKR, (val8 | 0xa0)); /* Enable MAC clock */ val8 = r8712_read8(padapter, SYS_CLKR + 1); r8712_write8(padapter, SYS_CLKR + 1, (val8 | 0x18)); /* Revised POS, */ r8712_write8(padapter, PMC_FSM, 0x02); /* Enable Core digital and enable IOREG R/W */ val8 = r8712_read8(padapter, SYS_FUNC_EN + 1); r8712_write8(padapter, SYS_FUNC_EN + 1, (val8 | 0x08)); /* Enable REG_EN */ val8 = r8712_read8(padapter, SYS_FUNC_EN + 1); r8712_write8(padapter, SYS_FUNC_EN + 1, (val8 | 0x80)); /* Switch the control path to FW */ val8 = r8712_read8(padapter, SYS_CLKR + 1); r8712_write8(padapter, SYS_CLKR + 1, (val8 | 0x80) & 0xBF); r8712_write8(padapter, CR, 0xFC); r8712_write8(padapter, CR + 1, 0x37); /* Fix the RX FIFO issue(usb error), 970410 */ val8 = r8712_read8(padapter, 0x1025FE5c); r8712_write8(padapter, 0x1025FE5c, (val8 | BIT(7))); /* For power save, used this in the bit file after 970621 */ val8 = r8712_read8(padapter, SYS_CLKR); r8712_write8(padapter, SYS_CLKR, val8 & (~CPU_CLKSEL)); /* Revised for 8051 ROM code wrong operation. */ r8712_write8(padapter, 0x1025fe1c, 0x80); /* To make sure that TxDMA can ready to download FW. * We should reset TxDMA if IMEM RPT was not ready. 
*/ do { val8 = r8712_read8(padapter, TCR); if ((val8 & _TXDMA_INIT_VALUE) == _TXDMA_INIT_VALUE) break; udelay(5); /* PlatformStallExecution(5); */ } while (PollingCnt--); /* Delay 1ms */ if (PollingCnt <= 0) { val8 = r8712_read8(padapter, CR); r8712_write8(padapter, CR, val8&(~_TXDMA_EN)); udelay(2); /* PlatformStallExecution(2); */ /* Reset TxDMA */ r8712_write8(padapter, CR, val8|_TXDMA_EN); } } else ret = _FAIL; return ret; } unsigned int r8712_usb_inirp_init(struct _adapter *padapter) { u8 i; struct recv_buf *precvbuf; struct intf_hdl *pintfhdl = &padapter->pio_queue->intf; struct recv_priv *precvpriv = &(padapter->recvpriv); precvpriv->ff_hwaddr = RTL8712_DMA_RX0FF; /* mapping rx fifo address */ /* issue Rx irp to receive data */ precvbuf = (struct recv_buf *)precvpriv->precv_buf; for (i = 0; i < NR_RECVBUFF; i++) { if (r8712_usb_read_port(pintfhdl, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf) == false) return _FAIL; precvbuf++; precvpriv->free_recv_buf_queue_cnt--; } return _SUCCESS; } unsigned int r8712_usb_inirp_deinit(struct _adapter *padapter) { r8712_usb_read_port_cancel(padapter); return _SUCCESS; }
gpl-2.0
darshan1205/yu_kernel
drivers/staging/rtl8712/usb_ops.c
4257
4877
/****************************************************************************** * usb_ops.c * * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved. * Linux device driver for RTL8192SU * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * Modifications for inclusion into the Linux staging tree are * Copyright(c) 2010 Larry Finger. All rights reserved. * * Contact information: * WLAN FAE <wlanfae@realtek.com> * Larry Finger <Larry.Finger@lwfinger.net> * ******************************************************************************/ #define _HCI_OPS_C_ #include "osdep_service.h" #include "drv_types.h" #include "osdep_intf.h" #include "usb_ops.h" #include "recv_osdep.h" static u8 usb_read8(struct intf_hdl *pintfhdl, u32 addr) { u8 request; u8 requesttype; u16 wvalue; u16 index; u16 len; u32 data; struct intf_priv *pintfpriv = pintfhdl->pintfpriv; request = 0x05; requesttype = 0x01; /* read_in */ index = 0; wvalue = (u16)(addr&0x0000ffff); len = 1; r8712_usbctrl_vendorreq(pintfpriv, request, wvalue, index, &data, len, requesttype); return (u8)(le32_to_cpu(data)&0x0ff); } static u16 usb_read16(struct intf_hdl *pintfhdl, u32 addr) { u8 request; u8 requesttype; u16 wvalue; u16 index; u16 len; u32 data; struct intf_priv *pintfpriv = pintfhdl->pintfpriv; request = 0x05; requesttype = 0x01; /* read_in */ index = 0; wvalue = (u16)(addr&0x0000ffff); len = 2; 
r8712_usbctrl_vendorreq(pintfpriv, request, wvalue, index, &data, len, requesttype); return (u16)(le32_to_cpu(data)&0xffff); } static u32 usb_read32(struct intf_hdl *pintfhdl, u32 addr) { u8 request; u8 requesttype; u16 wvalue; u16 index; u16 len; u32 data; struct intf_priv *pintfpriv = pintfhdl->pintfpriv; request = 0x05; requesttype = 0x01; /* read_in */ index = 0; wvalue = (u16)(addr&0x0000ffff); len = 4; r8712_usbctrl_vendorreq(pintfpriv, request, wvalue, index, &data, len, requesttype); return le32_to_cpu(data); } static void usb_write8(struct intf_hdl *pintfhdl, u32 addr, u8 val) { u8 request; u8 requesttype; u16 wvalue; u16 index; u16 len; u32 data; struct intf_priv *pintfpriv = pintfhdl->pintfpriv; request = 0x05; requesttype = 0x00; /* write_out */ index = 0; wvalue = (u16)(addr&0x0000ffff); len = 1; data = val; data = cpu_to_le32(data&0x000000ff); r8712_usbctrl_vendorreq(pintfpriv, request, wvalue, index, &data, len, requesttype); } static void usb_write16(struct intf_hdl *pintfhdl, u32 addr, u16 val) { u8 request; u8 requesttype; u16 wvalue; u16 index; u16 len; u32 data; struct intf_priv *pintfpriv = pintfhdl->pintfpriv; request = 0x05; requesttype = 0x00; /* write_out */ index = 0; wvalue = (u16)(addr&0x0000ffff); len = 2; data = val; data = cpu_to_le32(data&0x0000ffff); r8712_usbctrl_vendorreq(pintfpriv, request, wvalue, index, &data, len, requesttype); } static void usb_write32(struct intf_hdl *pintfhdl, u32 addr, u32 val) { u8 request; u8 requesttype; u16 wvalue; u16 index; u16 len; u32 data; struct intf_priv *pintfpriv = pintfhdl->pintfpriv; request = 0x05; requesttype = 0x00; /* write_out */ index = 0; wvalue = (u16)(addr&0x0000ffff); len = 4; data = cpu_to_le32(val); r8712_usbctrl_vendorreq(pintfpriv, request, wvalue, index, &data, len, requesttype); } void r8712_usb_set_intf_option(u32 *poption) { *poption = ((*poption) | _INTF_ASYNC_); } static void usb_intf_hdl_init(u8 *priv) { } static void usb_intf_hdl_unload(u8 *priv) { } static void 
usb_intf_hdl_open(u8 *priv) { } static void usb_intf_hdl_close(u8 *priv) { } void r8712_usb_set_intf_funs(struct intf_hdl *pintf_hdl) { pintf_hdl->intf_hdl_init = &usb_intf_hdl_init; pintf_hdl->intf_hdl_unload = &usb_intf_hdl_unload; pintf_hdl->intf_hdl_open = &usb_intf_hdl_open; pintf_hdl->intf_hdl_close = &usb_intf_hdl_close; } void r8712_usb_set_intf_ops(struct _io_ops *pops) { memset((u8 *)pops, 0, sizeof(struct _io_ops)); pops->_read8 = &usb_read8; pops->_read16 = &usb_read16; pops->_read32 = &usb_read32; pops->_read_port = &r8712_usb_read_port; pops->_write8 = &usb_write8; pops->_write16 = &usb_write16; pops->_write32 = &usb_write32; pops->_write_mem = &r8712_usb_write_mem; pops->_write_port = &r8712_usb_write_port; }
gpl-2.0
zarboz/android_kernel_htc_dlx
virt/net/netfilter/ipset/ip_set_hash_netport.c
4769
17727
/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* Kernel module implementing an IP set type: the hash:net,port type */ #include <linux/jhash.h> #include <linux/module.h> #include <linux/ip.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <linux/random.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/netlink.h> #include <linux/netfilter.h> #include <linux/netfilter/ipset/pfxlen.h> #include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set_timeout.h> #include <linux/netfilter/ipset/ip_set_getport.h> #include <linux/netfilter/ipset/ip_set_hash.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); MODULE_DESCRIPTION("hash:net,port type of IP sets"); MODULE_ALIAS("ip_set_hash:net,port"); /* Type specific function prefix */ #define TYPE hash_netport static bool hash_netport_same_set(const struct ip_set *a, const struct ip_set *b); #define hash_netport4_same_set hash_netport_same_set #define hash_netport6_same_set hash_netport_same_set /* The type variant functions: IPv4 */ /* We squeeze the "nomatch" flag into cidr: we don't support cidr == 0 * However this way we have to store internally cidr - 1, * dancing back and forth. 
*/ #define IP_SET_HASH_WITH_NETS_PACKED /* Member elements without timeout */ struct hash_netport4_elem { __be32 ip; __be16 port; u8 proto; u8 cidr:7; u8 nomatch:1; }; /* Member elements with timeout support */ struct hash_netport4_telem { __be32 ip; __be16 port; u8 proto; u8 cidr:7; u8 nomatch:1; unsigned long timeout; }; static inline bool hash_netport4_data_equal(const struct hash_netport4_elem *ip1, const struct hash_netport4_elem *ip2, u32 *multi) { return ip1->ip == ip2->ip && ip1->port == ip2->port && ip1->proto == ip2->proto && ip1->cidr == ip2->cidr; } static inline bool hash_netport4_data_isnull(const struct hash_netport4_elem *elem) { return elem->proto == 0; } static inline void hash_netport4_data_copy(struct hash_netport4_elem *dst, const struct hash_netport4_elem *src) { dst->ip = src->ip; dst->port = src->port; dst->proto = src->proto; dst->cidr = src->cidr; dst->nomatch = src->nomatch; } static inline void hash_netport4_data_flags(struct hash_netport4_elem *dst, u32 flags) { dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); } static inline bool hash_netport4_data_match(const struct hash_netport4_elem *elem) { return !elem->nomatch; } static inline void hash_netport4_data_netmask(struct hash_netport4_elem *elem, u8 cidr) { elem->ip &= ip_set_netmask(cidr); elem->cidr = cidr - 1; } static inline void hash_netport4_data_zero_out(struct hash_netport4_elem *elem) { elem->proto = 0; } static bool hash_netport4_data_list(struct sk_buff *skb, const struct hash_netport4_elem *data) { u32 flags = data->nomatch ? 
IPSET_FLAG_NOMATCH : 0; NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1); NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); if (flags) NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); return 0; nla_put_failure: return 1; } static bool hash_netport4_data_tlist(struct sk_buff *skb, const struct hash_netport4_elem *data) { const struct hash_netport4_telem *tdata = (const struct hash_netport4_telem *)data; u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1); NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(ip_set_timeout_get(tdata->timeout))); if (flags) NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); return 0; nla_put_failure: return 1; } #define IP_SET_HASH_WITH_PROTO #define IP_SET_HASH_WITH_NETS #define PF 4 #define HOST_MASK 32 #include <linux/netfilter/ipset/ip_set_ahash.h> static inline void hash_netport4_data_next(struct ip_set_hash *h, const struct hash_netport4_elem *d) { h->next.ip = ntohl(d->ip); h->next.port = ntohs(d->port); } static int hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, const struct ip_set_adt_opt *opt) { const struct ip_set_hash *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netport4_elem data = { .cidr = h->nets[0].cidr ? 
h->nets[0].cidr - 1 : HOST_MASK - 1 }; if (adt == IPSET_TEST) data.cidr = HOST_MASK - 1; if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, &data.port, &data.proto)) return -EINVAL; ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip); data.ip &= ip_set_netmask(data.cidr + 1); return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); } static int hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { const struct ip_set_hash *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netport4_elem data = { .cidr = HOST_MASK - 1 }; u32 port, port_to, p = 0, ip = 0, ip_to, last; u32 timeout = h->timeout; bool with_ports = false; u8 cidr; int ret; if (unlikely(!tb[IPSET_ATTR_IP] || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); if (ret) return ret; if (tb[IPSET_ATTR_CIDR]) { cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); if (!cidr || cidr > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; data.cidr = cidr - 1; } if (tb[IPSET_ATTR_PORT]) data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); else return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_PROTO]) { data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); with_ports = ip_set_proto_with_ports(data.proto); if (data.proto == 0) return -IPSET_ERR_INVALID_PROTO; } else return -IPSET_ERR_MISSING_PROTO; if (!(with_ports || data.proto == IPPROTO_ICMP)) data.port = 0; if (tb[IPSET_ATTR_TIMEOUT]) { if (!with_timeout(h->timeout)) return -IPSET_ERR_TIMEOUT; timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); } with_ports = with_ports && tb[IPSET_ATTR_PORT_TO]; if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) { u32 cadt_flags = 
ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_NOMATCH) flags |= (cadt_flags << 16); } if (adt == IPSET_TEST || !(with_ports || tb[IPSET_ATTR_IP_TO])) { data.ip = htonl(ip & ip_set_hostmask(data.cidr + 1)); ret = adtfn(set, &data, timeout, flags); return ip_set_eexist(ret, flags) ? 0 : ret; } port = port_to = ntohs(data.port); if (tb[IPSET_ATTR_PORT_TO]) { port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); if (port_to < port) swap(port, port_to); } if (tb[IPSET_ATTR_IP_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) return ret; if (ip_to < ip) swap(ip, ip_to); if (ip + UINT_MAX == ip_to) return -IPSET_ERR_HASH_RANGE; } else { ip_set_mask_from_to(ip, ip_to, data.cidr + 1); } if (retried) ip = h->next.ip; while (!after(ip, ip_to)) { data.ip = htonl(ip); last = ip_set_range_to_cidr(ip, ip_to, &cidr); data.cidr = cidr - 1; p = retried && ip == h->next.ip ? h->next.port : port; for (; p <= port_to; p++) { data.port = htons(p); ret = adtfn(set, &data, timeout, flags); if (ret && !ip_set_eexist(ret, flags)) return ret; else ret = 0; } ip = last + 1; } return ret; } static bool hash_netport_same_set(const struct ip_set *a, const struct ip_set *b) { const struct ip_set_hash *x = a->data; const struct ip_set_hash *y = b->data; /* Resizing changes htable_bits, so we ignore it */ return x->maxelem == y->maxelem && x->timeout == y->timeout; } /* The type variant functions: IPv6 */ struct hash_netport6_elem { union nf_inet_addr ip; __be16 port; u8 proto; u8 cidr:7; u8 nomatch:1; }; struct hash_netport6_telem { union nf_inet_addr ip; __be16 port; u8 proto; u8 cidr:7; u8 nomatch:1; unsigned long timeout; }; static inline bool hash_netport6_data_equal(const struct hash_netport6_elem *ip1, const struct hash_netport6_elem *ip2, u32 *multi) { return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && ip1->port == ip2->port && ip1->proto == ip2->proto && ip1->cidr == ip2->cidr; } static inline bool hash_netport6_data_isnull(const struct 
hash_netport6_elem *elem) { return elem->proto == 0; } static inline void hash_netport6_data_copy(struct hash_netport6_elem *dst, const struct hash_netport6_elem *src) { memcpy(dst, src, sizeof(*dst)); } static inline void hash_netport6_data_flags(struct hash_netport6_elem *dst, u32 flags) { dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); } static inline bool hash_netport6_data_match(const struct hash_netport6_elem *elem) { return !elem->nomatch; } static inline void hash_netport6_data_zero_out(struct hash_netport6_elem *elem) { elem->proto = 0; } static inline void ip6_netmask(union nf_inet_addr *ip, u8 prefix) { ip->ip6[0] &= ip_set_netmask6(prefix)[0]; ip->ip6[1] &= ip_set_netmask6(prefix)[1]; ip->ip6[2] &= ip_set_netmask6(prefix)[2]; ip->ip6[3] &= ip_set_netmask6(prefix)[3]; } static inline void hash_netport6_data_netmask(struct hash_netport6_elem *elem, u8 cidr) { ip6_netmask(&elem->ip, cidr); elem->cidr = cidr - 1; } static bool hash_netport6_data_list(struct sk_buff *skb, const struct hash_netport6_elem *data) { u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1); NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); if (flags) NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); return 0; nla_put_failure: return 1; } static bool hash_netport6_data_tlist(struct sk_buff *skb, const struct hash_netport6_elem *data) { const struct hash_netport6_telem *e = (const struct hash_netport6_telem *)data; u32 flags = data->nomatch ? 
IPSET_FLAG_NOMATCH : 0; NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1); NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(ip_set_timeout_get(e->timeout))); if (flags) NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); return 0; nla_put_failure: return 1; } #undef PF #undef HOST_MASK #define PF 6 #define HOST_MASK 128 #include <linux/netfilter/ipset/ip_set_ahash.h> static inline void hash_netport6_data_next(struct ip_set_hash *h, const struct hash_netport6_elem *d) { h->next.port = ntohs(d->port); } static int hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, const struct ip_set_adt_opt *opt) { const struct ip_set_hash *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netport6_elem data = { .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1, }; if (adt == IPSET_TEST) data.cidr = HOST_MASK - 1; if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, &data.port, &data.proto)) return -EINVAL; ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6); ip6_netmask(&data.ip, data.cidr + 1); return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); } static int hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { const struct ip_set_hash *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netport6_elem data = { .cidr = HOST_MASK - 1 }; u32 port, port_to; u32 timeout = h->timeout; bool with_ports = false; u8 cidr; int ret; if (unlikely(!tb[IPSET_ATTR_IP] || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) return -IPSET_ERR_PROTOCOL; if (unlikely(tb[IPSET_ATTR_IP_TO])) return 
-IPSET_ERR_HASH_RANGE_UNSUPPORTED; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip); if (ret) return ret; if (tb[IPSET_ATTR_CIDR]) { cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); if (!cidr || cidr > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; data.cidr = cidr - 1; } ip6_netmask(&data.ip, data.cidr + 1); if (tb[IPSET_ATTR_PORT]) data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); else return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_PROTO]) { data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); with_ports = ip_set_proto_with_ports(data.proto); if (data.proto == 0) return -IPSET_ERR_INVALID_PROTO; } else return -IPSET_ERR_MISSING_PROTO; if (!(with_ports || data.proto == IPPROTO_ICMPV6)) data.port = 0; if (tb[IPSET_ATTR_TIMEOUT]) { if (!with_timeout(h->timeout)) return -IPSET_ERR_TIMEOUT; timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); } if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) { u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_NOMATCH) flags |= (cadt_flags << 16); } if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { ret = adtfn(set, &data, timeout, flags); return ip_set_eexist(ret, flags) ? 
0 : ret; } port = ntohs(data.port); port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); if (port > port_to) swap(port, port_to); if (retried) port = h->next.port; for (; port <= port_to; port++) { data.port = htons(port); ret = adtfn(set, &data, timeout, flags); if (ret && !ip_set_eexist(ret, flags)) return ret; else ret = 0; } return ret; } /* Create hash:ip type of sets */ static int hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags) { struct ip_set_hash *h; u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; u8 hbits; size_t hsize; if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) return -IPSET_ERR_INVALID_FAMILY; if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_HASHSIZE]) { hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]); if (hashsize < IPSET_MIMINAL_HASHSIZE) hashsize = IPSET_MIMINAL_HASHSIZE; } if (tb[IPSET_ATTR_MAXELEM]) maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]); h = kzalloc(sizeof(*h) + sizeof(struct ip_set_hash_nets) * (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL); if (!h) return -ENOMEM; h->maxelem = maxelem; get_random_bytes(&h->initval, sizeof(h->initval)); h->timeout = IPSET_NO_TIMEOUT; hbits = htable_bits(hashsize); hsize = htable_size(hbits); if (hsize == 0) { kfree(h); return -ENOMEM; } h->table = ip_set_alloc(hsize); if (!h->table) { kfree(h); return -ENOMEM; } h->table->htable_bits = hbits; set->data = h; if (tb[IPSET_ATTR_TIMEOUT]) { h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); set->variant = set->family == NFPROTO_IPV4 ? &hash_netport4_tvariant : &hash_netport6_tvariant; if (set->family == NFPROTO_IPV4) hash_netport4_gc_init(set); else hash_netport6_gc_init(set); } else { set->variant = set->family == NFPROTO_IPV4 ? 
&hash_netport4_variant : &hash_netport6_variant; } pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n", set->name, jhash_size(h->table->htable_bits), h->table->htable_bits, h->maxelem, set->data, h->table); return 0; } static struct ip_set_type hash_netport_type __read_mostly = { .name = "hash:net,port", .protocol = IPSET_PROTOCOL, .features = IPSET_TYPE_IP | IPSET_TYPE_PORT, .dimension = IPSET_DIM_TWO, .family = NFPROTO_UNSPEC, .revision_min = 0, /* 1 SCTP and UDPLITE support added */ /* 2, Range as input support for IPv4 added */ .revision_max = 3, /* nomatch flag support added */ .create = hash_netport_create, .create_policy = { [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, [IPSET_ATTR_PROBES] = { .type = NLA_U8 }, [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, }, .adt_policy = { [IPSET_ATTR_IP] = { .type = NLA_NESTED }, [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, [IPSET_ATTR_PORT] = { .type = NLA_U16 }, [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 }, [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, }, .me = THIS_MODULE, }; static int __init hash_netport_init(void) { return ip_set_type_register(&hash_netport_type); } static void __exit hash_netport_fini(void) { ip_set_type_unregister(&hash_netport_type); } module_init(hash_netport_init); module_exit(hash_netport_fini);
gpl-2.0
atl4ntis/kernel_msm
net/openvswitch/flow.c
4769
35782
/* * Copyright (c) 2007-2011 Nicira Networks. * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA */ #include "flow.h" #include "datapath.h" #include <linux/uaccess.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <net/llc_pdu.h> #include <linux/kernel.h> #include <linux/jhash.h> #include <linux/jiffies.h> #include <linux/llc.h> #include <linux/module.h> #include <linux/in.h> #include <linux/rcupdate.h> #include <linux/if_arp.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/icmp.h> #include <linux/icmpv6.h> #include <linux/rculist.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/ndisc.h> static struct kmem_cache *flow_cache; static int check_header(struct sk_buff *skb, int len) { if (unlikely(skb->len < len)) return -EINVAL; if (unlikely(!pskb_may_pull(skb, len))) return -ENOMEM; return 0; } static bool arphdr_ok(struct sk_buff *skb) { return pskb_may_pull(skb, skb_network_offset(skb) + sizeof(struct arp_eth_header)); } static int check_iphdr(struct sk_buff *skb) { unsigned int nh_ofs = skb_network_offset(skb); unsigned int ip_len; int err; err = check_header(skb, nh_ofs + sizeof(struct iphdr)); if (unlikely(err)) return err; ip_len = ip_hdrlen(skb); if (unlikely(ip_len < sizeof(struct iphdr) || skb->len < nh_ofs + ip_len)) return 
-EINVAL; skb_set_transport_header(skb, nh_ofs + ip_len); return 0; } static bool tcphdr_ok(struct sk_buff *skb) { int th_ofs = skb_transport_offset(skb); int tcp_len; if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr)))) return false; tcp_len = tcp_hdrlen(skb); if (unlikely(tcp_len < sizeof(struct tcphdr) || skb->len < th_ofs + tcp_len)) return false; return true; } static bool udphdr_ok(struct sk_buff *skb) { return pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)); } static bool icmphdr_ok(struct sk_buff *skb) { return pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct icmphdr)); } u64 ovs_flow_used_time(unsigned long flow_jiffies) { struct timespec cur_ts; u64 cur_ms, idle_ms; ktime_get_ts(&cur_ts); idle_ms = jiffies_to_msecs(jiffies - flow_jiffies); cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC + cur_ts.tv_nsec / NSEC_PER_MSEC; return cur_ms - idle_ms; } #define SW_FLOW_KEY_OFFSET(field) \ (offsetof(struct sw_flow_key, field) + \ FIELD_SIZEOF(struct sw_flow_key, field)) static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key, int *key_lenp) { unsigned int nh_ofs = skb_network_offset(skb); unsigned int nh_len; int payload_ofs; struct ipv6hdr *nh; uint8_t nexthdr; __be16 frag_off; int err; *key_lenp = SW_FLOW_KEY_OFFSET(ipv6.label); err = check_header(skb, nh_ofs + sizeof(*nh)); if (unlikely(err)) return err; nh = ipv6_hdr(skb); nexthdr = nh->nexthdr; payload_ofs = (u8 *)(nh + 1) - skb->data; key->ip.proto = NEXTHDR_NONE; key->ip.tos = ipv6_get_dsfield(nh); key->ip.ttl = nh->hop_limit; key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL); key->ipv6.addr.src = nh->saddr; key->ipv6.addr.dst = nh->daddr; payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off); if (unlikely(payload_ofs < 0)) return -EINVAL; if (frag_off) { if (frag_off & htons(~0x7)) key->ip.frag = OVS_FRAG_TYPE_LATER; else key->ip.frag = OVS_FRAG_TYPE_FIRST; } nh_len = payload_ofs - nh_ofs; skb_set_transport_header(skb, 
nh_ofs + nh_len); key->ip.proto = nexthdr; return nh_len; } static bool icmp6hdr_ok(struct sk_buff *skb) { return pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct icmp6hdr)); } #define TCP_FLAGS_OFFSET 13 #define TCP_FLAG_MASK 0x3f void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb) { u8 tcp_flags = 0; if (flow->key.eth.type == htons(ETH_P_IP) && flow->key.ip.proto == IPPROTO_TCP && likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) { u8 *tcp = (u8 *)tcp_hdr(skb); tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK; } spin_lock(&flow->lock); flow->used = jiffies; flow->packet_count++; flow->byte_count += skb->len; flow->tcp_flags |= tcp_flags; spin_unlock(&flow->lock); } struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions) { int actions_len = nla_len(actions); struct sw_flow_actions *sfa; /* At least DP_MAX_PORTS actions are required to be able to flood a * packet to every port. Factor of 2 allows for setting VLAN tags, * etc. 
*/ if (actions_len > 2 * DP_MAX_PORTS * nla_total_size(4)) return ERR_PTR(-EINVAL); sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL); if (!sfa) return ERR_PTR(-ENOMEM); sfa->actions_len = actions_len; memcpy(sfa->actions, nla_data(actions), actions_len); return sfa; } struct sw_flow *ovs_flow_alloc(void) { struct sw_flow *flow; flow = kmem_cache_alloc(flow_cache, GFP_KERNEL); if (!flow) return ERR_PTR(-ENOMEM); spin_lock_init(&flow->lock); flow->sf_acts = NULL; return flow; } static struct hlist_head *find_bucket(struct flow_table *table, u32 hash) { hash = jhash_1word(hash, table->hash_seed); return flex_array_get(table->buckets, (hash & (table->n_buckets - 1))); } static struct flex_array *alloc_buckets(unsigned int n_buckets) { struct flex_array *buckets; int i, err; buckets = flex_array_alloc(sizeof(struct hlist_head *), n_buckets, GFP_KERNEL); if (!buckets) return NULL; err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL); if (err) { flex_array_free(buckets); return NULL; } for (i = 0; i < n_buckets; i++) INIT_HLIST_HEAD((struct hlist_head *) flex_array_get(buckets, i)); return buckets; } static void free_buckets(struct flex_array *buckets) { flex_array_free(buckets); } struct flow_table *ovs_flow_tbl_alloc(int new_size) { struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL); if (!table) return NULL; table->buckets = alloc_buckets(new_size); if (!table->buckets) { kfree(table); return NULL; } table->n_buckets = new_size; table->count = 0; table->node_ver = 0; table->keep_flows = false; get_random_bytes(&table->hash_seed, sizeof(u32)); return table; } void ovs_flow_tbl_destroy(struct flow_table *table) { int i; if (!table) return; if (table->keep_flows) goto skip_flows; for (i = 0; i < table->n_buckets; i++) { struct sw_flow *flow; struct hlist_head *head = flex_array_get(table->buckets, i); struct hlist_node *node, *n; int ver = table->node_ver; hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) { 
hlist_del_rcu(&flow->hash_node[ver]); ovs_flow_free(flow); } } skip_flows: free_buckets(table->buckets); kfree(table); } static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu) { struct flow_table *table = container_of(rcu, struct flow_table, rcu); ovs_flow_tbl_destroy(table); } void ovs_flow_tbl_deferred_destroy(struct flow_table *table) { if (!table) return; call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb); } struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last) { struct sw_flow *flow; struct hlist_head *head; struct hlist_node *n; int ver; int i; ver = table->node_ver; while (*bucket < table->n_buckets) { i = 0; head = flex_array_get(table->buckets, *bucket); hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) { if (i < *last) { i++; continue; } *last = i + 1; return flow; } (*bucket)++; *last = 0; } return NULL; } static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new) { int old_ver; int i; old_ver = old->node_ver; new->node_ver = !old_ver; /* Insert in new table. 
*/ for (i = 0; i < old->n_buckets; i++) { struct sw_flow *flow; struct hlist_head *head; struct hlist_node *n; head = flex_array_get(old->buckets, i); hlist_for_each_entry(flow, n, head, hash_node[old_ver]) ovs_flow_tbl_insert(new, flow); } old->keep_flows = true; } static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets) { struct flow_table *new_table; new_table = ovs_flow_tbl_alloc(n_buckets); if (!new_table) return ERR_PTR(-ENOMEM); flow_table_copy_flows(table, new_table); return new_table; } struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table) { return __flow_tbl_rehash(table, table->n_buckets); } struct flow_table *ovs_flow_tbl_expand(struct flow_table *table) { return __flow_tbl_rehash(table, table->n_buckets * 2); } void ovs_flow_free(struct sw_flow *flow) { if (unlikely(!flow)) return; kfree((struct sf_flow_acts __force *)flow->sf_acts); kmem_cache_free(flow_cache, flow); } /* RCU callback used by ovs_flow_deferred_free. */ static void rcu_free_flow_callback(struct rcu_head *rcu) { struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu); ovs_flow_free(flow); } /* Schedules 'flow' to be freed after the next RCU grace period. * The caller must hold rcu_read_lock for this to be sensible. */ void ovs_flow_deferred_free(struct sw_flow *flow) { call_rcu(&flow->rcu, rcu_free_flow_callback); } /* RCU callback used by ovs_flow_deferred_free_acts. */ static void rcu_free_acts_callback(struct rcu_head *rcu) { struct sw_flow_actions *sf_acts = container_of(rcu, struct sw_flow_actions, rcu); kfree(sf_acts); } /* Schedules 'sf_acts' to be freed after the next RCU grace period. * The caller must hold rcu_read_lock for this to be sensible. 
*/
/* Schedule an RCU-deferred free of a flow's action list; the actual free
 * happens in rcu_free_acts_callback once all current readers are done. */
void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
{
	call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
}

/* Parse an 802.1Q tag at skb->data into key->eth.tci and advance skb->data
 * past it.  Returns 0 on success or if the packet is too short to hold a
 * full tag (the short packet is simply left untagged in the key), or
 * -ENOMEM if the header could not be pulled into the linear area. */
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct qtag_prefix {
		__be16 eth_type; /* ETH_P_8021Q */
		__be16 tci;
	};
	struct qtag_prefix *qp;

	/* Need the 4-byte tag plus the following inner EtherType. */
	if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
		return 0;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
					 sizeof(__be16))))
		return -ENOMEM;

	qp = (struct qtag_prefix *) skb->data;
	/* VLAN_TAG_PRESENT marks the TCI as valid even when its value is 0. */
	key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
	__skb_pull(skb, sizeof(struct qtag_prefix));

	return 0;
}

/* Consume and return the frame's EtherType from skb->data.  Ethernet II
 * frames (type >= 1536) report their type directly; 802.3 frames are probed
 * for an LLC/SNAP header and yield either the SNAP-encapsulated type or
 * ETH_P_802_2.  Returns htons(0) only when pulling the LLC header fails,
 * which the caller treats as an out-of-memory error. */
static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr {
		u8  dsap;  /* Always 0xAA */
		u8  ssap;  /* Always 0xAA */
		u8  ctrl;
		u8  oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;
	__be16 proto;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	/* Values >= 1536 are EtherTypes; below that it is an 802.3 length. */
	if (ntohs(proto) >= 1536)
		return proto;

	if (skb->len < sizeof(struct llc_snap_hdr))
		return htons(ETH_P_802_2);

	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
		return htons(0);

	llc = (struct llc_snap_hdr *) skb->data;
	/* SNAP requires dsap == ssap == 0xAA and a zero OUI. */
	if (llc->dsap != LLC_SAP_SNAP ||
	    llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);

	__skb_pull(skb, sizeof(struct llc_snap_hdr));
	return llc->ethertype;
}

/* Fill in the ICMPv6 portion of @key from the packet's transport header,
 * including neighbour-discovery target/link-layer options for NS/NA
 * messages.  On a malformed ND option list the partially-filled ND fields
 * are wiped (the packet is still accepted with error == 0).  Writes the
 * number of key bytes used to *key_lenp and returns 0 or -ENOMEM.
 * NOTE(review): @nh_len is accepted but unused here — presumably kept for
 * signature symmetry with the IPv6 header parser; confirm against callers. */
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int *key_lenp, int nh_len)
{
	struct icmp6hdr *icmp = icmp6_hdr(skb);
	int error = 0;
	int key_len;

	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order.
	 */
	key->ipv6.tp.src = htons(icmp->icmp6_type);
	key->ipv6.tp.dst = htons(icmp->icmp6_code);
	key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);

	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);
		struct nd_msg *nd;
		int offset;

		key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);

		/* In order to process neighbor discovery options, we need the
		 * entire packet.
		 */
		if (unlikely(icmp_len < sizeof(*nd)))
			goto out;
		if (unlikely(skb_linearize(skb))) {
			error = -ENOMEM;
			goto out;
		}

		nd = (struct nd_msg *)skb_transport_header(skb);
		key->ipv6.nd.target = nd->target;
		key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);

		/* Walk the 8-byte-granular ND option list (RFC 4861). */
		icmp_len -= sizeof(*nd);
		offset = 0;
		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt =
				(struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;

			/* Zero-length or overlong options are malformed. */
			if (unlikely(!opt_len || opt_len > icmp_len))
				goto invalid;

			/* Store the link layer address if the appropriate
			 * option is provided.  It is considered an error if
			 * the same link layer option is specified twice.
			 */
			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
			    && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
					goto invalid;
				memcpy(key->ipv6.nd.sll,
				    &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
				   && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
					goto invalid;
				memcpy(key->ipv6.nd.tll,
				    &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
			}

			icmp_len -= opt_len;
			offset += opt_len;
		}
	}

	goto out;

invalid:
	/* Malformed ND options: clear everything ND-related so the key does
	 * not carry half-parsed data, but do not fail the extraction. */
	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

out:
	*key_lenp = key_len;
	return error;
}

/**
 * ovs_flow_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @in_port: port number on which @skb was received.
 * @key: output flow key
 * @key_lenp: length of output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->dl_type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->dl_type values it is left untouched.
 */
int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
		     int *key_lenp)
{
	int error = 0;
	int key_len = SW_FLOW_KEY_OFFSET(eth);
	struct ethhdr *eth;

	memset(key, 0, sizeof(*key));

	key->phy.priority = skb->priority;
	key->phy.in_port = in_port;

	skb_reset_mac_header(skb);

	/* Link layer.  We are guaranteed to have at least the 14 byte Ethernet
	 * header in the linear data area.
	 */
	eth = eth_hdr(skb);
	memcpy(key->eth.src, eth->h_source, ETH_ALEN);
	memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);

	/* Strip the two MAC addresses so skb->data sits on the EtherType. */
	__skb_pull(skb, 2 * ETH_ALEN);

	if (vlan_tx_tag_present(skb))
		/* Tag was already stripped into skb metadata by hardware. */
		key->eth.tci = htons(skb->vlan_tci);
	else if (eth->h_proto == htons(ETH_P_8021Q))
		if (unlikely(parse_vlan(skb, key)))
			return -ENOMEM;

	key->eth.type = parse_ethertype(skb);
	/* htons(0) is parse_ethertype's sentinel for a failed pull. */
	if (unlikely(key->eth.type == htons(0)))
		return -ENOMEM;

	skb_reset_network_header(skb);
	/* Restore skb->data to the MAC header now that offsets are set. */
	__skb_push(skb, skb->data - skb_mac_header(skb));

	/* Network layer.
	 */
	if (key->eth.type == htons(ETH_P_IP)) {
		struct iphdr *nh;
		__be16 offset;

		key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);

		error = check_iphdr(skb);
		if (unlikely(error)) {
			if (error == -EINVAL) {
				/* Header too short/invalid: not fatal, just
				 * stop at the network layer. */
				skb->transport_header = skb->network_header;
				error = 0;
			}
			goto out;
		}

		nh = ip_hdr(skb);
		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;

		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		/* Later fragments carry no transport header to parse. */
		offset = nh->frag_off & htons(IP_OFFSET);
		if (offset) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			goto out;
		}
		if (nh->frag_off & htons(IP_MF) ||
			 skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->ipv4.tp.src = tcp->source;
				key->ipv4.tp.dst = tcp->dest;
			}
		} else if (key->ip.proto == IPPROTO_UDP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->ipv4.tp.src = udp->source;
				key->ipv4.tp.dst = udp->dest;
			}
		} else if (key->ip.proto == IPPROTO_ICMP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);
				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order. */
				key->ipv4.tp.src = htons(icmp->type);
				key->ipv4.tp.dst = htons(icmp->code);
			}
		}

	} else if (key->eth.type == htons(ETH_P_ARP) && arphdr_ok(skb)) {
		struct arp_eth_header *arp;

		arp = (struct arp_eth_header *)skb_network_header(skb);

		/* Only Ethernet/IPv4 ARP is keyed. */
		if (arp->ar_hrd == htons(ARPHRD_ETHER)
				&& arp->ar_pro == htons(ETH_P_IP)
				&& arp->ar_hln == ETH_ALEN
				&& arp->ar_pln == 4) {

			/* We only match on the lower 8 bits of the opcode.
			 */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);

			if (key->ip.proto == ARPOP_REQUEST
					|| key->ip.proto == ARPOP_REPLY) {
				memcpy(&key->ipv4.addr.src, arp->ar_sip,
				       sizeof(key->ipv4.addr.src));
				memcpy(&key->ipv4.addr.dst, arp->ar_tip,
				       sizeof(key->ipv4.addr.dst));
				memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
				memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
				key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
			}
		}
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;             /* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key, &key_len);
		if (unlikely(nh_len < 0)) {
			if (nh_len == -EINVAL)
				/* Bad header: keep the flow, no transport. */
				skb->transport_header = skb->network_header;
			else
				error = nh_len;
			goto out;
		}

		if (key->ip.frag == OVS_FRAG_TYPE_LATER)
			goto out;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->ipv6.tp.src = tcp->source;
				key->ipv6.tp.dst = tcp->dest;
			}
		} else if (key->ip.proto == NEXTHDR_UDP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->ipv6.tp.src = udp->source;
				key->ipv6.tp.dst = udp->dest;
			}
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, &key_len,
						     nh_len);
				if (error < 0)
					goto out;
			}
		}
	}

out:
	*key_lenp = key_len;
	return error;
}

/* Hash the first @key_len bytes of a flow key.  jhash2 works on u32 words,
 * hence the round-up; keys are built with trailing padding zeroed by the
 * memset in the extract/parse paths so the last partial word is stable. */
u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len)
{
	return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), 0);
}

/* Look up an exact-match flow by key under RCU; returns NULL on miss.
 * Caller must hold rcu_read_lock (implied by hlist_for_each_entry_rcu). */
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
				    struct sw_flow_key *key, int key_len)
{
	struct sw_flow *flow;
	struct hlist_node *n;
	struct hlist_head *head;
	u32 hash;

	hash = ovs_flow_hash(key, key_len);

	head = find_bucket(table, hash);
	hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) {

		/* Compare hash first (cheap), then the full key bytes. */
		if (flow->hash == hash &&
		    !memcmp(&flow->key, key, key_len)) {
			return flow;
		}
	}
	return NULL;
}

/* Insert @flow into the bucket selected by its precomputed flow->hash. */
void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(table, flow->hash);
	hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
	table->count++;
}

/* Unlink @flow from the table; readers may still see it until an RCU
 * grace period passes (hlist_del_rcu). */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	hlist_del_rcu(&flow->hash_node[table->node_ver]);
	table->count--;
	BUG_ON(table->count < 0);
}

/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute.
 * -1 means variable length (nested), used for OVS_KEY_ATTR_ENCAP. */
const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
	[OVS_KEY_ATTR_ENCAP] = -1,
	[OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
	[OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
	[OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
	[OVS_KEY_ATTR_VLAN] = sizeof(__be16),
	[OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
	[OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
	[OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
	[OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
	[OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
	[OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
	[OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
	[OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
	[OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
};

/* Fill the IPv4 transport-layer part of @swkey from already-parsed Netlink
 * attributes in @a, consuming the corresponding bits from *attrs and
 * growing *key_len.  Returns 0 or -EINVAL if a required attribute for the
 * declared ip.proto is missing. */
static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
				  const struct nlattr *a[], u32 *attrs)
{
	const struct ovs_key_icmp *icmp_key;
	const struct ovs_key_tcp *tcp_key;
	const struct ovs_key_udp *udp_key;

	switch (swkey->ip.proto) {
	case IPPROTO_TCP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_TCP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		swkey->ipv4.tp.src = tcp_key->tcp_src;
		swkey->ipv4.tp.dst = tcp_key->tcp_dst;
		break;

	case IPPROTO_UDP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_UDP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		swkey->ipv4.tp.src = udp_key->udp_src;
		swkey->ipv4.tp.dst = udp_key->udp_dst;
		break;

	case IPPROTO_ICMP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_ICMP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_ICMP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
		icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
		/* ICMP type/code are stored in the tp port slots. */
		swkey->ipv4.tp.src = htons(icmp_key->icmp_type);
		swkey->ipv4.tp.dst = htons(icmp_key->icmp_code);
		break;
	}

	return 0;
}

/* IPv6 counterpart of ipv4_flow_from_nlattrs; additionally consumes the
 * OVS_KEY_ATTR_ND attribute for neighbour solicitation/advertisement. */
static int ipv6_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
				  const struct nlattr *a[], u32 *attrs)
{
	const struct ovs_key_icmpv6 *icmpv6_key;
	const struct ovs_key_tcp *tcp_key;
	const struct ovs_key_udp *udp_key;

	switch (swkey->ip.proto) {
	case IPPROTO_TCP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_TCP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		swkey->ipv6.tp.src = tcp_key->tcp_src;
		swkey->ipv6.tp.dst = tcp_key->tcp_dst;
		break;

	case IPPROTO_UDP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_UDP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		swkey->ipv6.tp.src = udp_key->udp_src;
		swkey->ipv6.tp.dst = udp_key->udp_dst;
		break;

	case IPPROTO_ICMPV6:
		if (!(*attrs & (1 << OVS_KEY_ATTR_ICMPV6)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);

		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
		icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
		swkey->ipv6.tp.src = htons(icmpv6_key->icmpv6_type);
		swkey->ipv6.tp.dst = htons(icmpv6_key->icmpv6_code);

		/* NS/NA messages must also carry the ND attribute. */
		if (swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
		    swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
			const struct ovs_key_nd *nd_key;

			if (!(*attrs & (1 << OVS_KEY_ATTR_ND)))
				return -EINVAL;
			*attrs &= ~(1 << OVS_KEY_ATTR_ND);

			*key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
			nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
			memcpy(&swkey->ipv6.nd.target, nd_key->nd_target,
			       sizeof(swkey->ipv6.nd.target));
			memcpy(swkey->ipv6.nd.sll, nd_key->nd_sll, ETH_ALEN);
			memcpy(swkey->ipv6.nd.tll, nd_key->nd_tll, ETH_ALEN);
		}
		break;
	}

	return 0;
}

/* Index the nested attributes of @attr into @a (one slot per attribute
 * type), validating each attribute's length against ovs_key_lens and
 * rejecting duplicates.  The set of types seen is returned in *attrsp as
 * a bitmask.  Returns 0 or -EINVAL. */
static int parse_flow_nlattrs(const struct nlattr *attr,
			      const struct nlattr *a[], u32 *attrsp)
{
	const struct nlattr *nla;
	u32 attrs;
	int rem;

	attrs = 0;
	nla_for_each_nested(nla, attr, rem) {
		u16 type = nla_type(nla);
		int expected_len;

		/* Unknown type or duplicate attribute is an error. */
		if (type > OVS_KEY_ATTR_MAX || attrs & (1 << type))
			return -EINVAL;

		expected_len = ovs_key_lens[type];
		/* -1 means variable-length (nested), any length accepted. */
		if (nla_len(nla) != expected_len && expected_len != -1)
			return -EINVAL;

		attrs |= 1 << type;
		a[type] = nla;
	}
	/* Leftover bytes mean a truncated/garbled attribute stream. */
	if (rem)
		return -EINVAL;

	*attrsp = attrs;
	return 0;
}

/**
 * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key.
 * @swkey: receives the extracted flow key.
 * @key_lenp: number of bytes used in @swkey.
 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 */
int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
			  const struct nlattr *attr)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	const struct ovs_key_ethernet *eth_key;
	int key_len;
	u32 attrs;
	int err;

	memset(swkey, 0, sizeof(struct sw_flow_key));
	key_len = SW_FLOW_KEY_OFFSET(eth);

	err = parse_flow_nlattrs(attr, a, &attrs);
	if (err)
		return err;

	/* Metadata attributes. */
	if (attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
		swkey->phy.priority = nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]);
		attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
	}
	if (attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
		if (in_port >= DP_MAX_PORTS)
			return -EINVAL;
		swkey->phy.in_port = in_port;
		attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
	} else {
		/* USHRT_MAX marks "no input port specified". */
		swkey->phy.in_port = USHRT_MAX;
	}

	/* Data attributes.
	 */
	if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET)))
		return -EINVAL;
	attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);

	eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
	memcpy(swkey->eth.src, eth_key->eth_src, ETH_ALEN);
	memcpy(swkey->eth.dst, eth_key->eth_dst, ETH_ALEN);

	if (attrs & (1u << OVS_KEY_ATTR_ETHERTYPE) &&
	    nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q)) {
		/* VLAN-tagged: the real flow key is nested inside ENCAP. */
		const struct nlattr *encap;
		__be16 tci;

		/* Exactly VLAN + ETHERTYPE + ENCAP must remain at this point. */
		if (attrs != ((1 << OVS_KEY_ATTR_VLAN) |
			      (1 << OVS_KEY_ATTR_ETHERTYPE) |
			      (1 << OVS_KEY_ATTR_ENCAP)))
			return -EINVAL;

		encap = a[OVS_KEY_ATTR_ENCAP];
		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
		if (tci & htons(VLAN_TAG_PRESENT)) {
			swkey->eth.tci = tci;

			/* Re-run attribute parsing on the encapsulated key;
			 * this overwrites @a and @attrs for the inner layer. */
			err = parse_flow_nlattrs(encap, a, &attrs);
			if (err)
				return err;
		} else if (!tci) {
			/* Corner case for truncated 802.1Q header. */
			if (nla_len(encap))
				return -EINVAL;

			swkey->eth.type = htons(ETH_P_8021Q);
			*key_lenp = key_len;
			return 0;
		} else {
			return -EINVAL;
		}
	}

	if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
		swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
		/* Values below 1536 are 802.3 lengths, not EtherTypes. */
		if (ntohs(swkey->eth.type) < 1536)
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
	} else {
		swkey->eth.type = htons(ETH_P_802_2);
	}

	if (swkey->eth.type == htons(ETH_P_IP)) {
		const struct ovs_key_ipv4 *ipv4_key;

		if (!(attrs & (1 << OVS_KEY_ATTR_IPV4)))
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_IPV4);

		key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
		ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
		if (ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX)
			return -EINVAL;
		swkey->ip.proto = ipv4_key->ipv4_proto;
		swkey->ip.tos = ipv4_key->ipv4_tos;
		swkey->ip.ttl = ipv4_key->ipv4_ttl;
		swkey->ip.frag = ipv4_key->ipv4_frag;
		swkey->ipv4.addr.src = ipv4_key->ipv4_src;
		swkey->ipv4.addr.dst = ipv4_key->ipv4_dst;

		/* Later fragments carry no transport attributes. */
		if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
			err = ipv4_flow_from_nlattrs(swkey, &key_len, a, &attrs);
			if (err)
				return err;
		}
	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		const struct ovs_key_ipv6 *ipv6_key;

		if (!(attrs & (1 << OVS_KEY_ATTR_IPV6)))
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_IPV6);

		key_len = SW_FLOW_KEY_OFFSET(ipv6.label);
		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
		if (ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX)
			return -EINVAL;
		swkey->ipv6.label = ipv6_key->ipv6_label;
		swkey->ip.proto = ipv6_key->ipv6_proto;
		swkey->ip.tos = ipv6_key->ipv6_tclass;
		swkey->ip.ttl = ipv6_key->ipv6_hlimit;
		swkey->ip.frag = ipv6_key->ipv6_frag;
		memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src,
		       sizeof(swkey->ipv6.addr.src));
		memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst,
		       sizeof(swkey->ipv6.addr.dst));

		if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
			err = ipv6_flow_from_nlattrs(swkey, &key_len, a, &attrs);
			if (err)
				return err;
		}
	} else if (swkey->eth.type == htons(ETH_P_ARP)) {
		const struct ovs_key_arp *arp_key;

		if (!(attrs & (1 << OVS_KEY_ATTR_ARP)))
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_ARP);

		key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
		swkey->ipv4.addr.src = arp_key->arp_sip;
		swkey->ipv4.addr.dst = arp_key->arp_tip;
		/* Only 8-bit opcodes fit in ip.proto; reject anything wider. */
		if (arp_key->arp_op & htons(0xff00))
			return -EINVAL;
		swkey->ip.proto = ntohs(arp_key->arp_op);
		memcpy(swkey->ipv4.arp.sha, arp_key->arp_sha, ETH_ALEN);
		memcpy(swkey->ipv4.arp.tha, arp_key->arp_tha, ETH_ALEN);
	}

	/* Any attribute bit still set was never consumed -> bogus key. */
	if (attrs)
		return -EINVAL;
	*key_lenp = key_len;

	return 0;
}

/**
 * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
 * @in_port: receives the extracted input port.
 * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 *
 * This parses a series of Netlink attributes that form a flow key, which must
 * take the same form accepted by flow_from_nlattrs(), but only enough of it to
 * get the metadata, that is, the parts of the flow key that cannot be
 * extracted from the packet itself.
 */
int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
				   const struct nlattr *attr)
{
	const struct nlattr *nla;
	int rem;

	/* Defaults when the attribute is absent. */
	*in_port = USHRT_MAX;
	*priority = 0;

	nla_for_each_nested(nla, attr, rem) {
		int type = nla_type(nla);

		if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) {
			if (nla_len(nla) != ovs_key_lens[type])
				return -EINVAL;

			switch (type) {
			case OVS_KEY_ATTR_PRIORITY:
				*priority = nla_get_u32(nla);
				break;

			case OVS_KEY_ATTR_IN_PORT:
				if (nla_get_u32(nla) >= DP_MAX_PORTS)
					return -EINVAL;
				*in_port = nla_get_u32(nla);
				break;
			}
		}
	}
	if (rem)
		return -EINVAL;
	return 0;
}

/* Serialize @swkey into nested %OVS_KEY_ATTR_* attributes on @skb — the
 * inverse of ovs_flow_from_nlattrs().  Returns 0 or -EMSGSIZE when the
 * skb runs out of tailroom (via the NLA_PUT_* goto). */
int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
{
	struct ovs_key_ethernet *eth_key;
	struct nlattr *nla, *encap;

	if (swkey->phy.priority)
		NLA_PUT_U32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority);

	if (swkey->phy.in_port != USHRT_MAX)
		NLA_PUT_U32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port);

	nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
	if (!nla)
		goto nla_put_failure;
	eth_key = nla_data(nla);
	memcpy(eth_key->eth_src, swkey->eth.src, ETH_ALEN);
	memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);

	if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
		/* Tagged flow: emit the VLAN headers and open an ENCAP nest
		 * for the inner key. */
		NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q));
		NLA_PUT_BE16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci);
		encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
		if (!swkey->eth.tci)
			goto unencap;
	} else {
		encap = NULL;
	}

	if (swkey->eth.type == htons(ETH_P_802_2))
		goto unencap;

	NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type);

	if (swkey->eth.type == htons(ETH_P_IP)) {
		struct ovs_key_ipv4 *ipv4_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
		if (!nla)
			goto nla_put_failure;
		ipv4_key = nla_data(nla);
		ipv4_key->ipv4_src = swkey->ipv4.addr.src;
		ipv4_key->ipv4_dst = swkey->ipv4.addr.dst;
		ipv4_key->ipv4_proto = swkey->ip.proto;
		ipv4_key->ipv4_tos = swkey->ip.tos;
		ipv4_key->ipv4_ttl = swkey->ip.ttl;
		ipv4_key->ipv4_frag = swkey->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		struct ovs_key_ipv6 *ipv6_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
		if (!nla)
			goto nla_put_failure;
		ipv6_key = nla_data(nla);
		memcpy(ipv6_key->ipv6_src, &swkey->ipv6.addr.src,
		       sizeof(ipv6_key->ipv6_src));
		memcpy(ipv6_key->ipv6_dst, &swkey->ipv6.addr.dst,
		       sizeof(ipv6_key->ipv6_dst));
		ipv6_key->ipv6_label = swkey->ipv6.label;
		ipv6_key->ipv6_proto = swkey->ip.proto;
		ipv6_key->ipv6_tclass = swkey->ip.tos;
		ipv6_key->ipv6_hlimit = swkey->ip.ttl;
		ipv6_key->ipv6_frag = swkey->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_ARP)) {
		struct ovs_key_arp *arp_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
		if (!nla)
			goto nla_put_failure;
		arp_key = nla_data(nla);
		memset(arp_key, 0, sizeof(struct ovs_key_arp));
		arp_key->arp_sip = swkey->ipv4.addr.src;
		arp_key->arp_tip = swkey->ipv4.addr.dst;
		arp_key->arp_op = htons(swkey->ip.proto);
		memcpy(arp_key->arp_sha, swkey->ipv4.arp.sha, ETH_ALEN);
		memcpy(arp_key->arp_tha, swkey->ipv4.arp.tha, ETH_ALEN);
	}

	/* Transport attributes only exist for non-later IP fragments. */
	if ((swkey->eth.type == htons(ETH_P_IP) ||
	     swkey->eth.type == htons(ETH_P_IPV6)) &&
	     swkey->ip.frag != OVS_FRAG_TYPE_LATER) {

		if (swkey->ip.proto == IPPROTO_TCP) {
			struct ovs_key_tcp *tcp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
			if (!nla)
				goto nla_put_failure;
			tcp_key = nla_data(nla);
			if (swkey->eth.type == htons(ETH_P_IP)) {
				tcp_key->tcp_src = swkey->ipv4.tp.src;
				tcp_key->tcp_dst = swkey->ipv4.tp.dst;
			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
				tcp_key->tcp_src = swkey->ipv6.tp.src;
				tcp_key->tcp_dst = swkey->ipv6.tp.dst;
			}
		} else if (swkey->ip.proto == IPPROTO_UDP) {
			struct ovs_key_udp *udp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
			if (!nla)
				goto nla_put_failure;
			udp_key = nla_data(nla);
			if (swkey->eth.type == htons(ETH_P_IP)) {
				udp_key->udp_src = swkey->ipv4.tp.src;
				udp_key->udp_dst = swkey->ipv4.tp.dst;
			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
				udp_key->udp_src = swkey->ipv6.tp.src;
				udp_key->udp_dst = swkey->ipv6.tp.dst;
			}
		} else if (swkey->eth.type == htons(ETH_P_IP) &&
			   swkey->ip.proto == IPPROTO_ICMP) {
			struct ovs_key_icmp *icmp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
			if (!nla)
				goto nla_put_failure;
			icmp_key = nla_data(nla);
			/* Type/code were stored network-order in the port slots. */
			icmp_key->icmp_type = ntohs(swkey->ipv4.tp.src);
			icmp_key->icmp_code = ntohs(swkey->ipv4.tp.dst);
		} else if (swkey->eth.type == htons(ETH_P_IPV6) &&
			   swkey->ip.proto == IPPROTO_ICMPV6) {
			struct ovs_key_icmpv6 *icmpv6_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
						sizeof(*icmpv6_key));
			if (!nla)
				goto nla_put_failure;
			icmpv6_key = nla_data(nla);
			icmpv6_key->icmpv6_type = ntohs(swkey->ipv6.tp.src);
			icmpv6_key->icmpv6_code = ntohs(swkey->ipv6.tp.dst);

			if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
			    icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
				struct ovs_key_nd *nd_key;

				nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
				if (!nla)
					goto nla_put_failure;
				nd_key = nla_data(nla);
				memcpy(nd_key->nd_target, &swkey->ipv6.nd.target,
							sizeof(nd_key->nd_target));
				memcpy(nd_key->nd_sll, swkey->ipv6.nd.sll, ETH_ALEN);
				memcpy(nd_key->nd_tll, swkey->ipv6.nd.tll, ETH_ALEN);
			}
		}
	}

unencap:
	if (encap)
		nla_nest_end(skb, encap);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

/* Initializes the flow module.
 *
 * Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
					0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_cache);
}
gpl-2.0
cooldudezach/android_kernel_zte_nex
drivers/tty/serial/amba-pl010.c
5025
19937
/* * Driver for AMBA serial ports * * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. * * Copyright 1999 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * This is a generic driver for ARM AMBA-type serial ports. They * have a lot of 16550-like features, but are not register compatible. * Note that although they do have CTS, DCD and DSR inputs, they do * not have an RI input, nor do they have DTR or RTS outputs. If * required, these have to be supplied via some other means (eg, GPIO) * and hooked into this driver. 
*/ #if defined(CONFIG_SERIAL_AMBA_PL010_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/module.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/console.h> #include <linux/sysrq.h> #include <linux/device.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial_core.h> #include <linux/serial.h> #include <linux/amba/bus.h> #include <linux/amba/serial.h> #include <linux/clk.h> #include <linux/slab.h> #include <asm/io.h> #define UART_NR 8 #define SERIAL_AMBA_MAJOR 204 #define SERIAL_AMBA_MINOR 16 #define SERIAL_AMBA_NR UART_NR #define AMBA_ISR_PASS_LIMIT 256 #define UART_RX_DATA(s) (((s) & UART01x_FR_RXFE) == 0) #define UART_TX_READY(s) (((s) & UART01x_FR_TXFF) == 0) #define UART_DUMMY_RSR_RX 256 #define UART_PORT_SIZE 64 /* * We wrap our port structure around the generic uart_port. */ struct uart_amba_port { struct uart_port port; struct clk *clk; struct amba_device *dev; struct amba_pl010_data *data; unsigned int old_status; }; static void pl010_stop_tx(struct uart_port *port) { struct uart_amba_port *uap = (struct uart_amba_port *)port; unsigned int cr; cr = readb(uap->port.membase + UART010_CR); cr &= ~UART010_CR_TIE; writel(cr, uap->port.membase + UART010_CR); } static void pl010_start_tx(struct uart_port *port) { struct uart_amba_port *uap = (struct uart_amba_port *)port; unsigned int cr; cr = readb(uap->port.membase + UART010_CR); cr |= UART010_CR_TIE; writel(cr, uap->port.membase + UART010_CR); } static void pl010_stop_rx(struct uart_port *port) { struct uart_amba_port *uap = (struct uart_amba_port *)port; unsigned int cr; cr = readb(uap->port.membase + UART010_CR); cr &= ~(UART010_CR_RIE | UART010_CR_RTIE); writel(cr, uap->port.membase + UART010_CR); } static void pl010_enable_ms(struct uart_port *port) { struct uart_amba_port *uap = (struct uart_amba_port *)port; unsigned int cr; cr = readb(uap->port.membase + UART010_CR); cr |= UART010_CR_MSIE; writel(cr, uap->port.membase + 
UART010_CR); } static void pl010_rx_chars(struct uart_amba_port *uap) { struct tty_struct *tty = uap->port.state->port.tty; unsigned int status, ch, flag, rsr, max_count = 256; status = readb(uap->port.membase + UART01x_FR); while (UART_RX_DATA(status) && max_count--) { ch = readb(uap->port.membase + UART01x_DR); flag = TTY_NORMAL; uap->port.icount.rx++; /* * Note that the error handling code is * out of the main execution path */ rsr = readb(uap->port.membase + UART01x_RSR) | UART_DUMMY_RSR_RX; if (unlikely(rsr & UART01x_RSR_ANY)) { writel(0, uap->port.membase + UART01x_ECR); if (rsr & UART01x_RSR_BE) { rsr &= ~(UART01x_RSR_FE | UART01x_RSR_PE); uap->port.icount.brk++; if (uart_handle_break(&uap->port)) goto ignore_char; } else if (rsr & UART01x_RSR_PE) uap->port.icount.parity++; else if (rsr & UART01x_RSR_FE) uap->port.icount.frame++; if (rsr & UART01x_RSR_OE) uap->port.icount.overrun++; rsr &= uap->port.read_status_mask; if (rsr & UART01x_RSR_BE) flag = TTY_BREAK; else if (rsr & UART01x_RSR_PE) flag = TTY_PARITY; else if (rsr & UART01x_RSR_FE) flag = TTY_FRAME; } if (uart_handle_sysrq_char(&uap->port, ch)) goto ignore_char; uart_insert_char(&uap->port, rsr, UART01x_RSR_OE, ch, flag); ignore_char: status = readb(uap->port.membase + UART01x_FR); } spin_unlock(&uap->port.lock); tty_flip_buffer_push(tty); spin_lock(&uap->port.lock); } static void pl010_tx_chars(struct uart_amba_port *uap) { struct circ_buf *xmit = &uap->port.state->xmit; int count; if (uap->port.x_char) { writel(uap->port.x_char, uap->port.membase + UART01x_DR); uap->port.icount.tx++; uap->port.x_char = 0; return; } if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) { pl010_stop_tx(&uap->port); return; } count = uap->port.fifosize >> 1; do { writel(xmit->buf[xmit->tail], uap->port.membase + UART01x_DR); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); uap->port.icount.tx++; if (uart_circ_empty(xmit)) break; } while (--count > 0); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 
uart_write_wakeup(&uap->port); if (uart_circ_empty(xmit)) pl010_stop_tx(&uap->port); } static void pl010_modem_status(struct uart_amba_port *uap) { unsigned int status, delta; writel(0, uap->port.membase + UART010_ICR); status = readb(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY; delta = status ^ uap->old_status; uap->old_status = status; if (!delta) return; if (delta & UART01x_FR_DCD) uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD); if (delta & UART01x_FR_DSR) uap->port.icount.dsr++; if (delta & UART01x_FR_CTS) uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS); wake_up_interruptible(&uap->port.state->port.delta_msr_wait); } static irqreturn_t pl010_int(int irq, void *dev_id) { struct uart_amba_port *uap = dev_id; unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT; int handled = 0; spin_lock(&uap->port.lock); status = readb(uap->port.membase + UART010_IIR); if (status) { do { if (status & (UART010_IIR_RTIS | UART010_IIR_RIS)) pl010_rx_chars(uap); if (status & UART010_IIR_MIS) pl010_modem_status(uap); if (status & UART010_IIR_TIS) pl010_tx_chars(uap); if (pass_counter-- == 0) break; status = readb(uap->port.membase + UART010_IIR); } while (status & (UART010_IIR_RTIS | UART010_IIR_RIS | UART010_IIR_TIS)); handled = 1; } spin_unlock(&uap->port.lock); return IRQ_RETVAL(handled); } static unsigned int pl010_tx_empty(struct uart_port *port) { struct uart_amba_port *uap = (struct uart_amba_port *)port; unsigned int status = readb(uap->port.membase + UART01x_FR); return status & UART01x_FR_BUSY ? 
0 : TIOCSER_TEMT; } static unsigned int pl010_get_mctrl(struct uart_port *port) { struct uart_amba_port *uap = (struct uart_amba_port *)port; unsigned int result = 0; unsigned int status; status = readb(uap->port.membase + UART01x_FR); if (status & UART01x_FR_DCD) result |= TIOCM_CAR; if (status & UART01x_FR_DSR) result |= TIOCM_DSR; if (status & UART01x_FR_CTS) result |= TIOCM_CTS; return result; } static void pl010_set_mctrl(struct uart_port *port, unsigned int mctrl) { struct uart_amba_port *uap = (struct uart_amba_port *)port; if (uap->data) uap->data->set_mctrl(uap->dev, uap->port.membase, mctrl); } static void pl010_break_ctl(struct uart_port *port, int break_state) { struct uart_amba_port *uap = (struct uart_amba_port *)port; unsigned long flags; unsigned int lcr_h; spin_lock_irqsave(&uap->port.lock, flags); lcr_h = readb(uap->port.membase + UART010_LCRH); if (break_state == -1) lcr_h |= UART01x_LCRH_BRK; else lcr_h &= ~UART01x_LCRH_BRK; writel(lcr_h, uap->port.membase + UART010_LCRH); spin_unlock_irqrestore(&uap->port.lock, flags); } static int pl010_startup(struct uart_port *port) { struct uart_amba_port *uap = (struct uart_amba_port *)port; int retval; retval = clk_prepare(uap->clk); if (retval) goto out; /* * Try to enable the clock producer. 
*/ retval = clk_enable(uap->clk); if (retval) goto clk_unprep; uap->port.uartclk = clk_get_rate(uap->clk); /* * Allocate the IRQ */ retval = request_irq(uap->port.irq, pl010_int, 0, "uart-pl010", uap); if (retval) goto clk_dis; /* * initialise the old status of the modem signals */ uap->old_status = readb(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY; /* * Finally, enable interrupts */ writel(UART01x_CR_UARTEN | UART010_CR_RIE | UART010_CR_RTIE, uap->port.membase + UART010_CR); return 0; clk_dis: clk_disable(uap->clk); clk_unprep: clk_unprepare(uap->clk); out: return retval; } static void pl010_shutdown(struct uart_port *port) { struct uart_amba_port *uap = (struct uart_amba_port *)port; /* * Free the interrupt */ free_irq(uap->port.irq, uap); /* * disable all interrupts, disable the port */ writel(0, uap->port.membase + UART010_CR); /* disable break condition and fifos */ writel(readb(uap->port.membase + UART010_LCRH) & ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN), uap->port.membase + UART010_LCRH); /* * Shut down the clock producer */ clk_disable(uap->clk); clk_unprepare(uap->clk); } static void pl010_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { struct uart_amba_port *uap = (struct uart_amba_port *)port; unsigned int lcr_h, old_cr; unsigned long flags; unsigned int baud, quot; /* * Ask the core to calculate the divisor for us. 
*/ baud = uart_get_baud_rate(port, termios, old, 0, uap->port.uartclk/16); quot = uart_get_divisor(port, baud); switch (termios->c_cflag & CSIZE) { case CS5: lcr_h = UART01x_LCRH_WLEN_5; break; case CS6: lcr_h = UART01x_LCRH_WLEN_6; break; case CS7: lcr_h = UART01x_LCRH_WLEN_7; break; default: // CS8 lcr_h = UART01x_LCRH_WLEN_8; break; } if (termios->c_cflag & CSTOPB) lcr_h |= UART01x_LCRH_STP2; if (termios->c_cflag & PARENB) { lcr_h |= UART01x_LCRH_PEN; if (!(termios->c_cflag & PARODD)) lcr_h |= UART01x_LCRH_EPS; } if (uap->port.fifosize > 1) lcr_h |= UART01x_LCRH_FEN; spin_lock_irqsave(&uap->port.lock, flags); /* * Update the per-port timeout. */ uart_update_timeout(port, termios->c_cflag, baud); uap->port.read_status_mask = UART01x_RSR_OE; if (termios->c_iflag & INPCK) uap->port.read_status_mask |= UART01x_RSR_FE | UART01x_RSR_PE; if (termios->c_iflag & (BRKINT | PARMRK)) uap->port.read_status_mask |= UART01x_RSR_BE; /* * Characters to ignore */ uap->port.ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) uap->port.ignore_status_mask |= UART01x_RSR_FE | UART01x_RSR_PE; if (termios->c_iflag & IGNBRK) { uap->port.ignore_status_mask |= UART01x_RSR_BE; /* * If we're ignoring parity and break indicators, * ignore overruns too (for real raw support). */ if (termios->c_iflag & IGNPAR) uap->port.ignore_status_mask |= UART01x_RSR_OE; } /* * Ignore all characters if CREAD is not set. 
*/ if ((termios->c_cflag & CREAD) == 0) uap->port.ignore_status_mask |= UART_DUMMY_RSR_RX; /* first, disable everything */ old_cr = readb(uap->port.membase + UART010_CR) & ~UART010_CR_MSIE; if (UART_ENABLE_MS(port, termios->c_cflag)) old_cr |= UART010_CR_MSIE; writel(0, uap->port.membase + UART010_CR); /* Set baud rate */ quot -= 1; writel((quot & 0xf00) >> 8, uap->port.membase + UART010_LCRM); writel(quot & 0xff, uap->port.membase + UART010_LCRL); /* * ----------v----------v----------v----------v----- * NOTE: MUST BE WRITTEN AFTER UARTLCR_M & UARTLCR_L * ----------^----------^----------^----------^----- */ writel(lcr_h, uap->port.membase + UART010_LCRH); writel(old_cr, uap->port.membase + UART010_CR); spin_unlock_irqrestore(&uap->port.lock, flags); } static void pl010_set_ldisc(struct uart_port *port, int new) { if (new == N_PPS) { port->flags |= UPF_HARDPPS_CD; pl010_enable_ms(port); } else port->flags &= ~UPF_HARDPPS_CD; } static const char *pl010_type(struct uart_port *port) { return port->type == PORT_AMBA ? "AMBA" : NULL; } /* * Release the memory region(s) being used by 'port' */ static void pl010_release_port(struct uart_port *port) { release_mem_region(port->mapbase, UART_PORT_SIZE); } /* * Request the memory region(s) being used by 'port' */ static int pl010_request_port(struct uart_port *port) { return request_mem_region(port->mapbase, UART_PORT_SIZE, "uart-pl010") != NULL ? 0 : -EBUSY; } /* * Configure/autoconfigure the port. */ static void pl010_config_port(struct uart_port *port, int flags) { if (flags & UART_CONFIG_TYPE) { port->type = PORT_AMBA; pl010_request_port(port); } } /* * verify the new serial_struct (for TIOCSSERIAL). 
*/
/*
 * Validate a TIOCSSERIAL request: only the AMBA port type (or "don't
 * care"), an IRQ below nr_irqs and a baud_base of at least 9600 are
 * accepted.  Returns 0 or -EINVAL.
 */
static int pl010_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	int ret = 0;

	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
		ret = -EINVAL;
	if (ser->irq < 0 || ser->irq >= nr_irqs)
		ret = -EINVAL;
	if (ser->baud_base < 9600)
		ret = -EINVAL;
	return ret;
}

/* serial_core operations vector wired to the pl010_* handlers above. */
static struct uart_ops amba_pl010_pops = {
	.tx_empty	= pl010_tx_empty,
	.set_mctrl	= pl010_set_mctrl,
	.get_mctrl	= pl010_get_mctrl,
	.stop_tx	= pl010_stop_tx,
	.start_tx	= pl010_start_tx,
	.stop_rx	= pl010_stop_rx,
	.enable_ms	= pl010_enable_ms,
	.break_ctl	= pl010_break_ctl,
	.startup	= pl010_startup,
	.shutdown	= pl010_shutdown,
	.set_termios	= pl010_set_termios,
	.set_ldisc	= pl010_set_ldisc,
	.type		= pl010_type,
	.release_port	= pl010_release_port,
	.request_port	= pl010_request_port,
	.config_port	= pl010_config_port,
	.verify_port	= pl010_verify_port,
};

/* One slot per possible port; probe fills the first NULL slot. */
static struct uart_amba_port *amba_ports[UART_NR];

#ifdef CONFIG_SERIAL_AMBA_PL010_CONSOLE

/*
 * Busy-wait until the transmitter can accept a character, then write it
 * to the data register.  Used as the per-character callback of
 * uart_console_write().
 */
static void pl010_console_putchar(struct uart_port *port, int ch)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int status;

	do {
		status = readb(uap->port.membase + UART01x_FR);
		barrier();
	} while (!UART_TX_READY(status));
	writel(ch, uap->port.membase + UART01x_DR);
}

/*
 * Console write: save the control register, mask UART interrupts while
 * emitting the string, then wait for the transmitter to drain before
 * restoring the original CR value.
 */
static void pl010_console_write(struct console *co, const char *s, unsigned int count)
{
	struct uart_amba_port *uap = amba_ports[co->index];
	unsigned int status, old_cr;

	clk_enable(uap->clk);

	/*
	 * First save the CR then disable the interrupts
	 */
	old_cr = readb(uap->port.membase + UART010_CR);
	writel(UART01x_CR_UARTEN, uap->port.membase + UART010_CR);

	uart_console_write(&uap->port, s, count, pl010_console_putchar);

	/*
	 * Finally, wait for transmitter to become empty
	 * and restore the TCR
	 */
	do {
		status = readb(uap->port.membase + UART01x_FR);
		barrier();
	} while (status & UART01x_FR_BUSY);
	writel(old_cr, uap->port.membase + UART010_CR);

	clk_disable(uap->clk);
}

/*
 * If the UART was left enabled (e.g. by the boot loader), read back the
 * line-control and divisor registers and derive parity, word length and
 * baud rate so the console can continue with the current settings.
 */
static void __init
pl010_console_get_options(struct uart_amba_port *uap, int *baud,
			  int *parity, int *bits)
{
	if (readb(uap->port.membase + UART010_CR) & UART01x_CR_UARTEN) {
		unsigned int lcr_h, quot;

		lcr_h = readb(uap->port.membase + UART010_LCRH);

		*parity = 'n';
		if (lcr_h & UART01x_LCRH_PEN) {
			if (lcr_h & UART01x_LCRH_EPS)
				*parity = 'e';
			else
				*parity = 'o';
		}

		/* Only 7- and 8-bit word lengths are distinguished here. */
		if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
			*bits = 7;
		else
			*bits = 8;

		quot = readb(uap->port.membase + UART010_LCRL) |
		       readb(uap->port.membase + UART010_LCRM) << 8;
		*baud = uap->port.uartclk / (16 * (quot + 1));
	}
}

/*
 * Console setup: resolve the port, prepare its clock, and apply either
 * the user-supplied option string or the hardware's current settings.
 */
static int __init pl010_console_setup(struct console *co, char *options)
{
	struct uart_amba_port *uap;
	int baud = 38400;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index >= UART_NR)
		co->index = 0;
	uap = amba_ports[co->index];
	if (!uap)
		return -ENODEV;

	ret = clk_prepare(uap->clk);
	if (ret)
		return ret;

	uap->port.uartclk = clk_get_rate(uap->clk);

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	else
		pl010_console_get_options(uap, &baud, &parity, &bits);

	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}

static struct uart_driver amba_reg;

static struct console amba_console = {
	.name		= "ttyAM",
	.write		= pl010_console_write,
	.device		= uart_console_device,
	.setup		= pl010_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &amba_reg,
};

#define AMBA_CONSOLE	&amba_console
#else
#define AMBA_CONSOLE	NULL
#endif

static struct uart_driver amba_reg = {
	.owner		= THIS_MODULE,
	.driver_name	= "ttyAM",
	.dev_name	= "ttyAM",
	.major		= SERIAL_AMBA_MAJOR,
	.minor		= SERIAL_AMBA_MINOR,
	.nr		= UART_NR,
	.cons		= AMBA_CONSOLE,
};

/*
 * Bus probe: claim a free amba_ports[] slot, map the device registers,
 * obtain the clock and register the port with serial_core.  On failure
 * the error labels inside the final if-block unwind in reverse order.
 */
static int pl010_probe(struct amba_device *dev, const struct amba_id *id)
{
	struct uart_amba_port *uap;
	void __iomem *base;
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (amba_ports[i] == NULL)
			break;

	if (i == ARRAY_SIZE(amba_ports)) {
		ret = -EBUSY;
		goto out;
	}

	uap = kzalloc(sizeof(struct uart_amba_port), GFP_KERNEL);
	if (!uap) {
		ret = -ENOMEM;
		goto out;
	}

	base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!base) {
		ret = -ENOMEM;
		goto free;
	}

	uap->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(uap->clk)) {
		ret = PTR_ERR(uap->clk);
		goto unmap;
	}

	uap->port.dev = &dev->dev;
	uap->port.mapbase = dev->res.start;
	uap->port.membase = base;
	uap->port.iotype = UPIO_MEM;
	uap->port.irq = dev->irq[0];
	uap->port.fifosize = 16;
	uap->port.ops = &amba_pl010_pops;
	uap->port.flags = UPF_BOOT_AUTOCONF;
	uap->port.line = i;
	uap->dev = dev;
	uap->data = dev->dev.platform_data;

	amba_ports[i] = uap;

	amba_set_drvdata(dev, uap);
	ret = uart_add_one_port(&amba_reg, &uap->port);
	if (ret) {
		amba_set_drvdata(dev, NULL);
		amba_ports[i] = NULL;
		clk_put(uap->clk);
 unmap:
		iounmap(base);
 free:
		kfree(uap);
	}
 out:
	return ret;
}

/*
 * Bus remove: unregister the port, release the amba_ports[] slot and
 * free all resources acquired in probe.
 */
static int pl010_remove(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);
	int i;

	amba_set_drvdata(dev, NULL);

	uart_remove_one_port(&amba_reg, &uap->port);

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (amba_ports[i] == uap)
			amba_ports[i] = NULL;

	iounmap(uap->port.membase);
	clk_put(uap->clk);
	kfree(uap);
	return 0;
}

/* Power management: delegate suspend/resume to serial_core. */
static int pl010_suspend(struct amba_device *dev, pm_message_t state)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);

	if (uap)
		uart_suspend_port(&amba_reg, &uap->port);

	return 0;
}

static int pl010_resume(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);

	if (uap)
		uart_resume_port(&amba_reg, &uap->port);

	return 0;
}

/* Match table: AMBA peripheral ID 0x41010 (PrimeCell PL010). */
static struct amba_id pl010_ids[] = {
	{
		.id	= 0x00041010,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl010_ids);

static struct amba_driver pl010_driver = {
	.drv = {
		.name	= "uart-pl010",
	},
	.id_table	= pl010_ids,
	.probe		= pl010_probe,
	.remove		= pl010_remove,
	.suspend	= pl010_suspend,
	.resume		= pl010_resume,
};

/*
 * Module init: register the uart driver first, then the amba driver;
 * unwind the uart registration if the bus registration fails.
 */
static int __init pl010_init(void)
{
	int ret;

	printk(KERN_INFO "Serial: AMBA driver\n");

	ret = uart_register_driver(&amba_reg);
	if (ret == 0) {
		ret = amba_driver_register(&pl010_driver);
		if (ret)
			uart_unregister_driver(&amba_reg);
	}
	return ret;
}

static void __exit pl010_exit(void)
{
	amba_driver_unregister(&pl010_driver);
	uart_unregister_driver(&amba_reg);
}

module_init(pl010_init);
module_exit(pl010_exit);

MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("ARM AMBA serial port driver");
MODULE_LICENSE("GPL");
gpl-2.0
phonelab/android_kernel
arch/parisc/kernel/signal32.c
6049
16732
/* Signal support for 32-bit kernel builds
 *
 * Copyright (C) 2001 Matthew Wilcox <willy at parisc-linux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle at parisc-linux.org>
 *
 * Code was mostly borrowed from kernel/signal.c.
 * See kernel/signal.c for additional Copyrights.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/errno.h>

#include <asm/uaccess.h>

#include "signal32.h"
#include "sys32.h"

#define DEBUG_COMPAT_SIG 0
#define DEBUG_COMPAT_SIG_LEVEL 2

/* Compile-time debug trace: DBG(level, fmt, ...) prints only when
 * DEBUG_COMPAT_SIG is enabled and level is within the configured
 * verbosity; otherwise it expands to nothing. */
#if DEBUG_COMPAT_SIG
#define DBG(LEVEL, ...) \
	((DEBUG_COMPAT_SIG_LEVEL >= LEVEL) \
	? printk(__VA_ARGS__) : (void) 0)
#else
#define DBG(LEVEL, ...)
#endif

#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

/* Fold the two 32-bit words of a compat sigset into one 64-bit word. */
inline void
sigset_32to64(sigset_t *s64, compat_sigset_t *s32)
{
	s64->sig[0] = s32->sig[0] | ((unsigned long)s32->sig[1] << 32);
}

/* Split a 64-bit sigset word into the two 32-bit compat words. */
inline void
sigset_64to32(compat_sigset_t *s32, sigset_t *s64)
{
	s32->sig[0] = s64->sig[0] & 0xffffffffUL;
	s32->sig[1] = (s64->sig[0] >> 32) & 0xffffffffUL;
}

/* Copy a kernel sigset to userspace in compat (2 x 32-bit) layout.
 * Returns the copy_to_user() result (0 on success). */
static int
put_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
{
	compat_sigset_t s;

	if (sz != sizeof *set)
		panic("put_sigset32()");
	sigset_64to32(&s, set);

	return copy_to_user(up, &s, sizeof s);
}

/* Read a compat sigset from userspace and widen it to the kernel
 * 64-bit layout.  Returns the copy_from_user() result (0 on success). */
static int
get_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
{
	compat_sigset_t s;
	int r;

	if (sz != sizeof *set)
		panic("put_sigset32()");
	if ((r = copy_from_user(&s, up, sz)) == 0) {
		sigset_32to64(set, &s);
	}

	return r;
}

/* 32-bit wrapper for rt_sigprocmask: converts the sigsets at the
 * boundary and forwards to the native syscall via KERNEL_SYSCALL. */
int sys32_rt_sigprocmask(int how, compat_sigset_t __user *set, compat_sigset_t __user *oset,
				    unsigned int sigsetsize)
{
	sigset_t old_set, new_set;
	int ret;

	if (set && get_sigset32(set, &new_set, sigsetsize))
		return -EFAULT;

	KERNEL_SYSCALL(ret, sys_rt_sigprocmask, how,
				set ? (sigset_t __user *)&new_set : NULL,
				oset ? (sigset_t __user *)&old_set : NULL, sigsetsize);

	if (!ret && oset && put_sigset32(oset, &old_set, sigsetsize))
		return -EFAULT;

	return ret;
}

/* 32-bit wrapper for rt_sigpending: fetches the pending set natively,
 * then narrows it to compat layout for userspace. */
int sys32_rt_sigpending(compat_sigset_t __user *uset, unsigned int sigsetsize)
{
	int ret;
	sigset_t set;

	KERNEL_SYSCALL(ret, sys_rt_sigpending, (sigset_t __user *)&set, sigsetsize);

	if (!ret && put_sigset32(uset, &set, sigsetsize))
		return -EFAULT;

	return ret;
}

/* 32-bit wrapper for rt_sigaction: widens the handler pointer and mask
 * on the way in, narrows them on the way out. */
long
sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, struct sigaction32 __user *oact,
                 size_t sigsetsize)
{
	struct k_sigaction32 new_sa32, old_sa32;
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	if (act) {
		if (copy_from_user(&new_sa32.sa, act, sizeof new_sa32.sa))
			return -EFAULT;
		new_sa.sa.sa_handler = (__sighandler_t)(unsigned long)new_sa32.sa.sa_handler;
		new_sa.sa.sa_flags = new_sa32.sa.sa_flags;
		sigset_32to64(&new_sa.sa.sa_mask, &new_sa32.sa.sa_mask);
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		sigset_64to32(&old_sa32.sa.sa_mask, &old_sa.sa.sa_mask);
		old_sa32.sa.sa_flags = old_sa.sa.sa_flags;
		old_sa32.sa.sa_handler = (__sighandler_t32)(unsigned long)old_sa.sa.sa_handler;
		if (copy_to_user(oact, &old_sa32.sa, sizeof old_sa32.sa))
			return -EFAULT;
	}
	return ret;
}

/* 32-bit wrapper for sigaltstack: converts compat_stack_t to/from the
 * native stack_t around the in-kernel do_sigaltstack() call. */
int
do_sigaltstack32 (const compat_stack_t __user *uss32, compat_stack_t __user *uoss32, unsigned long sp)
{
	compat_stack_t ss32, oss32;
	stack_t ss, oss;
	stack_t *ssp = NULL, *ossp = NULL;
	int ret;

	if (uss32) {
		if (copy_from_user(&ss32, uss32, sizeof ss32))
			return -EFAULT;
		ss.ss_sp = (void __user *)(unsigned long)ss32.ss_sp;
		ss.ss_flags = ss32.ss_flags;
		ss.ss_size = ss32.ss_size;
		ssp = &ss;
	}
	if (uoss32)
		ossp = &oss;
	KERNEL_SYSCALL(ret, do_sigaltstack, (const stack_t __user *)ssp, (stack_t __user *)ossp, sp);
	if (!ret && uoss32) {
		oss32.ss_sp = (unsigned int)(unsigned long)oss.ss_sp;
		oss32.ss_flags = oss.ss_flags;
		oss32.ss_size = oss.ss_size;
		if (copy_to_user(uoss32, &oss32, sizeof *uoss32))
			return -EFAULT;
	}
	return ret;
}

/*
 * Rebuild the 64-bit pt_regs from a 32-bit sigcontext plus the hidden
 * regfile that carries the upper 32 bits of each value.  Each register
 * is assembled as (upper << 32) | lower.  Returns the OR of all
 * __get_user/__copy_from_user results (0 on success).
 */
long
restore_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __user * rf,
		struct pt_regs *regs)
{
	long err = 0;
	compat_uint_t compat_reg;
	compat_uint_t compat_regt;
	int regn;

	/* When loading 32-bit values into 64-bit registers make
	   sure to clear the upper 32-bits */
	DBG(2,"restore_sigcontext32: PER_LINUX32 process\n");
	DBG(2,"restore_sigcontext32: sc = 0x%p, rf = 0x%p, regs = 0x%p\n", sc, rf, regs);
	DBG(2,"restore_sigcontext32: compat_sigcontext is %#lx bytes\n", sizeof(*sc));
	for(regn=0; regn < 32; regn++){
		err |= __get_user(compat_reg,&sc->sc_gr[regn]);
		regs->gr[regn] = compat_reg;
		/* Load upper half */
		err |= __get_user(compat_regt,&rf->rf_gr[regn]);
		regs->gr[regn] = ((u64)compat_regt << 32) | (u64)compat_reg;
		DBG(3,"restore_sigcontext32: gr%02d = %#lx (%#x / %#x)\n",
				regn, regs->gr[regn], compat_regt, compat_reg);
	}
	DBG(2,"restore_sigcontext32: sc->sc_fr = 0x%p (%#lx)\n",sc->sc_fr, sizeof(sc->sc_fr));

	/* XXX: BE WARNED FR's are 64-BIT! */
	err |= __copy_from_user(regs->fr, sc->sc_fr, sizeof(regs->fr));

	/* Better safe than sorry, pass __get_user two things of
	   the same size and let gcc do the upward conversion to
	   64-bits */
	err |= __get_user(compat_reg, &sc->sc_iaoq[0]);
	/* Load upper half */
	err |= __get_user(compat_regt, &rf->rf_iaoq[0]);
	regs->iaoq[0] = ((u64)compat_regt << 32) | (u64)compat_reg;
	DBG(2,"restore_sigcontext32: upper half of iaoq[0] = %#lx\n", compat_regt);
	DBG(2,"restore_sigcontext32: sc->sc_iaoq[0] = %p => %#x\n",
			&sc->sc_iaoq[0], compat_reg);

	err |= __get_user(compat_reg, &sc->sc_iaoq[1]);
	/* Load upper half */
	err |= __get_user(compat_regt, &rf->rf_iaoq[1]);
	regs->iaoq[1] = ((u64)compat_regt << 32) | (u64)compat_reg;
	DBG(2,"restore_sigcontext32: upper half of iaoq[1] = %#lx\n", compat_regt);
	DBG(2,"restore_sigcontext32: sc->sc_iaoq[1] = %p => %#x\n",
			&sc->sc_iaoq[1],compat_reg);
	DBG(2,"restore_sigcontext32: iaoq is %#lx / %#lx\n",
			regs->iaoq[0],regs->iaoq[1]);

	err |= __get_user(compat_reg, &sc->sc_iasq[0]);
	/* Load the upper half for iasq */
	err |= __get_user(compat_regt, &rf->rf_iasq[0]);
	regs->iasq[0] = ((u64)compat_regt << 32) | (u64)compat_reg;
	DBG(2,"restore_sigcontext32: upper half of iasq[0] = %#lx\n", compat_regt);

	err |= __get_user(compat_reg, &sc->sc_iasq[1]);
	/* Load the upper half for iasq */
	err |= __get_user(compat_regt, &rf->rf_iasq[1]);
	regs->iasq[1] = ((u64)compat_regt << 32) | (u64)compat_reg;
	DBG(2,"restore_sigcontext32: upper half of iasq[1] = %#lx\n", compat_regt);
	DBG(2,"restore_sigcontext32: iasq is %#lx / %#lx\n",
		regs->iasq[0],regs->iasq[1]);

	err |= __get_user(compat_reg, &sc->sc_sar);
	/* Load the upper half for sar */
	err |= __get_user(compat_regt, &rf->rf_sar);
	regs->sar = ((u64)compat_regt << 32) | (u64)compat_reg;
	DBG(2,"restore_sigcontext32: upper_half & sar = %#lx\n", compat_regt);
	DBG(2,"restore_sigcontext32: sar is %#lx\n", regs->sar);
	DBG(2,"restore_sigcontext32: r28 is %ld\n", regs->gr[28]);

	return err;
}

/*
 * Set up the sigcontext structure for this process.
 * This is not an easy task if the kernel is 64-bit, it will require
 * that we examine the process personality to determine if we need to
 * truncate for a 32-bit userspace.
 */
long
setup_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __user * rf,
		struct pt_regs *regs, int in_syscall)
{
	compat_int_t flags = 0;
	long err = 0;
	compat_uint_t compat_reg;
	compat_uint_t compat_regb;
	int regn;

	if (on_sig_stack((unsigned long) sc))
		flags |= PARISC_SC_FLAG_ONSTACK;

	if (in_syscall) {
		DBG(1,"setup_sigcontext32: in_syscall\n");

		flags |= PARISC_SC_FLAG_IN_SYSCALL;
		/* Truncate gr31 */
		compat_reg = (compat_uint_t)(regs->gr[31]);
		/* regs->iaoq is undefined in the syscall return path */
		err |= __put_user(compat_reg, &sc->sc_iaoq[0]);
		DBG(2,"setup_sigcontext32: sc->sc_iaoq[0] = %p <= %#x\n",
				&sc->sc_iaoq[0], compat_reg);

		/* Store upper half */
		compat_reg = (compat_uint_t)(regs->gr[31] >> 32);
		err |= __put_user(compat_reg, &rf->rf_iaoq[0]);
		DBG(2,"setup_sigcontext32: upper half iaoq[0] = %#x\n", compat_reg);

		compat_reg = (compat_uint_t)(regs->gr[31]+4);
		err |= __put_user(compat_reg, &sc->sc_iaoq[1]);
		DBG(2,"setup_sigcontext32: sc->sc_iaoq[1] = %p <= %#x\n",
				&sc->sc_iaoq[1], compat_reg);
		/* Store upper half */
		compat_reg = (compat_uint_t)((regs->gr[31]+4) >> 32);
		err |= __put_user(compat_reg, &rf->rf_iaoq[1]);
		DBG(2,"setup_sigcontext32: upper half iaoq[1] = %#x\n", compat_reg);

		/* Truncate sr3 */
		compat_reg = (compat_uint_t)(regs->sr[3]);
		err |= __put_user(compat_reg, &sc->sc_iasq[0]);
		err |= __put_user(compat_reg, &sc->sc_iasq[1]);

		/* Store upper half */
		compat_reg = (compat_uint_t)(regs->sr[3] >> 32);
		err |= __put_user(compat_reg, &rf->rf_iasq[0]);
		err |= __put_user(compat_reg, &rf->rf_iasq[1]);
		DBG(2,"setup_sigcontext32: upper half iasq[0] = %#x\n", compat_reg);
		DBG(2,"setup_sigcontext32: upper half iasq[1] = %#x\n", compat_reg);
		DBG(1,"setup_sigcontext32: iaoq %#lx / %#lx\n",
				regs->gr[31], regs->gr[31]+4);
	} else {
		compat_reg = (compat_uint_t)(regs->iaoq[0]);
		err |= __put_user(compat_reg, &sc->sc_iaoq[0]);
		DBG(2,"setup_sigcontext32: sc->sc_iaoq[0] = %p <= %#x\n",
				&sc->sc_iaoq[0], compat_reg);
		/* Store upper half */
		compat_reg = (compat_uint_t)(regs->iaoq[0] >> 32);
		err |= __put_user(compat_reg, &rf->rf_iaoq[0]);
		DBG(2,"setup_sigcontext32: upper half iaoq[0] = %#x\n", compat_reg);

		compat_reg = (compat_uint_t)(regs->iaoq[1]);
		err |= __put_user(compat_reg, &sc->sc_iaoq[1]);
		DBG(2,"setup_sigcontext32: sc->sc_iaoq[1] = %p <= %#x\n",
				&sc->sc_iaoq[1], compat_reg);
		/* Store upper half */
		compat_reg = (compat_uint_t)(regs->iaoq[1] >> 32);
		err |= __put_user(compat_reg, &rf->rf_iaoq[1]);
		DBG(2,"setup_sigcontext32: upper half iaoq[1] = %#x\n", compat_reg);

		compat_reg = (compat_uint_t)(regs->iasq[0]);
		err |= __put_user(compat_reg, &sc->sc_iasq[0]);
		DBG(2,"setup_sigcontext32: sc->sc_iasq[0] = %p <= %#x\n",
				&sc->sc_iasq[0], compat_reg);
		/* Store upper half */
		compat_reg = (compat_uint_t)(regs->iasq[0] >> 32);
		err |= __put_user(compat_reg, &rf->rf_iasq[0]);
		DBG(2,"setup_sigcontext32: upper half iasq[0] = %#x\n", compat_reg);

		compat_reg = (compat_uint_t)(regs->iasq[1]);
		err |= __put_user(compat_reg, &sc->sc_iasq[1]);
		DBG(2,"setup_sigcontext32: sc->sc_iasq[1] = %p <= %#x\n",
				&sc->sc_iasq[1], compat_reg);
		/* Store upper half */
		compat_reg = (compat_uint_t)(regs->iasq[1] >> 32);
		err |= __put_user(compat_reg, &rf->rf_iasq[1]);
		DBG(2,"setup_sigcontext32: upper half iasq[1] = %#x\n", compat_reg);

		/* Print out the IAOQ for debugging */
		DBG(1,"setup_sigcontext32: ia0q %#lx / %#lx\n",
			regs->iaoq[0], regs->iaoq[1]);
	}

	err |= __put_user(flags, &sc->sc_flags);

	DBG(1,"setup_sigcontext32: Truncating general registers.\n");
	for(regn=0; regn < 32; regn++){
		/* Truncate a general register */
		compat_reg = (compat_uint_t)(regs->gr[regn]);
		err |= __put_user(compat_reg, &sc->sc_gr[regn]);
		/* Store upper half */
		compat_regb = (compat_uint_t)(regs->gr[regn] >> 32);
		err |= __put_user(compat_regb, &rf->rf_gr[regn]);

		/* DEBUG: Write out the "upper / lower" register data */
		DBG(2,"setup_sigcontext32: gr%02d = %#x / %#x\n", regn,
				compat_regb, compat_reg);
	}

	/* Copy the floating point registers (same size)
	   XXX: BE WARNED FR's are 64-BIT! */
	DBG(1,"setup_sigcontext32: Copying from regs to sc, "
	      "sc->sc_fr size = %#lx, regs->fr size = %#lx\n",
		sizeof(regs->fr), sizeof(sc->sc_fr));
	err |= __copy_to_user(sc->sc_fr, regs->fr, sizeof(regs->fr));

	compat_reg = (compat_uint_t)(regs->sar);
	err |= __put_user(compat_reg, &sc->sc_sar);
	DBG(2,"setup_sigcontext32: sar is %#x\n", compat_reg);
	/* Store upper half */
	compat_reg = (compat_uint_t)(regs->sar >> 32);
	err |= __put_user(compat_reg, &rf->rf_sar);
	DBG(2,"setup_sigcontext32: upper half sar = %#x\n", compat_reg);
	DBG(1,"setup_sigcontext32: r28 is %ld\n", regs->gr[28]);

	return err;
}

/*
 * Convert a compat (32-bit) siginfo from userspace into the native
 * siginfo_t.  The union member that is read depends on si_code; note
 * the __SI_CHLD case intentionally falls through into the default
 * (pid/uid) case before breaking.
 */
int
copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from)
{
	compat_uptr_t addr;
	int err;

	if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
		return -EFAULT;

	err = __get_user(to->si_signo, &from->si_signo);
	err |= __get_user(to->si_errno, &from->si_errno);
	err |= __get_user(to->si_code, &from->si_code);

	if (to->si_code < 0)
		err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
	else {
		switch (to->si_code >> 16) {
		case __SI_CHLD >> 16:
			err |= __get_user(to->si_utime, &from->si_utime);
			err |= __get_user(to->si_stime, &from->si_stime);
			err |= __get_user(to->si_status, &from->si_status);
			/* fallthrough */
		default:
			err |= __get_user(to->si_pid, &from->si_pid);
			err |= __get_user(to->si_uid, &from->si_uid);
			break;
		case __SI_FAULT >> 16:
			err |= __get_user(addr, &from->si_addr);
			to->si_addr = compat_ptr(addr);
			break;
		case __SI_POLL >> 16:
			err |= __get_user(to->si_band, &from->si_band);
			err |= __get_user(to->si_fd, &from->si_fd);
			break;
		case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
		case __SI_MESGQ >> 16:
			err |= __get_user(to->si_pid, &from->si_pid);
			err |= __get_user(to->si_uid, &from->si_uid);
			err |= __get_user(to->si_int, &from->si_int);
			break;
		}
	}
	return err;
}

/*
 * Convert a native siginfo_t to the compat layout for a 32-bit
 * userspace.  Copies only the generic fields plus the union member
 * selected by si_code, never raw padding, to avoid leaking kernel
 * stack contents.
 */
int
copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from)
{
	compat_uptr_t addr;
	compat_int_t val;
	int err;

	if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
		return -EFAULT;

	/* If you change siginfo_t structure, please be sure
	   this code is fixed accordingly.
	   It should never copy any pad contained in the structure
	   to avoid security leaks, but must copy the generic
	   3 ints plus the relevant union member.
	   This routine must convert siginfo from 64bit to 32bit as well
	   at the same time.  */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	if (from->si_code < 0)
		err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
	else {
		switch (from->si_code >> 16) {
		case __SI_CHLD >> 16:
			err |= __put_user(from->si_utime, &to->si_utime);
			err |= __put_user(from->si_stime, &to->si_stime);
			err |= __put_user(from->si_status, &to->si_status);
			/* fallthrough */
		default:
			err |= __put_user(from->si_pid, &to->si_pid);
			err |= __put_user(from->si_uid, &to->si_uid);
			break;
		case __SI_FAULT >> 16:
			addr = ptr_to_compat(from->si_addr);
			err |= __put_user(addr, &to->si_addr);
			break;
		case __SI_POLL >> 16:
			err |= __put_user(from->si_band, &to->si_band);
			err |= __put_user(from->si_fd, &to->si_fd);
			break;
		case __SI_TIMER >> 16:
			err |= __put_user(from->si_tid, &to->si_tid);
			err |= __put_user(from->si_overrun, &to->si_overrun);
			val = (compat_int_t)from->si_int;
			err |= __put_user(val, &to->si_int);
			break;
		case __SI_RT >> 16: /* Not generated by the kernel as of now. */
		case __SI_MESGQ >> 16:
			err |= __put_user(from->si_uid, &to->si_uid);
			err |= __put_user(from->si_pid, &to->si_pid);
			val = (compat_int_t)from->si_int;
			err |= __put_user(val, &to->si_int);
			break;
		}
	}
	return err;
}

/*
 * Compat entry for rt_sigqueueinfo: converts the compat siginfo and
 * forwards to kill_proc_info().  Rejects si_code >= 0 so userspace
 * cannot forge kernel-originated signals.
 */
asmlinkage long compat_sys_rt_sigqueueinfo(int pid, int sig,
	struct compat_siginfo __user *uinfo)
{
	siginfo_t info;

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
gpl-2.0
SM-G920P/Hacker_Kernel_SM-G92X
fs/proc/consoles.c
7841
2249
/*
 * Copyright (c) 2010 Werner Fink, Jiri Slaby
 *
 * Licensed under GPLv2
 */

#include <linux/console.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/tty_driver.h>

/*
 * This is handler for /proc/consoles
 */
static int show_console_dev(struct seq_file *m, void *v)
{
	static const struct {
		short flag;
		char name;
	} con_flags[] = {
		{ CON_ENABLED,		'E' },
		{ CON_CONSDEV,		'C' },
		{ CON_BOOT,		'B' },
		{ CON_PRINTBUFFER,	'p' },
		{ CON_BRL,		'b' },
		{ CON_ANYTIME,		'a' },
	};
	char flags[ARRAY_SIZE(con_flags) + 1];
	struct console *con = v;
	unsigned int a;
	int len;
	dev_t dev = 0;

	/* Resolve the console's char device number, if it exposes one. */
	if (con->device) {
		const struct tty_driver *driver;
		int index;
		driver = con->device(con, &index);
		if (driver) {
			dev = MKDEV(driver->major, driver->minor_start);
			dev += index;
		}
	}

	/* Build the one-letter flags column from con->flags. */
	for (a = 0; a < ARRAY_SIZE(con_flags); a++)
		flags[a] = (con->flags & con_flags[a].flag) ?
			con_flags[a].name : ' ';
	flags[a] = 0;

	/*
	 * Print the name and pad the line to a fixed width.  The emitted
	 * length is measured via the seq_file write position rather than
	 * the "%n" format specifier: %n is a classic format-string hazard
	 * and was removed from seq_file users kernel-wide.  On buffer
	 * overflow the seq machinery discards and retries with a bigger
	 * buffer, so a miscounted length in that case is harmless.
	 */
	len = m->count;
	seq_printf(m, "%s%d", con->name, con->index);
	len = m->count - len;
	len = 21 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c%c%c%c (%s)", len, ' ',
			con->read ? 'R' : '-',
			con->write ? 'W' : '-',
			con->unblank ? 'U' : '-',
			flags);
	if (dev)
		seq_printf(m, " %4d:%d", MAJOR(dev), MINOR(dev));

	seq_printf(m, "\n");
	return 0;
}

/*
 * Seq iterator start: take the console list lock and walk to the
 * console at position *pos (NULL when past the end).  The lock is
 * held until c_stop().
 */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	struct console *con;
	loff_t off = 0;

	console_lock();
	for_each_console(con)
		if (off++ == *pos)
			break;

	return con;
}

/* Seq iterator step: advance to the next registered console. */
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct console *con = v;
	++*pos;
	return con->next;
}

/* Seq iterator stop: release the lock taken in c_start(). */
static void c_stop(struct seq_file *m, void *v)
{
	console_unlock();
}

static const struct seq_operations consoles_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_console_dev
};

static int consoles_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &consoles_op);
}

static const struct file_operations proc_consoles_operations = {
	.open		= consoles_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* Register /proc/consoles at boot. */
static int __init proc_consoles_init(void)
{
	proc_create("consoles", 0, NULL, &proc_consoles_operations);
	return 0;
}
module_init(proc_consoles_init);
gpl-2.0
zekezang/linux-2.6.38
sound/oss/sb_common.c
9377
29760
/* * sound/oss/sb_common.c * * Common routines for Sound Blaster compatible cards. * * * Copyright (C) by Hannu Savolainen 1993-1997 * * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL) * Version 2 (June 1991). See the "COPYING" file distributed with this software * for more info. * * * Daniel J. Rodriksson: Modified sbintr to handle 8 and 16 bit interrupts * for full duplex support ( only sb16 by now ) * Rolf Fokkens: Added (BETA?) support for ES1887 chips. * (fokkensr@vertis.nl) Which means: You can adjust the recording levels. * * 2000/01/18 - separated sb_card and sb_common - * Jeff Garzik <jgarzik@pobox.com> * * 2000/09/18 - got rid of attach_uart401 * Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * 2001/01/26 - replaced CLI/STI with spinlocks * Chris Rankin <rankinc@zipworld.com.au> */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/slab.h> #include "sound_config.h" #include "sound_firmware.h" #include "mpu401.h" #include "sb_mixer.h" #include "sb.h" #include "sb_ess.h" /* * global module flag */ int sb_be_quiet; static sb_devc *detected_devc; /* For communication from probe to init */ static sb_devc *last_devc; /* For MPU401 initialization */ static unsigned char jazz_irq_bits[] = { 0, 0, 2, 3, 0, 1, 0, 4, 0, 2, 5, 0, 0, 0, 0, 6 }; static unsigned char jazz_dma_bits[] = { 0, 1, 0, 2, 0, 3, 0, 4 }; void *smw_free; /* * Jazz16 chipset specific control variables */ static int jazz16_base; /* Not detected */ static unsigned char jazz16_bits; /* I/O relocation bits */ static DEFINE_SPINLOCK(jazz16_lock); /* * Logitech Soundman Wave specific initialization code */ #ifdef SMW_MIDI0001_INCLUDED #include "smw-midi0001.h" #else static unsigned char *smw_ucode; static int smw_ucodeLen; #endif static sb_devc *last_sb; /* Last sb loaded */ int sb_dsp_command(sb_devc * devc, unsigned char val) { int i; unsigned long limit; limit = jiffies + 
HZ / 10; /* Timeout */ /* * Note! the i<500000 is an emergency exit. The sb_dsp_command() is sometimes * called while interrupts are disabled. This means that the timer is * disabled also. However the timeout situation is a abnormal condition. * Normally the DSP should be ready to accept commands after just couple of * loops. */ for (i = 0; i < 500000 && (limit-jiffies)>0; i++) { if ((inb(DSP_STATUS) & 0x80) == 0) { outb((val), DSP_COMMAND); return 1; } } printk(KERN_WARNING "Sound Blaster: DSP command(%x) timeout.\n", val); return 0; } int sb_dsp_get_byte(sb_devc * devc) { int i; for (i = 1000; i; i--) { if (inb(DSP_DATA_AVAIL) & 0x80) return inb(DSP_READ); } return 0xffff; } static void sb_intr (sb_devc *devc) { int status; unsigned char src = 0xff; if (devc->model == MDL_SB16) { src = sb_getmixer(devc, IRQ_STAT); /* Interrupt source register */ if (src & 4) /* MPU401 interrupt */ if(devc->midi_irq_cookie) uart401intr(devc->irq, devc->midi_irq_cookie); if (!(src & 3)) return; /* Not a DSP interrupt */ } if (devc->intr_active && (!devc->fullduplex || (src & 0x01))) { switch (devc->irq_mode) { case IMODE_OUTPUT: DMAbuf_outputintr(devc->dev, 1); break; case IMODE_INPUT: DMAbuf_inputintr(devc->dev); break; case IMODE_INIT: break; case IMODE_MIDI: sb_midi_interrupt(devc); break; default: /* printk(KERN_WARNING "Sound Blaster: Unexpected interrupt\n"); */ ; } } else if (devc->intr_active_16 && (src & 0x02)) { switch (devc->irq_mode_16) { case IMODE_OUTPUT: DMAbuf_outputintr(devc->dev, 1); break; case IMODE_INPUT: DMAbuf_inputintr(devc->dev); break; case IMODE_INIT: break; default: /* printk(KERN_WARNING "Sound Blaster: Unexpected interrupt\n"); */ ; } } /* * Acknowledge interrupts */ if (src & 0x01) status = inb(DSP_DATA_AVAIL); if (devc->model == MDL_SB16 && src & 0x02) status = inb(DSP_DATA_AVL16); } static void pci_intr(sb_devc *devc) { int src = inb(devc->pcibase+0x1A); src&=3; if(src) sb_intr(devc); } static irqreturn_t sbintr(int irq, void *dev_id) { sb_devc 
*devc = dev_id; devc->irq_ok = 1; switch (devc->model) { case MDL_ESSPCI: pci_intr (devc); break; case MDL_ESS: ess_intr (devc); break; default: sb_intr (devc); break; } return IRQ_HANDLED; } int sb_dsp_reset(sb_devc * devc) { int loopc; DEB(printk("Entered sb_dsp_reset()\n")); if (devc->model == MDL_ESS) return ess_dsp_reset (devc); /* This is only for non-ESS chips */ outb(1, DSP_RESET); udelay(10); outb(0, DSP_RESET); udelay(30); for (loopc = 0; loopc < 1000 && !(inb(DSP_DATA_AVAIL) & 0x80); loopc++); if (inb(DSP_READ) != 0xAA) { DDB(printk("sb: No response to RESET\n")); return 0; /* Sorry */ } DEB(printk("sb_dsp_reset() OK\n")); return 1; } static void dsp_get_vers(sb_devc * devc) { int i; unsigned long flags; DDB(printk("Entered dsp_get_vers()\n")); spin_lock_irqsave(&devc->lock, flags); devc->major = devc->minor = 0; sb_dsp_command(devc, 0xe1); /* Get version */ for (i = 100000; i; i--) { if (inb(DSP_DATA_AVAIL) & 0x80) { if (devc->major == 0) devc->major = inb(DSP_READ); else { devc->minor = inb(DSP_READ); break; } } } spin_unlock_irqrestore(&devc->lock, flags); DDB(printk("DSP version %d.%02d\n", devc->major, devc->minor)); } static int sb16_set_dma_hw(sb_devc * devc) { int bits; if (devc->dma8 != 0 && devc->dma8 != 1 && devc->dma8 != 3) { printk(KERN_ERR "SB16: Invalid 8 bit DMA (%d)\n", devc->dma8); return 0; } bits = (1 << devc->dma8); if (devc->dma16 >= 5 && devc->dma16 <= 7) bits |= (1 << devc->dma16); sb_setmixer(devc, DMA_NR, bits); return 1; } static void sb16_set_mpu_port(sb_devc * devc, struct address_info *hw_config) { /* * This routine initializes new MIDI port setup register of SB Vibra (CT2502). 
*/ unsigned char bits = sb_getmixer(devc, 0x84) & ~0x06; switch (hw_config->io_base) { case 0x300: sb_setmixer(devc, 0x84, bits | 0x04); break; case 0x330: sb_setmixer(devc, 0x84, bits | 0x00); break; default: sb_setmixer(devc, 0x84, bits | 0x02); /* Disable MPU */ printk(KERN_ERR "SB16: Invalid MIDI I/O port %x\n", hw_config->io_base); } } static int sb16_set_irq_hw(sb_devc * devc, int level) { int ival; switch (level) { case 5: ival = 2; break; case 7: ival = 4; break; case 9: ival = 1; break; case 10: ival = 8; break; default: printk(KERN_ERR "SB16: Invalid IRQ%d\n", level); return 0; } sb_setmixer(devc, IRQ_NR, ival); return 1; } static void relocate_Jazz16(sb_devc * devc, struct address_info *hw_config) { unsigned char bits = 0; unsigned long flags; if (jazz16_base != 0 && jazz16_base != hw_config->io_base) return; switch (hw_config->io_base) { case 0x220: bits = 1; break; case 0x240: bits = 2; break; case 0x260: bits = 3; break; default: return; } bits = jazz16_bits = bits << 5; jazz16_base = hw_config->io_base; /* * Magic wake up sequence by writing to 0x201 (aka Joystick port) */ spin_lock_irqsave(&jazz16_lock, flags); outb((0xAF), 0x201); outb((0x50), 0x201); outb((bits), 0x201); spin_unlock_irqrestore(&jazz16_lock, flags); } static int init_Jazz16(sb_devc * devc, struct address_info *hw_config) { char name[100]; /* * First try to check that the card has Jazz16 chip. It identifies itself * by returning 0x12 as response to DSP command 0xfa. */ if (!sb_dsp_command(devc, 0xfa)) return 0; if (sb_dsp_get_byte(devc) != 0x12) return 0; /* * OK so far. Now configure the IRQ and DMA channel used by the card. 
*/ if (hw_config->irq < 1 || hw_config->irq > 15 || jazz_irq_bits[hw_config->irq] == 0) { printk(KERN_ERR "Jazz16: Invalid interrupt (IRQ%d)\n", hw_config->irq); return 0; } if (hw_config->dma < 0 || hw_config->dma > 3 || jazz_dma_bits[hw_config->dma] == 0) { printk(KERN_ERR "Jazz16: Invalid 8 bit DMA (DMA%d)\n", hw_config->dma); return 0; } if (hw_config->dma2 < 0) { printk(KERN_ERR "Jazz16: No 16 bit DMA channel defined\n"); return 0; } if (hw_config->dma2 < 5 || hw_config->dma2 > 7 || jazz_dma_bits[hw_config->dma2] == 0) { printk(KERN_ERR "Jazz16: Invalid 16 bit DMA (DMA%d)\n", hw_config->dma2); return 0; } devc->dma16 = hw_config->dma2; if (!sb_dsp_command(devc, 0xfb)) return 0; if (!sb_dsp_command(devc, jazz_dma_bits[hw_config->dma] | (jazz_dma_bits[hw_config->dma2] << 4))) return 0; if (!sb_dsp_command(devc, jazz_irq_bits[hw_config->irq])) return 0; /* * Now we have configured a standard Jazz16 device. */ devc->model = MDL_JAZZ; strcpy(name, "Jazz16"); hw_config->name = "Jazz16"; devc->caps |= SB_NO_MIDI; return 1; } static void relocate_ess1688(sb_devc * devc) { unsigned char bits; switch (devc->base) { case 0x220: bits = 0x04; break; case 0x230: bits = 0x05; break; case 0x240: bits = 0x06; break; case 0x250: bits = 0x07; break; default: return; /* Wrong port */ } DDB(printk("Doing ESS1688 address selection\n")); /* * ES1688 supports two alternative ways for software address config. * First try the so called Read-Sequence-Key method. */ /* Reset the sequence logic */ inb(0x229); inb(0x229); inb(0x229); /* Perform the read sequence */ inb(0x22b); inb(0x229); inb(0x22b); inb(0x229); inb(0x229); inb(0x22b); inb(0x229); /* Select the base address by reading from it. Then probe using the port. */ inb(devc->base); if (sb_dsp_reset(devc)) /* Bingo */ return; #if 0 /* This causes system lockups (Nokia 386/25 at least) */ /* * The last resort is the system control register method. 
*/ outb((0x00), 0xfb); /* 0xFB is the unlock register */ outb((0x00), 0xe0); /* Select index 0 */ outb((bits), 0xe1); /* Write the config bits */ outb((0x00), 0xf9); /* 0xFB is the lock register */ #endif } int sb_dsp_detect(struct address_info *hw_config, int pci, int pciio, struct sb_module_options *sbmo) { sb_devc sb_info; sb_devc *devc = &sb_info; memset((char *) &sb_info, 0, sizeof(sb_info)); /* Zero everything */ /* Copy module options in place */ if(sbmo) memcpy(&devc->sbmo, sbmo, sizeof(struct sb_module_options)); sb_info.my_mididev = -1; sb_info.my_mixerdev = -1; sb_info.dev = -1; /* * Initialize variables */ DDB(printk("sb_dsp_detect(%x) entered\n", hw_config->io_base)); spin_lock_init(&devc->lock); devc->type = hw_config->card_subtype; devc->base = hw_config->io_base; devc->irq = hw_config->irq; devc->dma8 = hw_config->dma; devc->dma16 = -1; devc->pcibase = pciio; if(pci == SB_PCI_ESSMAESTRO) { devc->model = MDL_ESSPCI; devc->caps |= SB_PCI_IRQ; hw_config->driver_use_1 |= SB_PCI_IRQ; hw_config->card_subtype = MDL_ESSPCI; } if(pci == SB_PCI_YAMAHA) { devc->model = MDL_YMPCI; devc->caps |= SB_PCI_IRQ; hw_config->driver_use_1 |= SB_PCI_IRQ; hw_config->card_subtype = MDL_YMPCI; printk("Yamaha PCI mode.\n"); } if (devc->sbmo.acer) { unsigned long flags; spin_lock_irqsave(&devc->lock, flags); inb(devc->base + 0x09); inb(devc->base + 0x09); inb(devc->base + 0x09); inb(devc->base + 0x0b); inb(devc->base + 0x09); inb(devc->base + 0x0b); inb(devc->base + 0x09); inb(devc->base + 0x09); inb(devc->base + 0x0b); inb(devc->base + 0x09); inb(devc->base + 0x00); spin_unlock_irqrestore(&devc->lock, flags); } /* * Detect the device */ if (sb_dsp_reset(devc)) dsp_get_vers(devc); else devc->major = 0; if (devc->type == 0 || devc->type == MDL_JAZZ || devc->type == MDL_SMW) if (devc->major == 0 || (devc->major == 3 && devc->minor == 1)) relocate_Jazz16(devc, hw_config); if (devc->major == 0 && (devc->type == MDL_ESS || devc->type == 0)) relocate_ess1688(devc); if 
(!sb_dsp_reset(devc)) { DDB(printk("SB reset failed\n")); #ifdef MODULE printk(KERN_INFO "sb: dsp reset failed.\n"); #endif return 0; } if (devc->major == 0) dsp_get_vers(devc); if (devc->major == 3 && devc->minor == 1) { if (devc->type == MDL_AZTECH) /* SG Washington? */ { if (sb_dsp_command(devc, 0x09)) if (sb_dsp_command(devc, 0x00)) /* Enter WSS mode */ { int i; /* Have some delay */ for (i = 0; i < 10000; i++) inb(DSP_DATA_AVAIL); devc->caps = SB_NO_AUDIO | SB_NO_MIDI; /* Mixer only */ devc->model = MDL_AZTECH; } } } if(devc->type == MDL_ESSPCI) devc->model = MDL_ESSPCI; if(devc->type == MDL_YMPCI) { printk("YMPCI selected\n"); devc->model = MDL_YMPCI; } /* * Save device information for sb_dsp_init() */ detected_devc = kmalloc(sizeof(sb_devc), GFP_KERNEL); if (detected_devc == NULL) { printk(KERN_ERR "sb: Can't allocate memory for device information\n"); return 0; } memcpy(detected_devc, devc, sizeof(sb_devc)); MDB(printk(KERN_INFO "SB %d.%02d detected OK (%x)\n", devc->major, devc->minor, hw_config->io_base)); return 1; } int sb_dsp_init(struct address_info *hw_config, struct module *owner) { sb_devc *devc; char name[100]; extern int sb_be_quiet; int mixer22, mixer30; /* * Check if we had detected a SB device earlier */ DDB(printk("sb_dsp_init(%x) entered\n", hw_config->io_base)); name[0] = 0; if (detected_devc == NULL) { MDB(printk("No detected device\n")); return 0; } devc = detected_devc; detected_devc = NULL; if (devc->base != hw_config->io_base) { DDB(printk("I/O port mismatch\n")); release_region(devc->base, 16); return 0; } /* * Now continue initialization of the device */ devc->caps = hw_config->driver_use_1; if (!((devc->caps & SB_NO_AUDIO) && (devc->caps & SB_NO_MIDI)) && hw_config->irq > 0) { /* IRQ setup */ /* * ESS PCI cards do shared PCI IRQ stuff. Since they * will get shared PCI irq lines we must cope. 
*/ int i=(devc->caps&SB_PCI_IRQ)?IRQF_SHARED:0; if (request_irq(hw_config->irq, sbintr, i, "soundblaster", devc) < 0) { printk(KERN_ERR "SB: Can't allocate IRQ%d\n", hw_config->irq); release_region(devc->base, 16); return 0; } devc->irq_ok = 0; if (devc->major == 4) if (!sb16_set_irq_hw(devc, devc->irq)) /* Unsupported IRQ */ { free_irq(devc->irq, devc); release_region(devc->base, 16); return 0; } if ((devc->type == 0 || devc->type == MDL_ESS) && devc->major == 3 && devc->minor == 1) { /* Handle various chipsets which claim they are SB Pro compatible */ if ((devc->type != 0 && devc->type != MDL_ESS) || !ess_init(devc, hw_config)) { if ((devc->type != 0 && devc->type != MDL_JAZZ && devc->type != MDL_SMW) || !init_Jazz16(devc, hw_config)) { DDB(printk("This is a genuine SB Pro\n")); } } } if (devc->major == 4 && devc->minor <= 11 ) /* Won't work */ devc->irq_ok = 1; else { int n; for (n = 0; n < 3 && devc->irq_ok == 0; n++) { if (sb_dsp_command(devc, 0xf2)) /* Cause interrupt immediately */ { int i; for (i = 0; !devc->irq_ok && i < 10000; i++); } } if (!devc->irq_ok) printk(KERN_WARNING "sb: Interrupt test on IRQ%d failed - Probable IRQ conflict\n", devc->irq); else { DDB(printk("IRQ test OK (IRQ%d)\n", devc->irq)); } } } /* IRQ setup */ last_sb = devc; switch (devc->major) { case 1: /* SB 1.0 or 1.5 */ devc->model = hw_config->card_subtype = MDL_SB1; break; case 2: /* SB 2.x */ if (devc->minor == 0) devc->model = hw_config->card_subtype = MDL_SB2; else devc->model = hw_config->card_subtype = MDL_SB201; break; case 3: /* SB Pro and most clones */ switch (devc->model) { case 0: devc->model = hw_config->card_subtype = MDL_SBPRO; if (hw_config->name == NULL) hw_config->name = "Sound Blaster Pro (8 BIT ONLY)"; break; case MDL_ESS: ess_dsp_init(devc, hw_config); break; } break; case 4: devc->model = hw_config->card_subtype = MDL_SB16; /* * ALS007 and ALS100 return DSP version 4.2 and have 2 post-reset !=0 * registers at 0x3c and 0x4c (output ctrl registers on ALS007) 
whereas * a "standard" SB16 doesn't have a register at 0x4c. ALS100 actively * updates register 0x22 whenever 0x30 changes, as per the SB16 spec. * Since ALS007 doesn't, this can be used to differentiate the 2 cards. */ if ((devc->minor == 2) && sb_getmixer(devc,0x3c) && sb_getmixer(devc,0x4c)) { mixer30 = sb_getmixer(devc,0x30); sb_setmixer(devc,0x22,(mixer22=sb_getmixer(devc,0x22)) & 0x0f); sb_setmixer(devc,0x30,0xff); /* ALS100 will force 0x30 to 0xf8 like SB16; ALS007 will allow 0xff. */ /* Register 0x22 & 0xf0 on ALS100 == 0xf0; on ALS007 it == 0x10. */ if ((sb_getmixer(devc,0x30) != 0xff) || ((sb_getmixer(devc,0x22) & 0xf0) != 0x10)) { devc->submodel = SUBMDL_ALS100; if (hw_config->name == NULL) hw_config->name = "Sound Blaster 16 (ALS-100)"; } else { sb_setmixer(devc,0x3c,0x1f); /* Enable all inputs */ sb_setmixer(devc,0x4c,0x1f); sb_setmixer(devc,0x22,mixer22); /* Restore 0x22 to original value */ devc->submodel = SUBMDL_ALS007; if (hw_config->name == NULL) hw_config->name = "Sound Blaster 16 (ALS-007)"; } sb_setmixer(devc,0x30,mixer30); } else if (hw_config->name == NULL) hw_config->name = "Sound Blaster 16"; if (hw_config->dma2 == -1) devc->dma16 = devc->dma8; else if (hw_config->dma2 < 5 || hw_config->dma2 > 7) { printk(KERN_WARNING "SB16: Bad or missing 16 bit DMA channel\n"); devc->dma16 = devc->dma8; } else devc->dma16 = hw_config->dma2; if(!sb16_set_dma_hw(devc)) { free_irq(devc->irq, devc); release_region(hw_config->io_base, 16); return 0; } devc->caps |= SB_NO_MIDI; } if (!(devc->caps & SB_NO_MIXER)) if (devc->major == 3 || devc->major == 4) sb_mixer_init(devc, owner); if (!(devc->caps & SB_NO_MIDI)) sb_dsp_midi_init(devc, owner); if (hw_config->name == NULL) hw_config->name = "Sound Blaster (8 BIT/MONO ONLY)"; sprintf(name, "%s (%d.%02d)", hw_config->name, devc->major, devc->minor); conf_printf(name, hw_config); /* * Assuming that a sound card is Sound Blaster (compatible) is the most common * configuration error and the mother of all problems. 
Usually sound cards * emulate SB Pro but in addition they have a 16 bit native mode which should be * used in Unix. See Readme.cards for more information about configuring OSS/Free * properly. */ if (devc->model <= MDL_SBPRO) { if (devc->major == 3 && devc->minor != 1) /* "True" SB Pro should have v3.1 (rare ones may have 3.2). */ { printk(KERN_INFO "This sound card may not be fully Sound Blaster Pro compatible.\n"); printk(KERN_INFO "In many cases there is another way to configure OSS so that\n"); printk(KERN_INFO "it works properly with OSS (for example in 16 bit mode).\n"); printk(KERN_INFO "Please ignore this message if you _really_ have a SB Pro.\n"); } else if (!sb_be_quiet && devc->model == MDL_SBPRO) { printk(KERN_INFO "SB DSP version is just %d.%02d which means that your card is\n", devc->major, devc->minor); printk(KERN_INFO "several years old (8 bit only device) or alternatively the sound driver\n"); printk(KERN_INFO "is incorrectly configured.\n"); } } hw_config->card_subtype = devc->model; hw_config->slots[0]=devc->dev; last_devc = devc; /* For SB MPU detection */ if (!(devc->caps & SB_NO_AUDIO) && devc->dma8 >= 0) { if (sound_alloc_dma(devc->dma8, "SoundBlaster8")) { printk(KERN_WARNING "Sound Blaster: Can't allocate 8 bit DMA channel %d\n", devc->dma8); } if (devc->dma16 >= 0 && devc->dma16 != devc->dma8) { if (sound_alloc_dma(devc->dma16, "SoundBlaster16")) printk(KERN_WARNING "Sound Blaster: can't allocate 16 bit DMA channel %d.\n", devc->dma16); } sb_audio_init(devc, name, owner); hw_config->slots[0]=devc->dev; } else { MDB(printk("Sound Blaster: no audio devices found.\n")); } return 1; } /* if (sbmpu) below we allow mpu401 to manage the midi devs otherwise we have to unload them. 
(Andrzej Krzysztofowicz) */

/*
 * Tear down a previously initialized Sound Blaster device: release the
 * I/O regions, free the DMA channels and IRQ, and unload the child
 * mixer/MIDI/audio devices.  When 'sbmpu' is nonzero the MPU401/UART401
 * driver manages the MIDI device, so it is not unloaded here.
 */
void sb_dsp_unload(struct address_info *hw_config, int sbmpu)
{
	sb_devc *devc;

	devc = audio_devs[hw_config->slots[0]]->devc;

	if (devc && devc->base == hw_config->io_base)
	{
		/* NOTE(review): '&' bit-test against the MDL_ESS model value looks
		   like it may have been meant as '=='; matches upstream — verify. */
		if ((devc->model & MDL_ESS) && devc->pcibase)
			release_region(devc->pcibase, 8);
		release_region(devc->base, 16);
		if (!(devc->caps & SB_NO_AUDIO))
		{
			sound_free_dma(devc->dma8);
			if (devc->dma16 >= 0)
				sound_free_dma(devc->dma16);
		}
		if (!(devc->caps & SB_NO_AUDIO && devc->caps & SB_NO_MIDI))
		{
			if (devc->irq > 0)
				free_irq(devc->irq, devc);

			sb_mixer_unload(devc);

			/* We don't have to do this bit any more the UART401 is its
			   own master -- Krzysztof Halasa */
			/* But we have to do it, if UART401 is not detected */
			if (!sbmpu)
				sound_unload_mididev(devc->my_mididev);
			sound_unload_audiodev(devc->dev);
		}
		kfree(devc);
	}
	else
		release_region(hw_config->io_base, 16);

	/* kfree(NULL) is a no-op, so a consumed detected_devc is harmless. */
	kfree(detected_devc);
}

/*
 * Mixer access routines
 *
 * ES1887 modifications: some mixer registers reside in the
 * range above 0xa0. These must be accessed in another way.
*/

/*
 * Write 'value' into mixer register 'port'.  ESS chips use their own
 * extended access method (ess_setmixer); everything else goes through
 * the MIXER_ADDR/MIXER_DATA index/data register pair, with settle
 * delays, under the per-device spinlock.
 */
void sb_setmixer(sb_devc * devc, unsigned int port, unsigned int value)
{
	unsigned long flags;

	if (devc->model == MDL_ESS) {
		ess_setmixer (devc, port, value);
		return;
	}

	spin_lock_irqsave(&devc->lock, flags);

	outb(((unsigned char) (port & 0xff)), MIXER_ADDR);
	udelay(20);
	outb(((unsigned char) (value & 0xff)), MIXER_DATA);
	udelay(20);

	spin_unlock_irqrestore(&devc->lock, flags);
}

/*
 * Read and return the current value of mixer register 'port' via the
 * same index/data protocol as sb_setmixer().
 */
unsigned int sb_getmixer(sb_devc * devc, unsigned int port)
{
	unsigned int val;
	unsigned long flags;

	if (devc->model == MDL_ESS)
		return ess_getmixer (devc, port);

	spin_lock_irqsave(&devc->lock, flags);

	outb(((unsigned char) (port & 0xff)), MIXER_ADDR);
	udelay(20);
	val = inb(MIXER_DATA);
	udelay(20);

	spin_unlock_irqrestore(&devc->lock, flags);

	return val;
}

/*
 * Read-modify-write helper: replace the bits selected by 'mask' in
 * mixer register 'reg' with the corresponding bits of 'val'.
 */
void sb_chgmixer
	(sb_devc * devc, unsigned int reg, unsigned int mask, unsigned int val)
{
	int value;

	value = sb_getmixer(devc, reg);
	value = (value & ~mask) | (val & mask);
	sb_setmixer(devc, reg, value);
}

/*
 * MPU401 MIDI initialization.
 */

/*
 * Write one byte to SoundMan Wave microcontroller RAM at 'addr'
 * through the index/data registers at 'base'.
 */
static void smw_putmem(sb_devc * devc, int base, int addr, unsigned char val)
{
	unsigned long flags;

	spin_lock_irqsave(&jazz16_lock, flags);	/* NOT the SB card? */

	outb((addr & 0xff), base + 1);	/* Low address bits */
	outb((addr >> 8), base + 2);	/* High address bits */
	outb((val), base);	/* Data */

	spin_unlock_irqrestore(&jazz16_lock, flags);
}

/*
 * Read one byte back from SoundMan Wave microcontroller RAM at 'addr'.
 */
static unsigned char smw_getmem(sb_devc * devc, int base, int addr)
{
	unsigned long flags;
	unsigned char val;

	spin_lock_irqsave(&jazz16_lock, flags);	/* NOT the SB card?
*/ outb((addr & 0xff), base + 1); /* Low address bits */ outb((addr >> 8), base + 2); /* High address bits */ val = inb(base); /* Data */ spin_unlock_irqrestore(&jazz16_lock, flags); return val; } static int smw_midi_init(sb_devc * devc, struct address_info *hw_config) { int mpu_base = hw_config->io_base; int mp_base = mpu_base + 4; /* Microcontroller base */ int i; unsigned char control; /* * Reset the microcontroller so that the RAM can be accessed */ control = inb(mpu_base + 7); outb((control | 3), mpu_base + 7); /* Set last two bits to 1 (?) */ outb(((control & 0xfe) | 2), mpu_base + 7); /* xxxxxxx0 resets the mc */ mdelay(3); /* Wait at least 1ms */ outb((control & 0xfc), mpu_base + 7); /* xxxxxx00 enables RAM */ /* * Detect microcontroller by probing the 8k RAM area */ smw_putmem(devc, mp_base, 0, 0x00); smw_putmem(devc, mp_base, 1, 0xff); udelay(10); if (smw_getmem(devc, mp_base, 0) != 0x00 || smw_getmem(devc, mp_base, 1) != 0xff) { DDB(printk("SM Wave: No microcontroller RAM detected (%02x, %02x)\n", smw_getmem(devc, mp_base, 0), smw_getmem(devc, mp_base, 1))); return 0; /* No RAM */ } /* * There is RAM so assume it's really a SM Wave */ devc->model = MDL_SMW; smw_mixer_init(devc); #ifdef MODULE if (!smw_ucode) { smw_ucodeLen = mod_firmware_load("/etc/sound/midi0001.bin", (void *) &smw_ucode); smw_free = smw_ucode; } #endif if (smw_ucodeLen > 0) { if (smw_ucodeLen != 8192) { printk(KERN_ERR "SM Wave: Invalid microcode (MIDI0001.BIN) length\n"); return 1; } /* * Download microcode */ for (i = 0; i < 8192; i++) smw_putmem(devc, mp_base, i, smw_ucode[i]); /* * Verify microcode */ for (i = 0; i < 8192; i++) if (smw_getmem(devc, mp_base, i) != smw_ucode[i]) { printk(KERN_ERR "SM Wave: Microcode verification failed\n"); return 0; } } control = 0; #ifdef SMW_SCSI_IRQ /* * Set the SCSI interrupt (IRQ2/9, IRQ3 or IRQ10). The SCSI interrupt * is disabled by default. 
* * FIXME - make this a module option * * BTW the Zilog 5380 SCSI controller is located at MPU base + 0x10. */ { static unsigned char scsi_irq_bits[] = { 0, 0, 3, 1, 0, 0, 0, 0, 0, 3, 2, 0, 0, 0, 0, 0 }; control |= scsi_irq_bits[SMW_SCSI_IRQ] << 6; } #endif #ifdef SMW_OPL4_ENABLE /* * Make the OPL4 chip visible on the PC bus at 0x380. * * There is no need to enable this feature since this driver * doesn't support OPL4 yet. Also there is no RAM in SM Wave so * enabling OPL4 is pretty useless. */ control |= 0x10; /* Uses IRQ12 if bit 0x20 == 0 */ /* control |= 0x20; Uncomment this if you want to use IRQ7 */ #endif outb((control | 0x03), mpu_base + 7); /* xxxxxx11 restarts */ hw_config->name = "SoundMan Wave"; return 1; } static int init_Jazz16_midi(sb_devc * devc, struct address_info *hw_config) { int mpu_base = hw_config->io_base; int sb_base = devc->base; int irq = hw_config->irq; unsigned char bits = 0; unsigned long flags; if (irq < 0) irq *= -1; if (irq < 1 || irq > 15 || jazz_irq_bits[irq] == 0) { printk(KERN_ERR "Jazz16: Invalid MIDI interrupt (IRQ%d)\n", irq); return 0; } switch (sb_base) { case 0x220: bits = 1; break; case 0x240: bits = 2; break; case 0x260: bits = 3; break; default: return 0; } bits = jazz16_bits = bits << 5; switch (mpu_base) { case 0x310: bits |= 1; break; case 0x320: bits |= 2; break; case 0x330: bits |= 3; break; default: printk(KERN_ERR "Jazz16: Invalid MIDI I/O port %x\n", mpu_base); return 0; } /* * Magic wake up sequence by writing to 0x201 (aka Joystick port) */ spin_lock_irqsave(&jazz16_lock, flags); outb(0xAF, 0x201); outb(0x50, 0x201); outb(bits, 0x201); spin_unlock_irqrestore(&jazz16_lock, flags); hw_config->name = "Jazz16"; smw_midi_init(devc, hw_config); if (!sb_dsp_command(devc, 0xfb)) return 0; if (!sb_dsp_command(devc, jazz_dma_bits[devc->dma8] | (jazz_dma_bits[devc->dma16] << 4))) return 0; if (!sb_dsp_command(devc, jazz_irq_bits[devc->irq] | (jazz_irq_bits[irq] << 4))) return 0; return 1; } int probe_sbmpu(struct 
address_info *hw_config, struct module *owner) { sb_devc *devc = last_devc; int ret; if (last_devc == NULL) return 0; last_devc = NULL; if (hw_config->io_base <= 0) { /* The real vibra16 is fine about this, but we have to go wipe up after Cyrix again */ if(devc->model == MDL_SB16 && devc->minor >= 12) { unsigned char bits = sb_getmixer(devc, 0x84) & ~0x06; sb_setmixer(devc, 0x84, bits | 0x02); /* Disable MPU */ } return 0; } #if defined(CONFIG_SOUND_MPU401) if (devc->model == MDL_ESS) { struct resource *ports; ports = request_region(hw_config->io_base, 2, "mpu401"); if (!ports) { printk(KERN_ERR "sbmpu: I/O port conflict (%x)\n", hw_config->io_base); return 0; } if (!ess_midi_init(devc, hw_config)) { release_region(hw_config->io_base, 2); return 0; } hw_config->name = "ESS1xxx MPU"; devc->midi_irq_cookie = NULL; if (!probe_mpu401(hw_config, ports)) { release_region(hw_config->io_base, 2); return 0; } attach_mpu401(hw_config, owner); if (last_sb->irq == -hw_config->irq) last_sb->midi_irq_cookie = (void *)(long) hw_config->slots[1]; return 1; } #endif switch (devc->model) { case MDL_SB16: if (hw_config->io_base != 0x300 && hw_config->io_base != 0x330) { printk(KERN_ERR "SB16: Invalid MIDI port %x\n", hw_config->io_base); return 0; } hw_config->name = "Sound Blaster 16"; if (hw_config->irq < 3 || hw_config->irq == devc->irq) hw_config->irq = -devc->irq; if (devc->minor > 12) /* What is Vibra's version??? 
*/
				sb16_set_mpu_port(devc, hw_config);
			break;

		case MDL_JAZZ:
			/* Negative IRQ means "shared with the SB device". */
			if (hw_config->irq < 3 || hw_config->irq == devc->irq)
				hw_config->irq = -devc->irq;
			if (!init_Jazz16_midi(devc, hw_config))
				return 0;
			break;

		case MDL_YMPCI:
			hw_config->name = "Yamaha PCI Legacy";
			printk("Yamaha PCI legacy UART401 check.\n");
			break;

		default:
			return 0;
	}

	ret = probe_uart401(hw_config, owner);
	if (ret)
		last_sb->midi_irq_cookie=midi_devs[hw_config->slots[4]]->devc;
	return ret;
}

/*
 * Detach the MIDI interface that probe_sbmpu() attached: the MPU401
 * driver for ESS cards, the UART401 driver for everything else.
 */
void unload_sbmpu(struct address_info *hw_config)
{
#if defined(CONFIG_SOUND_MPU401)
	if (!strcmp (hw_config->name, "ESS1xxx MPU"))
	{
		unload_mpu401(hw_config);
		return;
	}
#endif
	unload_uart401(hw_config);
}

EXPORT_SYMBOL(sb_dsp_init);
EXPORT_SYMBOL(sb_dsp_detect);
EXPORT_SYMBOL(sb_dsp_unload);
EXPORT_SYMBOL(sb_be_quiet);
EXPORT_SYMBOL(probe_sbmpu);
EXPORT_SYMBOL(unload_sbmpu);
EXPORT_SYMBOL(smw_free);
MODULE_LICENSE("GPL");
gpl-2.0
clearwa/mypi
drivers/vhost/scsi.c
162
65753
/******************************************************************************* * Vhost kernel TCM fabric driver for virtio SCSI initiators * * (C) Copyright 2010-2013 Datera, Inc. * (C) Copyright 2010-2012 IBM Corp. * * Licensed to the Linux Foundation under the General Public License (GPL) version 2. * * Authors: Nicholas A. Bellinger <nab@daterainc.com> * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * ****************************************************************************/ #include <linux/module.h> #include <linux/moduleparam.h> #include <generated/utsrelease.h> #include <linux/utsname.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/kthread.h> #include <linux/types.h> #include <linux/string.h> #include <linux/configfs.h> #include <linux/ctype.h> #include <linux/compat.h> #include <linux/eventfd.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <asm/unaligned.h> #include <scsi/scsi.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> #include <target/target_core_fabric_configfs.h> #include <target/target_core_configfs.h> #include <target/configfs_macros.h> #include <linux/vhost.h> #include <linux/virtio_scsi.h> #include <linux/llist.h> #include <linux/bitmap.h> #include <linux/percpu_ida.h> #include "vhost.h" #define VHOST_SCSI_VERSION "v0.1" #define VHOST_SCSI_NAMELEN 256 #define VHOST_SCSI_MAX_CDB_SIZE 32 #define VHOST_SCSI_DEFAULT_TAGS 256 #define VHOST_SCSI_PREALLOC_SGLS 2048 #define VHOST_SCSI_PREALLOC_UPAGES 
2048 #define VHOST_SCSI_PREALLOC_PROT_SGLS 512 struct vhost_scsi_inflight { /* Wait for the flush operation to finish */ struct completion comp; /* Refcount for the inflight reqs */ struct kref kref; }; struct vhost_scsi_cmd { /* Descriptor from vhost_get_vq_desc() for virt_queue segment */ int tvc_vq_desc; /* virtio-scsi initiator task attribute */ int tvc_task_attr; /* virtio-scsi response incoming iovecs */ int tvc_in_iovs; /* virtio-scsi initiator data direction */ enum dma_data_direction tvc_data_direction; /* Expected data transfer length from virtio-scsi header */ u32 tvc_exp_data_len; /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */ u64 tvc_tag; /* The number of scatterlists associated with this cmd */ u32 tvc_sgl_count; u32 tvc_prot_sgl_count; /* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */ u32 tvc_lun; /* Pointer to the SGL formatted memory from virtio-scsi */ struct scatterlist *tvc_sgl; struct scatterlist *tvc_prot_sgl; struct page **tvc_upages; /* Pointer to response header iovec */ struct iovec *tvc_resp_iov; /* Pointer to vhost_scsi for our device */ struct vhost_scsi *tvc_vhost; /* Pointer to vhost_virtqueue for the cmd */ struct vhost_virtqueue *tvc_vq; /* Pointer to vhost nexus memory */ struct vhost_scsi_nexus *tvc_nexus; /* The TCM I/O descriptor that is accessed via container_of() */ struct se_cmd tvc_se_cmd; /* work item used for cmwq dispatch to vhost_scsi_submission_work() */ struct work_struct work; /* Copy of the incoming SCSI command descriptor block (CDB) */ unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE]; /* Sense buffer that will be mapped into outgoing status */ unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER]; /* Completed commands list, serviced from vhost worker thread */ struct llist_node tvc_completion_list; /* Used to track inflight cmd */ struct vhost_scsi_inflight *inflight; }; struct vhost_scsi_nexus { /* Pointer to TCM session for I_T Nexus */ struct se_session *tvn_se_sess; }; 
/* Per-initiator node ACL; wraps the TCM se_node_acl. */
struct vhost_scsi_nacl {
	/* Binary World Wide unique Port Name for Vhost Initiator port */
	u64 iport_wwpn;
	/* ASCII formatted WWPN for Sas Initiator port */
	char iport_name[VHOST_SCSI_NAMELEN];
	/* Returned by vhost_scsi_make_nodeacl() */
	struct se_node_acl se_node_acl;
};

/* Per target portal group state, linked on vhost_scsi_list. */
struct vhost_scsi_tpg {
	/* Vhost port target portal group tag for TCM */
	u16 tport_tpgt;
	/* Used to track number of TPG Port/Lun Links wrt to explicit I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_vhost_count;
	/* Used for enabling T10-PI with legacy devices */
	int tv_fabric_prot_type;
	/* list for vhost_scsi_list */
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
	struct vhost_scsi_nexus *tpg_nexus;
	/* Pointer back to vhost_scsi_tport */
	struct vhost_scsi_tport *tport;
	/* Returned by vhost_scsi_make_tpg() */
	struct se_portal_group se_tpg;
	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
	struct vhost_scsi *vhost_scsi;
};

/* Target port (WWPN) state; wraps the TCM se_wwn. */
struct vhost_scsi_tport {
	/* SCSI protocol the tport is providing */
	u8 tport_proto_id;
	/* Binary World Wide unique Port Name for Vhost Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for Vhost Target port */
	char tport_name[VHOST_SCSI_NAMELEN];
	/* Returned by vhost_scsi_make_tport() */
	struct se_wwn tport_wwn;
};

struct vhost_scsi_evt {
	/* event to be sent to guest */
	struct virtio_scsi_event event;
	/* event list, serviced from vhost worker thread */
	struct llist_node list;
};

/* Fixed virtqueue indices per the virtio-scsi device layout. */
enum {
	VHOST_SCSI_VQ_CTL = 0,
	VHOST_SCSI_VQ_EVT = 1,
	VHOST_SCSI_VQ_IO = 2,
};

/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT.
*/
enum {
	VHOST_SCSI_FEATURES = VHOST_FEATURES |
			      (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
			      (1ULL << VIRTIO_SCSI_F_T10_PI) |
			      (1ULL << VIRTIO_F_ANY_LAYOUT) |
			      (1ULL << VIRTIO_F_VERSION_1)
};

#define VHOST_SCSI_MAX_TARGET	256
#define VHOST_SCSI_MAX_VQ	128
#define VHOST_SCSI_MAX_EVENT	128

struct vhost_scsi_virtqueue {
	struct vhost_virtqueue vq;
	/*
	 * Reference counting for inflight reqs, used for flush operation. At
	 * each time, one reference tracks new commands submitted, while we
	 * wait for another one to reach 0.
	 */
	struct vhost_scsi_inflight inflights[2];
	/*
	 * Indicate current inflight in use, protected by vq->mutex.
	 * Writers must also take dev mutex and flush under it.
	 */
	int inflight_idx;
};

/* Top-level per-device state, one per opened vhost-scsi device. */
struct vhost_scsi {
	/* Protected by vhost_scsi->dev.mutex */
	struct vhost_scsi_tpg **vs_tpg;
	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

	struct vhost_dev dev;
	struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];

	struct vhost_work vs_completion_work; /* cmd completion work item */
	struct llist_head vs_completion_list; /* cmd completion queue */

	struct vhost_work vs_event_work; /* evt injection work item */
	struct llist_head vs_event_list; /* evt injection queue */

	bool vs_events_missed; /* any missed events, protected by vq->mutex */
	int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

static struct target_core_fabric_ops vhost_scsi_ops;
static struct workqueue_struct *vhost_scsi_workqueue;

/* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */
static DEFINE_MUTEX(vhost_scsi_mutex);
static LIST_HEAD(vhost_scsi_list);

/* Number of pages spanned by the user buffer [iov_base, iov_base + iov_len). */
static int iov_num_pages(void __user *iov_base, size_t iov_len)
{
	return (PAGE_ALIGN((unsigned long)iov_base + iov_len) -
	       ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT;
}

/* kref release callback: wake the flusher sleeping on inflight->comp. */
static void vhost_scsi_done_inflight(struct kref *kref)
{
	struct vhost_scsi_inflight *inflight;

	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
	complete(&inflight->comp);
}

/*
 * Flip every vq to a freshly initialized inflight slot; if old_inflight is
 * non-NULL, return the previous slots so the caller can wait them out.
 */
static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
				     struct
vhost_scsi_inflight *old_inflight[])
{
	struct vhost_scsi_inflight *new_inflight;
	struct vhost_virtqueue *vq;
	int idx, i;

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
		vq = &vs->vqs[i].vq;

		mutex_lock(&vq->mutex);

		/* store old inflight */
		idx = vs->vqs[i].inflight_idx;
		if (old_inflight)
			old_inflight[i] = &vs->vqs[i].inflights[idx];

		/* setup new inflight */
		vs->vqs[i].inflight_idx = idx ^ 1;
		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
		kref_init(&new_inflight->kref);
		init_completion(&new_inflight->comp);

		mutex_unlock(&vq->mutex);
	}
}

/* Take a reference on the vq's current inflight slot (vq->mutex held). */
static struct vhost_scsi_inflight *
vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_inflight *inflight;
	struct vhost_scsi_virtqueue *svq;

	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
	inflight = &svq->inflights[svq->inflight_idx];
	kref_get(&inflight->kref);

	return inflight;
}

static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
{
	kref_put(&inflight->kref, vhost_scsi_done_inflight);
}

/* Trivial TCM fabric callbacks: fixed true/false answers. */
static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *vhost_scsi_get_fabric_name(void)
{
	return "vhost";
}

/*
 * Map the tport's SCSI protocol id to the matching TCM helper;
 * unknown ids fall back to SAS emulation.
 */
static u8 vhost_scsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_fabric_proto_ident(se_tpg);
	case SCSI_PROTOCOL_FCP:
		return fc_get_fabric_proto_ident(se_tpg);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_fabric_proto_ident(se_tpg);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_fabric_proto_ident(se_tpg);
}

static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport = tpg->tport;
	return &tport->tport_name[0];
}

static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return tpg->tport_tpgt;
}

static u32 vhost_scsi_get_default_depth(struct se_portal_group *se_tpg)
{
	return 1;
}

/*
 * Encode a Persistent Reservation transport id via the protocol-specific
 * TCM helper; unknown protocol ids fall back to SAS emulation.
 */
static u32
vhost_scsi_get_pr_transport_id(struct se_portal_group *se_tpg,
			       struct se_node_acl *se_nacl,
			       struct t10_pr_registration *pr_reg,
			       int *format_code,
			       unsigned char *buf)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	case SCSI_PROTOCOL_FCP:
		return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
			format_code, buf);
}

/* Length of the PR transport id produced above, same protocol dispatch. */
static u32
vhost_scsi_get_pr_transport_id_len(struct se_portal_group *se_tpg,
				   struct se_node_acl *se_nacl,
				   struct t10_pr_registration *pr_reg,
				   int *format_code)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	case SCSI_PROTOCOL_FCP:
		return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
			format_code);
}

/* Decode a PR-out transport id, same protocol dispatch as above. */
static char *
vhost_scsi_parse_pr_out_transport_id(struct se_portal_group
*se_tpg,
				     const char *buf,
				     u32 *out_tid_len,
				     char **port_nexus_ptr)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	case SCSI_PROTOCOL_FCP:
		return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
			port_nexus_ptr);
}

/* Report whether this TPG enforces T10-PI at the fabric level. */
static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return tpg->tv_fabric_prot_type;
}

static struct se_node_acl *
vhost_scsi_alloc_fabric_acl(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_nacl *nacl;

	nacl = kzalloc(sizeof(struct vhost_scsi_nacl), GFP_KERNEL);
	if (!nacl) {
		pr_err("Unable to allocate struct vhost_scsi_nacl\n");
		return NULL;
	}

	return &nacl->se_node_acl;
}

static void
vhost_scsi_release_fabric_acl(struct se_portal_group *se_tpg,
			      struct se_node_acl *se_nacl)
{
	struct vhost_scsi_nacl *nacl = container_of(se_nacl,
			struct vhost_scsi_nacl, se_node_acl);
	kfree(nacl);
}

static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

/*
 * Final per-command teardown: drop the pinned user pages backing the data
 * and protection scatterlists, then release inflight ref and session tag.
 */
static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
{
	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
				struct vhost_scsi_cmd, tvc_se_cmd);
	struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
	int i;

	if (tv_cmd->tvc_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_sgl[i]));
	}
	if (tv_cmd->tvc_prot_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
	}
vhost_scsi_put_inflight(tv_cmd->inflight);
	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}

static int vhost_scsi_shutdown_session(struct se_session *se_sess)
{
	return 0;
}

static void vhost_scsi_close_session(struct se_session *se_sess)
{
	return;
}

static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);
	return 0;
}

static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static u32 vhost_scsi_get_task_tag(struct se_cmd *se_cmd)
{
	return 0;
}

static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

/*
 * Queue a finished command on the completion list and kick the vhost
 * worker, which drains it in vhost_scsi_complete_cmd_work().
 */
static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
{
	struct vhost_scsi *vs = cmd->tvc_vhost;

	llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}

static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
{
	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
				struct vhost_scsi_cmd, tvc_se_cmd);
	vhost_scsi_complete_cmd(cmd);
	return 0;
}

static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
{
	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
				struct vhost_scsi_cmd, tvc_se_cmd);
	vhost_scsi_complete_cmd(cmd);
	return 0;
}

static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
{
	return;
}

static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

/* Caller holds vq->mutex (vs_events_nr is protected by it). */
static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	vs->vs_events_nr--;
	kfree(evt);
}

/*
 * Allocate a guest event; on overflow or OOM record a missed event
 * instead and return NULL. Caller holds vq->mutex.
 */
static struct vhost_scsi_evt *
vhost_scsi_allocate_evt(struct vhost_scsi *vs,
			u32 event, u32 reason)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt;

	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
		vs->vs_events_missed = true;
		return NULL;
	}

	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt) {
vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
		vs->vs_events_missed = true;
		return NULL;
	}

	evt->event.event = cpu_to_vhost32(vq, event);
	evt->event.reason = cpu_to_vhost32(vq, reason);
	vs->vs_events_nr++;

	return evt;
}

static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;

	/* TODO locking against target/backend threads? */
	transport_generic_free_cmd(se_cmd, 0);
}

static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}

/*
 * Copy one event into a guest-supplied buffer on the event vq; on any
 * failure to obtain or validate a descriptor, mark the event missed.
 * Caller holds vq->mutex.
 */
static void
vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct virtio_scsi_event *event = &evt->event;
	struct virtio_scsi_event __user *eventp;
	unsigned out, in;
	int head, ret;

	if (!vq->private_data) {
		vs->vs_events_missed = true;
		return;
	}

again:
	vhost_disable_notify(&vs->dev, vq);
	head = vhost_get_vq_desc(vq, vq->iov,
			ARRAY_SIZE(vq->iov), &out, &in,
			NULL, NULL);
	if (head < 0) {
		vs->vs_events_missed = true;
		return;
	}
	if (head == vq->num) {
		/* Ring empty: retry if the guest re-enabled notifications. */
		if (vhost_enable_notify(&vs->dev, vq))
			goto again;
		vs->vs_events_missed = true;
		return;
	}

	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
				vq->iov[out].iov_len);
		vs->vs_events_missed = true;
		return;
	}

	if (vs->vs_events_missed) {
		event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
		vs->vs_events_missed = false;
	}

	eventp = vq->iov[out].iov_base;
	ret = __copy_to_user(eventp, event, sizeof(*event));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
}

/* vhost work item: drain vs_event_list and push each event to the guest. */
static void vhost_scsi_evt_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_event_work);
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt;
	struct llist_node *llnode;

	mutex_lock(&vq->mutex);
	llnode = llist_del_all(&vs->vs_event_list);
while (llnode) {
		evt = llist_entry(llnode, struct vhost_scsi_evt, list);
		llnode = llist_next(llnode);
		vhost_scsi_do_evt_work(vs, evt);
		vhost_scsi_free_evt(vs, evt);
	}
	mutex_unlock(&vq->mutex);
}

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_completion_work);
	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
	struct virtio_scsi_cmd_resp v_rsp;
	struct vhost_scsi_cmd *cmd;
	struct llist_node *llnode;
	struct se_cmd *se_cmd;
	struct iov_iter iov_iter;
	int ret, vq;

	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
	llnode = llist_del_all(&vs->vs_completion_list);
	while (llnode) {
		cmd = llist_entry(llnode, struct vhost_scsi_cmd,
				     tvc_completion_list);
		llnode = llist_next(llnode);
		se_cmd = &cmd->tvc_se_cmd;

		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
			cmd, se_cmd->residual_count, se_cmd->scsi_status);

		/* Build the virtio response from the TCM completion state. */
		memset(&v_rsp, 0, sizeof(v_rsp));
		v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
		/* TODO is status_qualifier field needed? */
		v_rsp.status = se_cmd->scsi_status;
		v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
						 se_cmd->scsi_sense_length);
		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
		       se_cmd->scsi_sense_length);

		iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov,
			      cmd->tvc_in_iovs, sizeof(v_rsp));
		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
		if (likely(ret == sizeof(v_rsp))) {
			struct vhost_scsi_virtqueue *q;
			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
			/* Remember which vq to signal once, after the loop. */
			vq = q - vs->vqs;
			__set_bit(vq, signal);
		} else
			pr_err("Faulted on virtio_scsi_cmd_resp\n");

		vhost_scsi_free_cmd(cmd);
	}

	vq = -1;
	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
		< VHOST_SCSI_MAX_VQ)
		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}

/*
 * Grab a pre-allocated command descriptor from the session tag pool and
 * initialize it from the parsed virtio-scsi request. Returns ERR_PTR on
 * missing nexus (-EIO) or tag exhaustion (-ENOMEM).
 */
static struct vhost_scsi_cmd *
vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
		   u32 exp_data_len, int data_direction)
{
	struct vhost_scsi_cmd *cmd;
	struct vhost_scsi_nexus *tv_nexus;
	struct se_session *se_sess;
	struct scatterlist *sg, *prot_sg;
	struct page **pages;
	int tag;

	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
		return ERR_PTR(-EIO);
	}
	se_sess = tv_nexus->tvn_se_sess;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0) {
		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
		return ERR_PTR(-ENOMEM);
	}

	cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
	/* Save the pre-allocated arrays across the memset below. */
	sg = cmd->tvc_sgl;
	prot_sg = cmd->tvc_prot_sgl;
	pages = cmd->tvc_upages;
	memset(cmd, 0, sizeof(struct vhost_scsi_cmd));

	cmd->tvc_sgl = sg;
	cmd->tvc_prot_sgl = prot_sg;
	cmd->tvc_upages = pages;
	cmd->tvc_se_cmd.map_tag = tag;
	cmd->tvc_tag = scsi_tag;
	cmd->tvc_lun = lun;
	cmd->tvc_task_attr = task_attr;
	cmd->tvc_exp_data_len = exp_data_len;
	cmd->tvc_data_direction = data_direction;
	cmd->tvc_nexus = tv_nexus;
	cmd->inflight = vhost_scsi_get_inflight(vq);

	memcpy(cmd->tvc_cdb, cdb,
VHOST_SCSI_MAX_CDB_SIZE);

	return cmd;
}

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
		      void __user *ptr,
		      size_t len,
		      struct scatterlist *sgl,
		      bool write)
{
	unsigned int npages = 0, offset, nbytes;
	unsigned int pages_nr = iov_num_pages(ptr, len);
	struct scatterlist *sg = sgl;
	struct page **pages = cmd->tvc_upages;
	int ret, i;

	if (pages_nr > VHOST_SCSI_PREALLOC_UPAGES) {
		pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
		       " preallocated VHOST_SCSI_PREALLOC_UPAGES: %u\n",
			pages_nr, VHOST_SCSI_PREALLOC_UPAGES);
		return -ENOBUFS;
	}

	ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
	/* No pages were pinned */
	if (ret < 0)
		goto out;
	/* Less pages pinned than wanted */
	if (ret != pages_nr) {
		for (i = 0; i < ret; i++)
			put_page(pages[i]);
		ret = -EFAULT;
		goto out;
	}

	/* Chop the range into page-sized scatterlist entries. */
	while (len > 0) {
		offset = (uintptr_t)ptr & ~PAGE_MASK;
		nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
		sg_set_page(sg, pages[npages], nbytes, offset);
		ptr += nbytes;
		len -= nbytes;
		sg++;
		npages++;
	}

out:
	return ret;
}

/*
 * Count the scatterlist entries an iov_iter will need; -EINVAL if it
 * exceeds max_sgls or the iter has no iovec backing.
 */
static int
vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
{
	int sgl_count = 0;

	if (!iter || !iter->iov) {
		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
		       " present\n", __func__, bytes);
		return -EINVAL;
	}

	sgl_count = iov_iter_npages(iter, 0xffff);
	if (sgl_count > max_sgls) {
		pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
		       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
		return -EINVAL;
	}
	return sgl_count;
}

/*
 * Map each iovec segment of the iter into sg; on failure, unpin every
 * page already installed in the first sg_count entries.
 */
static int
vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
		      struct iov_iter *iter,
		      struct scatterlist *sg, int sg_count)
{
	size_t off = iter->iov_offset;
	int i, ret;

	for (i = 0; i < iter->nr_segs; i++) {
		void __user *base = iter->iov[i].iov_base + off;
		size_t len = iter->iov[i].iov_len - off;

		ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
		if (ret < 0) {
			for (i = 0;
i < sg_count; i++) {
				struct page *page = sg_page(&sg[i]);

				if (page)
					put_page(page);
			}
			return ret;
		}
		sg += ret;
		off = 0;
	}
	return 0;
}

/*
 * Build the protection (optional) and data scatterlists for a command
 * from the supplied iov_iters; counters are reset to 0 on failure so
 * release_cmd will not unpin pages that were never mapped.
 */
static int
vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
		 size_t prot_bytes, struct iov_iter *prot_iter,
		 size_t data_bytes, struct iov_iter *data_iter)
{
	int sgl_count, ret;
	bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);

	if (prot_bytes) {
		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
						 VHOST_SCSI_PREALLOC_PROT_SGLS);
		if (sgl_count < 0)
			return sgl_count;

		sg_init_table(cmd->tvc_prot_sgl, sgl_count);
		cmd->tvc_prot_sgl_count = sgl_count;
		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
			 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);

		ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
					    cmd->tvc_prot_sgl,
					    cmd->tvc_prot_sgl_count);
		if (ret < 0) {
			cmd->tvc_prot_sgl_count = 0;
			return ret;
		}
	}
	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
					 VHOST_SCSI_PREALLOC_SGLS);
	if (sgl_count < 0)
		return sgl_count;

	sg_init_table(cmd->tvc_sgl, sgl_count);
	cmd->tvc_sgl_count = sgl_count;
	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
		  cmd->tvc_sgl, cmd->tvc_sgl_count);

	ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
				    cmd->tvc_sgl, cmd->tvc_sgl_count);
	if (ret < 0) {
		cmd->tvc_sgl_count = 0;
		return ret;
	}
	return 0;
}

/* Translate a virtio-scsi task attribute to the TCM equivalent. */
static int vhost_scsi_to_tcm_attr(int attr)
{
	switch (attr) {
	case VIRTIO_SCSI_S_SIMPLE:
		return TCM_SIMPLE_TAG;
	case VIRTIO_SCSI_S_ORDERED:
		return TCM_ORDERED_TAG;
	case VIRTIO_SCSI_S_HEAD:
		return TCM_HEAD_TAG;
	case VIRTIO_SCSI_S_ACA:
		return TCM_ACA_TAG;
	default:
		break;
	}
	return TCM_SIMPLE_TAG;
}

/*
 * cmwq work item: hand the fully-populated command to the TCM core for
 * execution (dispatched from vhost_scsi_handle_vq()).
 */
static void vhost_scsi_submission_work(struct work_struct *work)
{
	struct vhost_scsi_cmd *cmd =
		container_of(work, struct vhost_scsi_cmd, work);
	struct vhost_scsi_nexus *tv_nexus;
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
	int rc;

	/* FIXME: BIDI operation */
	if (cmd->tvc_sgl_count) {
		sg_ptr = cmd->tvc_sgl;

		if (cmd->tvc_prot_sgl_count)
			sg_prot_ptr = cmd->tvc_prot_sgl;
else
			se_cmd->prot_pto = true;
	} else {
		sg_ptr = NULL;
	}
	tv_nexus = cmd->tvc_nexus;

	rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
			cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
			cmd->tvc_lun, cmd->tvc_exp_data_len,
			vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
			cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
			sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
			cmd->tvc_prot_sgl_count);
	if (rc < 0) {
		/* Report the failure to the guest as a check condition. */
		transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0);
	}
}

/*
 * Immediately complete a request with VIRTIO_SCSI_S_BAD_TARGET, used for
 * any request that cannot be dispatched. Caller holds vq->mutex.
 */
static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
			   struct vhost_virtqueue *vq,
			   int head, unsigned out)
{
	struct virtio_scsi_cmd_resp __user *resp;
	struct virtio_scsi_cmd_resp rsp;
	int ret;

	memset(&rsp, 0, sizeof(rsp));
	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
	resp = vq->iov[out].iov_base;
	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		pr_err("Faulted on virtio_scsi_cmd_resp\n");
}

/*
 * Main request-queue handler: parse each virtio-scsi request (plain or
 * T10-PI layout), map its payload and dispatch it to the workqueue.
 */
static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_tpg **vs_tpg, *tpg;
	struct virtio_scsi_cmd_req v_req;
	struct virtio_scsi_cmd_req_pi v_req_pi;
	struct vhost_scsi_cmd *cmd;
	struct iov_iter out_iter, in_iter, prot_iter, data_iter;
	u64 tag;
	u32 exp_data_len, data_direction;
	unsigned out, in;
	int head, ret, prot_bytes;
	size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
	size_t out_size, in_size;
	u16 lun;
	u8 *target, *lunp, task_attr;
	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
	void *req, *cdb;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
*/
	vs_tpg = vq->private_data;
	if (!vs_tpg)
		goto out;

	vhost_disable_notify(&vs->dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov,
					 ARRAY_SIZE(vq->iov), &out, &in,
					 NULL, NULL);
		pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
			 head, out, in);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
				vhost_disable_notify(&vs->dev, vq);
				continue;
			}
			break;
		}
		/*
		 * Check for a sane response buffer so we can report early
		 * errors back to the guest.
		 */
		if (unlikely(vq->iov[out].iov_len < rsp_size)) {
			vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
				" size, got %zu bytes\n", vq->iov[out].iov_len);
			break;
		}
		/*
		 * Setup pointers and values based upon different virtio-scsi
		 * request header if T10_PI is enabled in KVM guest.
		 */
		if (t10_pi) {
			req = &v_req_pi;
			req_size = sizeof(v_req_pi);
			lunp = &v_req_pi.lun[0];
			target = &v_req_pi.lun[1];
		} else {
			req = &v_req;
			req_size = sizeof(v_req);
			lunp = &v_req.lun[0];
			target = &v_req.lun[1];
		}
		/*
		 * FIXME: Not correct for BIDI operation
		 */
		out_size = iov_length(vq->iov, out);
		in_size = iov_length(&vq->iov[out], in);

		/*
		 * Copy over the virtio-scsi request header, which for a
		 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
		 * single iovec may contain both the header + outgoing
		 * WRITE payloads.
		 *
		 * copy_from_iter() will advance out_iter, so that it will
		 * point at the start of the outgoing WRITE payload, if
		 * DMA_TO_DEVICE is set.
		 */
		iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);

		ret = copy_from_iter(req, req_size, &out_iter);
		if (unlikely(ret != req_size)) {
			vq_err(vq, "Faulted on copy_from_iter\n");
			vhost_scsi_send_bad_target(vs, vq, head, out);
			continue;
		}
		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
		if (unlikely(*lunp != 1)) {
			vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
			vhost_scsi_send_bad_target(vs, vq, head, out);
			continue;
		}

		tpg = ACCESS_ONCE(vs_tpg[*target]);
		if (unlikely(!tpg)) {
			/* Target does not exist, fail the request */
			vhost_scsi_send_bad_target(vs, vq, head, out);
			continue;
		}
		/*
		 * Determine data_direction by calculating the total outgoing
		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
		 * response headers respectively.
		 *
		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
		 * to the right place.
		 *
		 * For DMA_FROM_DEVICE, the iovec will be just past the end
		 * of the virtio-scsi response header in either the same
		 * or immediately following iovec.
		 *
		 * Any associated T10_PI bytes for the outgoing / incoming
		 * payloads are included in calculation of exp_data_len here.
		 */
		prot_bytes = 0;

		if (out_size > req_size) {
			data_direction = DMA_TO_DEVICE;
			exp_data_len = out_size - req_size;
			data_iter = out_iter;
		} else if (in_size > rsp_size) {
			data_direction = DMA_FROM_DEVICE;
			exp_data_len = in_size - rsp_size;

			iov_iter_init(&in_iter, READ, &vq->iov[out], in,
				      rsp_size + exp_data_len);
			iov_iter_advance(&in_iter, rsp_size);
			data_iter = in_iter;
		} else {
			data_direction = DMA_NONE;
			exp_data_len = 0;
		}
		/*
		 * If T10_PI header + payload is present, setup prot_iter values
		 * and recalculate data_iter for vhost_scsi_mapal() mapping to
		 * host scatterlists via get_user_pages_fast().
		 */
		if (t10_pi) {
			if (v_req_pi.pi_bytesout) {
				if (data_direction != DMA_TO_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesout,"
						" but wrong data_direction\n");
					vhost_scsi_send_bad_target(vs, vq, head, out);
					continue;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
			} else if (v_req_pi.pi_bytesin) {
				if (data_direction != DMA_FROM_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesin,"
						" but wrong data_direction\n");
					vhost_scsi_send_bad_target(vs, vq, head, out);
					continue;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
			}
			/*
			 * Set prot_iter to data_iter, and advance past any
			 * preceeding prot_bytes that may be present.
			 *
			 * Also fix up the exp_data_len to reflect only the
			 * actual data payload length.
			 */
			if (prot_bytes) {
				exp_data_len -= prot_bytes;
				prot_iter = data_iter;
				iov_iter_advance(&data_iter, prot_bytes);
			}
			tag = vhost64_to_cpu(vq, v_req_pi.tag);
			task_attr = v_req_pi.task_attr;
			cdb = &v_req_pi.cdb[0];
			lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
		} else {
			tag = vhost64_to_cpu(vq, v_req.tag);
			task_attr = v_req.task_attr;
			cdb = &v_req.cdb[0];
			lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
		}
		/*
		 * Check that the received CDB size does not exceeded our
		 * hardcoded max for vhost-scsi, then get a pre-allocated
		 * cmd descriptor for the new virtio-scsi tag.
		 *
		 * TODO what if cdb was too small for varlen cdb header?
		 */
		if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
			vq_err(vq, "Received SCSI CDB with command_size: %d that"
				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
			vhost_scsi_send_bad_target(vs, vq, head, out);
			continue;
		}
		cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
					 exp_data_len + prot_bytes,
					 data_direction);
		if (IS_ERR(cmd)) {
			vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
			       PTR_ERR(cmd));
			vhost_scsi_send_bad_target(vs, vq, head, out);
			continue;
		}
		cmd->tvc_vhost = vs;
		cmd->tvc_vq = vq;
		cmd->tvc_resp_iov = &vq->iov[out];
		cmd->tvc_in_iovs = in;

		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
			 cmd->tvc_cdb[0], cmd->tvc_lun);
		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);

		if (data_direction != DMA_NONE) {
			ret = vhost_scsi_mapal(cmd,
					       prot_bytes, &prot_iter,
					       exp_data_len, &data_iter);
			if (unlikely(ret)) {
				vq_err(vq, "Failed to map iov to sgl\n");
				vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
				vhost_scsi_send_bad_target(vs, vq, head, out);
				continue;
			}
		}
		/*
		 * Save the descriptor from vhost_get_vq_desc() to be used to
		 * complete the virtio-scsi request in TCM callback context via
		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
		 */
		cmd->tvc_vq_desc = head;
		/*
		 * Dispatch cmd descriptor for cmwq execution in process
		 * context provided by vhost_scsi_workqueue.  This also ensures
		 * cmd is executed on the same kworker CPU as this vhost
		 * thread to gain positive L2 cache locality effects.
		 */
		INIT_WORK(&cmd->work, vhost_scsi_submission_work);
		queue_work(vhost_scsi_workqueue, &cmd->work);
	}
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
	pr_debug("%s: The handling func for control queue.\n", __func__);
}

/*
 * Queue a hotplug/misc event toward the guest; tpg/lun, when provided,
 * are encoded into the virtio-scsi event LUN field.
 */
static void
vhost_scsi_send_evt(struct vhost_scsi *vs,
		   struct vhost_scsi_tpg *tpg,
		   struct se_lun *lun,
		   u32 event, u32 reason)
{
	struct vhost_scsi_evt *evt;

	evt = vhost_scsi_allocate_evt(vs, event, reason);
	if (!evt)
		return;

	if (tpg && lun) {
		/* TODO: share lun setup code with virtio-scsi.ko */
		/*
		 * Note: evt->event is zeroed when we allocate it and
		 * lun[4-7] need to be zero according to virtio-scsi spec.
		 */
		evt->event.lun[0] = 0x01;
		evt->event.lun[1] = tpg->tport_tpgt;
		if (lun->unpacked_lun >= 256)
			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
	}

	llist_add(&evt->list, &vs->vs_event_list);
	vhost_work_queue(&vs->dev, &vs->vs_event_work);
}

/* Event-queue kick: flush any missed-events notification to the guest. */
static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	mutex_lock(&vq->mutex);
	if (!vq->private_data)
		goto out;

	if (vs->vs_events_missed)
		vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	vhost_scsi_handle_vq(vs, vq);
}

static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
{
	vhost_poll_flush(&vs->vqs[index].vq.poll);
}

/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
	struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
	int i;

	/* Init new inflight and remember the old inflight */
	vhost_scsi_init_inflight(vs, old_inflight);

	/*
	 * The
inflight->kref was initialized to 1. We decrement it here to
	 * indicate the start of the flush operation so that it will reach 0
	 * when all the reqs are finished.
	 */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);

	/* Flush both the vhost poll and vhost work */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		vhost_scsi_flush_vq(vs, i);
	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
	vhost_work_flush(&vs->dev, &vs->vs_event_work);

	/* Wait for all reqs issued before the flush to be finished */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		wait_for_completion(&old_inflight[i]->comp);
}

/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * vhost_scsi_tpg with an active struct vhost_scsi_nexus
 *
 * The lock nesting rule is:
 *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
			struct vhost_scsi_target *t)
{
	struct se_portal_group *se_tpg;
	struct vhost_scsi_tport *tv_tport;
	struct vhost_scsi_tpg *tpg;
	struct vhost_scsi_tpg **vs_tpg;
	struct vhost_virtqueue *vq;
	int index, ret, i, len;
	bool match = false;

	mutex_lock(&vhost_scsi_mutex);
	mutex_lock(&vs->dev.mutex);

	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		/* Verify that ring has been setup correctly.
*/ if (!vhost_vq_access_ok(&vs->vqs[index].vq)) { ret = -EFAULT; goto out; } } len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET; vs_tpg = kzalloc(len, GFP_KERNEL); if (!vs_tpg) { ret = -ENOMEM; goto out; } if (vs->vs_tpg) memcpy(vs_tpg, vs->vs_tpg, len); list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) { mutex_lock(&tpg->tv_tpg_mutex); if (!tpg->tpg_nexus) { mutex_unlock(&tpg->tv_tpg_mutex); continue; } if (tpg->tv_tpg_vhost_count != 0) { mutex_unlock(&tpg->tv_tpg_mutex); continue; } tv_tport = tpg->tport; if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) { if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) { kfree(vs_tpg); mutex_unlock(&tpg->tv_tpg_mutex); ret = -EEXIST; goto out; } /* * In order to ensure individual vhost-scsi configfs * groups cannot be removed while in use by vhost ioctl, * go ahead and take an explicit se_tpg->tpg_group.cg_item * dependency now. */ se_tpg = &tpg->se_tpg; ret = target_depend_item(&se_tpg->tpg_group.cg_item); if (ret) { pr_warn("configfs_depend_item() failed: %d\n", ret); kfree(vs_tpg); mutex_unlock(&tpg->tv_tpg_mutex); goto out; } tpg->tv_tpg_vhost_count++; tpg->vhost_scsi = vs; vs_tpg[tpg->tport_tpgt] = tpg; smp_mb__after_atomic(); match = true; } mutex_unlock(&tpg->tv_tpg_mutex); } if (match) { memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn, sizeof(vs->vs_vhost_wwpn)); for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { vq = &vs->vqs[i].vq; mutex_lock(&vq->mutex); vq->private_data = vs_tpg; vhost_init_used(vq); mutex_unlock(&vq->mutex); } ret = 0; } else { ret = -EEXIST; } /* * Act as synchronize_rcu to make sure access to * old vs->vs_tpg is finished. 
*/ vhost_scsi_flush(vs); kfree(vs->vs_tpg); vs->vs_tpg = vs_tpg; out: mutex_unlock(&vs->dev.mutex); mutex_unlock(&vhost_scsi_mutex); return ret; } static int vhost_scsi_clear_endpoint(struct vhost_scsi *vs, struct vhost_scsi_target *t) { struct se_portal_group *se_tpg; struct vhost_scsi_tport *tv_tport; struct vhost_scsi_tpg *tpg; struct vhost_virtqueue *vq; bool match = false; int index, ret, i; u8 target; mutex_lock(&vhost_scsi_mutex); mutex_lock(&vs->dev.mutex); /* Verify that ring has been setup correctly. */ for (index = 0; index < vs->dev.nvqs; ++index) { if (!vhost_vq_access_ok(&vs->vqs[index].vq)) { ret = -EFAULT; goto err_dev; } } if (!vs->vs_tpg) { ret = 0; goto err_dev; } for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) { target = i; tpg = vs->vs_tpg[target]; if (!tpg) continue; mutex_lock(&tpg->tv_tpg_mutex); tv_tport = tpg->tport; if (!tv_tport) { ret = -ENODEV; goto err_tpg; } if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) { pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu" " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n", tv_tport->tport_name, tpg->tport_tpgt, t->vhost_wwpn, t->vhost_tpgt); ret = -EINVAL; goto err_tpg; } tpg->tv_tpg_vhost_count--; tpg->vhost_scsi = NULL; vs->vs_tpg[target] = NULL; match = true; mutex_unlock(&tpg->tv_tpg_mutex); /* * Release se_tpg->tpg_group.cg_item configfs dependency now * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur. */ se_tpg = &tpg->se_tpg; target_undepend_item(&se_tpg->tpg_group.cg_item); } if (match) { for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { vq = &vs->vqs[i].vq; mutex_lock(&vq->mutex); vq->private_data = NULL; mutex_unlock(&vq->mutex); } } /* * Act as synchronize_rcu to make sure access to * old vs->vs_tpg is finished. 
*/ vhost_scsi_flush(vs); kfree(vs->vs_tpg); vs->vs_tpg = NULL; WARN_ON(vs->vs_events_nr); mutex_unlock(&vs->dev.mutex); mutex_unlock(&vhost_scsi_mutex); return 0; err_tpg: mutex_unlock(&tpg->tv_tpg_mutex); err_dev: mutex_unlock(&vs->dev.mutex); mutex_unlock(&vhost_scsi_mutex); return ret; } static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) { struct vhost_virtqueue *vq; int i; if (features & ~VHOST_SCSI_FEATURES) return -EOPNOTSUPP; mutex_lock(&vs->dev.mutex); if ((features & (1 << VHOST_F_LOG_ALL)) && !vhost_log_access_ok(&vs->dev)) { mutex_unlock(&vs->dev.mutex); return -EFAULT; } for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { vq = &vs->vqs[i].vq; mutex_lock(&vq->mutex); vq->acked_features = features; mutex_unlock(&vq->mutex); } mutex_unlock(&vs->dev.mutex); return 0; } static int vhost_scsi_open(struct inode *inode, struct file *f) { struct vhost_scsi *vs; struct vhost_virtqueue **vqs; int r = -ENOMEM, i; vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); if (!vs) { vs = vzalloc(sizeof(*vs)); if (!vs) goto err_vs; } vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL); if (!vqs) goto err_vqs; vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work); vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work); vs->vs_events_nr = 0; vs->vs_events_missed = false; vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq; vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick; vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick; for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) { vqs[i] = &vs->vqs[i].vq; vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; } vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ); vhost_scsi_init_inflight(vs, NULL); f->private_data = vs; return 0; err_vqs: kvfree(vs); err_vs: return r; } static int vhost_scsi_release(struct inode *inode, struct file *f) { struct vhost_scsi *vs = f->private_data; 
struct vhost_scsi_target t; mutex_lock(&vs->dev.mutex); memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn)); mutex_unlock(&vs->dev.mutex); vhost_scsi_clear_endpoint(vs, &t); vhost_dev_stop(&vs->dev); vhost_dev_cleanup(&vs->dev, false); /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */ vhost_scsi_flush(vs); kfree(vs->dev.vqs); kvfree(vs); return 0; } static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl, unsigned long arg) { struct vhost_scsi *vs = f->private_data; struct vhost_scsi_target backend; void __user *argp = (void __user *)arg; u64 __user *featurep = argp; u32 __user *eventsp = argp; u32 events_missed; u64 features; int r, abi_version = VHOST_SCSI_ABI_VERSION; struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; switch (ioctl) { case VHOST_SCSI_SET_ENDPOINT: if (copy_from_user(&backend, argp, sizeof backend)) return -EFAULT; if (backend.reserved != 0) return -EOPNOTSUPP; return vhost_scsi_set_endpoint(vs, &backend); case VHOST_SCSI_CLEAR_ENDPOINT: if (copy_from_user(&backend, argp, sizeof backend)) return -EFAULT; if (backend.reserved != 0) return -EOPNOTSUPP; return vhost_scsi_clear_endpoint(vs, &backend); case VHOST_SCSI_GET_ABI_VERSION: if (copy_to_user(argp, &abi_version, sizeof abi_version)) return -EFAULT; return 0; case VHOST_SCSI_SET_EVENTS_MISSED: if (get_user(events_missed, eventsp)) return -EFAULT; mutex_lock(&vq->mutex); vs->vs_events_missed = events_missed; mutex_unlock(&vq->mutex); return 0; case VHOST_SCSI_GET_EVENTS_MISSED: mutex_lock(&vq->mutex); events_missed = vs->vs_events_missed; mutex_unlock(&vq->mutex); if (put_user(events_missed, eventsp)) return -EFAULT; return 0; case VHOST_GET_FEATURES: features = VHOST_SCSI_FEATURES; if (copy_to_user(featurep, &features, sizeof features)) return -EFAULT; return 0; case VHOST_SET_FEATURES: if (copy_from_user(&features, featurep, sizeof features)) return -EFAULT; return vhost_scsi_set_features(vs, features); default: mutex_lock(&vs->dev.mutex); r = 
vhost_dev_ioctl(&vs->dev, ioctl, argp); /* TODO: flush backend after dev ioctl. */ if (r == -ENOIOCTLCMD) r = vhost_vring_ioctl(&vs->dev, ioctl, argp); mutex_unlock(&vs->dev.mutex); return r; } } #ifdef CONFIG_COMPAT static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl, unsigned long arg) { return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg)); } #endif static const struct file_operations vhost_scsi_fops = { .owner = THIS_MODULE, .release = vhost_scsi_release, .unlocked_ioctl = vhost_scsi_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = vhost_scsi_compat_ioctl, #endif .open = vhost_scsi_open, .llseek = noop_llseek, }; static struct miscdevice vhost_scsi_misc = { MISC_DYNAMIC_MINOR, "vhost-scsi", &vhost_scsi_fops, }; static int __init vhost_scsi_register(void) { return misc_register(&vhost_scsi_misc); } static int vhost_scsi_deregister(void) { return misc_deregister(&vhost_scsi_misc); } static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport) { switch (tport->tport_proto_id) { case SCSI_PROTOCOL_SAS: return "SAS"; case SCSI_PROTOCOL_FCP: return "FCP"; case SCSI_PROTOCOL_ISCSI: return "iSCSI"; default: break; } return "Unknown"; } static void vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg, struct se_lun *lun, bool plug) { struct vhost_scsi *vs = tpg->vhost_scsi; struct vhost_virtqueue *vq; u32 reason; if (!vs) return; mutex_lock(&vs->dev.mutex); if (plug) reason = VIRTIO_SCSI_EVT_RESET_RESCAN; else reason = VIRTIO_SCSI_EVT_RESET_REMOVED; vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; mutex_lock(&vq->mutex); if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG)) vhost_scsi_send_evt(vs, tpg, lun, VIRTIO_SCSI_T_TRANSPORT_RESET, reason); mutex_unlock(&vq->mutex); mutex_unlock(&vs->dev.mutex); } static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun) { vhost_scsi_do_plug(tpg, lun, true); } static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun) { vhost_scsi_do_plug(tpg, lun, false); } static int 
vhost_scsi_port_link(struct se_portal_group *se_tpg, struct se_lun *lun) { struct vhost_scsi_tpg *tpg = container_of(se_tpg, struct vhost_scsi_tpg, se_tpg); mutex_lock(&vhost_scsi_mutex); mutex_lock(&tpg->tv_tpg_mutex); tpg->tv_tpg_port_count++; mutex_unlock(&tpg->tv_tpg_mutex); vhost_scsi_hotplug(tpg, lun); mutex_unlock(&vhost_scsi_mutex); return 0; } static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg, struct se_lun *lun) { struct vhost_scsi_tpg *tpg = container_of(se_tpg, struct vhost_scsi_tpg, se_tpg); mutex_lock(&vhost_scsi_mutex); mutex_lock(&tpg->tv_tpg_mutex); tpg->tv_tpg_port_count--; mutex_unlock(&tpg->tv_tpg_mutex); vhost_scsi_hotunplug(tpg, lun); mutex_unlock(&vhost_scsi_mutex); } static struct se_node_acl * vhost_scsi_make_nodeacl(struct se_portal_group *se_tpg, struct config_group *group, const char *name) { struct se_node_acl *se_nacl, *se_nacl_new; struct vhost_scsi_nacl *nacl; u64 wwpn = 0; u32 nexus_depth; /* vhost_scsi_parse_wwn(name, &wwpn, 1) < 0) return ERR_PTR(-EINVAL); */ se_nacl_new = vhost_scsi_alloc_fabric_acl(se_tpg); if (!se_nacl_new) return ERR_PTR(-ENOMEM); nexus_depth = 1; /* * se_nacl_new may be released by core_tpg_add_initiator_node_acl() * when converting a NodeACL from demo mode -> explict */ se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, name, nexus_depth); if (IS_ERR(se_nacl)) { vhost_scsi_release_fabric_acl(se_tpg, se_nacl_new); return se_nacl; } /* * Locate our struct vhost_scsi_nacl and set the FC Nport WWPN */ nacl = container_of(se_nacl, struct vhost_scsi_nacl, se_node_acl); nacl->iport_wwpn = wwpn; return se_nacl; } static void vhost_scsi_drop_nodeacl(struct se_node_acl *se_acl) { struct vhost_scsi_nacl *nacl = container_of(se_acl, struct vhost_scsi_nacl, se_node_acl); core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1); kfree(nacl); } static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus, struct se_session *se_sess) { struct vhost_scsi_cmd *tv_cmd; unsigned int i; if 
(!se_sess->sess_cmd_map) return; for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) { tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i]; kfree(tv_cmd->tvc_sgl); kfree(tv_cmd->tvc_prot_sgl); kfree(tv_cmd->tvc_upages); } } static ssize_t vhost_scsi_tpg_attrib_store_fabric_prot_type( struct se_portal_group *se_tpg, const char *page, size_t count) { struct vhost_scsi_tpg *tpg = container_of(se_tpg, struct vhost_scsi_tpg, se_tpg); unsigned long val; int ret = kstrtoul(page, 0, &val); if (ret) { pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret); return ret; } if (val != 0 && val != 1 && val != 3) { pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val); return -EINVAL; } tpg->tv_fabric_prot_type = val; return count; } static ssize_t vhost_scsi_tpg_attrib_show_fabric_prot_type( struct se_portal_group *se_tpg, char *page) { struct vhost_scsi_tpg *tpg = container_of(se_tpg, struct vhost_scsi_tpg, se_tpg); return sprintf(page, "%d\n", tpg->tv_fabric_prot_type); } TF_TPG_ATTRIB_ATTR(vhost_scsi, fabric_prot_type, S_IRUGO | S_IWUSR); static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = { &vhost_scsi_tpg_attrib_fabric_prot_type.attr, NULL, }; static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg, const char *name) { struct se_portal_group *se_tpg; struct se_session *se_sess; struct vhost_scsi_nexus *tv_nexus; struct vhost_scsi_cmd *tv_cmd; unsigned int i; mutex_lock(&tpg->tv_tpg_mutex); if (tpg->tpg_nexus) { mutex_unlock(&tpg->tv_tpg_mutex); pr_debug("tpg->tpg_nexus already exists\n"); return -EEXIST; } se_tpg = &tpg->se_tpg; tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL); if (!tv_nexus) { mutex_unlock(&tpg->tv_tpg_mutex); pr_err("Unable to allocate struct vhost_scsi_nexus\n"); return -ENOMEM; } /* * Initialize the struct se_session pointer and setup tagpool * for struct vhost_scsi_cmd descriptors */ tv_nexus->tvn_se_sess = transport_init_session_tags( VHOST_SCSI_DEFAULT_TAGS, sizeof(struct vhost_scsi_cmd), 
TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS); if (IS_ERR(tv_nexus->tvn_se_sess)) { mutex_unlock(&tpg->tv_tpg_mutex); kfree(tv_nexus); return -ENOMEM; } se_sess = tv_nexus->tvn_se_sess; for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) { tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i]; tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) * VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL); if (!tv_cmd->tvc_sgl) { mutex_unlock(&tpg->tv_tpg_mutex); pr_err("Unable to allocate tv_cmd->tvc_sgl\n"); goto out; } tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) * VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL); if (!tv_cmd->tvc_upages) { mutex_unlock(&tpg->tv_tpg_mutex); pr_err("Unable to allocate tv_cmd->tvc_upages\n"); goto out; } tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) * VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL); if (!tv_cmd->tvc_prot_sgl) { mutex_unlock(&tpg->tv_tpg_mutex); pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n"); goto out; } } /* * Since we are running in 'demo mode' this call with generate a * struct se_node_acl for the vhost_scsi struct se_portal_group with * the SCSI Initiator port name of the passed configfs group 'name'. */ tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl( se_tpg, (unsigned char *)name); if (!tv_nexus->tvn_se_sess->se_node_acl) { mutex_unlock(&tpg->tv_tpg_mutex); pr_debug("core_tpg_check_initiator_node_acl() failed" " for %s\n", name); goto out; } /* * Now register the TCM vhost virtual I_T Nexus as active. 
*/ transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, tv_nexus->tvn_se_sess, tv_nexus); tpg->tpg_nexus = tv_nexus; mutex_unlock(&tpg->tv_tpg_mutex); return 0; out: vhost_scsi_free_cmd_map_res(tv_nexus, se_sess); transport_free_session(se_sess); kfree(tv_nexus); return -ENOMEM; } static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg) { struct se_session *se_sess; struct vhost_scsi_nexus *tv_nexus; mutex_lock(&tpg->tv_tpg_mutex); tv_nexus = tpg->tpg_nexus; if (!tv_nexus) { mutex_unlock(&tpg->tv_tpg_mutex); return -ENODEV; } se_sess = tv_nexus->tvn_se_sess; if (!se_sess) { mutex_unlock(&tpg->tv_tpg_mutex); return -ENODEV; } if (tpg->tv_tpg_port_count != 0) { mutex_unlock(&tpg->tv_tpg_mutex); pr_err("Unable to remove TCM_vhost I_T Nexus with" " active TPG port count: %d\n", tpg->tv_tpg_port_count); return -EBUSY; } if (tpg->tv_tpg_vhost_count != 0) { mutex_unlock(&tpg->tv_tpg_mutex); pr_err("Unable to remove TCM_vhost I_T Nexus with" " active TPG vhost count: %d\n", tpg->tv_tpg_vhost_count); return -EBUSY; } pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated" " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport), tv_nexus->tvn_se_sess->se_node_acl->initiatorname); vhost_scsi_free_cmd_map_res(tv_nexus, se_sess); /* * Release the SCSI I_T Nexus to the emulated vhost Target Port */ transport_deregister_session(tv_nexus->tvn_se_sess); tpg->tpg_nexus = NULL; mutex_unlock(&tpg->tv_tpg_mutex); kfree(tv_nexus); return 0; } static ssize_t vhost_scsi_tpg_show_nexus(struct se_portal_group *se_tpg, char *page) { struct vhost_scsi_tpg *tpg = container_of(se_tpg, struct vhost_scsi_tpg, se_tpg); struct vhost_scsi_nexus *tv_nexus; ssize_t ret; mutex_lock(&tpg->tv_tpg_mutex); tv_nexus = tpg->tpg_nexus; if (!tv_nexus) { mutex_unlock(&tpg->tv_tpg_mutex); return -ENODEV; } ret = snprintf(page, PAGE_SIZE, "%s\n", tv_nexus->tvn_se_sess->se_node_acl->initiatorname); mutex_unlock(&tpg->tv_tpg_mutex); return ret; } static ssize_t 
vhost_scsi_tpg_store_nexus(struct se_portal_group *se_tpg, const char *page, size_t count) { struct vhost_scsi_tpg *tpg = container_of(se_tpg, struct vhost_scsi_tpg, se_tpg); struct vhost_scsi_tport *tport_wwn = tpg->tport; unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr; int ret; /* * Shutdown the active I_T nexus if 'NULL' is passed.. */ if (!strncmp(page, "NULL", 4)) { ret = vhost_scsi_drop_nexus(tpg); return (!ret) ? count : ret; } /* * Otherwise make sure the passed virtual Initiator port WWN matches * the fabric protocol_id set in vhost_scsi_make_tport(), and call * vhost_scsi_make_nexus(). */ if (strlen(page) >= VHOST_SCSI_NAMELEN) { pr_err("Emulated NAA Sas Address: %s, exceeds" " max: %d\n", page, VHOST_SCSI_NAMELEN); return -EINVAL; } snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page); ptr = strstr(i_port, "naa."); if (ptr) { if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) { pr_err("Passed SAS Initiator Port %s does not" " match target port protoid: %s\n", i_port, vhost_scsi_dump_proto_id(tport_wwn)); return -EINVAL; } port_ptr = &i_port[0]; goto check_newline; } ptr = strstr(i_port, "fc."); if (ptr) { if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) { pr_err("Passed FCP Initiator Port %s does not" " match target port protoid: %s\n", i_port, vhost_scsi_dump_proto_id(tport_wwn)); return -EINVAL; } port_ptr = &i_port[3]; /* Skip over "fc." 
*/ goto check_newline; } ptr = strstr(i_port, "iqn."); if (ptr) { if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) { pr_err("Passed iSCSI Initiator Port %s does not" " match target port protoid: %s\n", i_port, vhost_scsi_dump_proto_id(tport_wwn)); return -EINVAL; } port_ptr = &i_port[0]; goto check_newline; } pr_err("Unable to locate prefix for emulated Initiator Port:" " %s\n", i_port); return -EINVAL; /* * Clear any trailing newline for the NAA WWN */ check_newline: if (i_port[strlen(i_port)-1] == '\n') i_port[strlen(i_port)-1] = '\0'; ret = vhost_scsi_make_nexus(tpg, port_ptr); if (ret < 0) return ret; return count; } TF_TPG_BASE_ATTR(vhost_scsi, nexus, S_IRUGO | S_IWUSR); static struct configfs_attribute *vhost_scsi_tpg_attrs[] = { &vhost_scsi_tpg_nexus.attr, NULL, }; static struct se_portal_group * vhost_scsi_make_tpg(struct se_wwn *wwn, struct config_group *group, const char *name) { struct vhost_scsi_tport *tport = container_of(wwn, struct vhost_scsi_tport, tport_wwn); struct vhost_scsi_tpg *tpg; u16 tpgt; int ret; if (strstr(name, "tpgt_") != name) return ERR_PTR(-EINVAL); if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET) return ERR_PTR(-EINVAL); tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL); if (!tpg) { pr_err("Unable to allocate struct vhost_scsi_tpg"); return ERR_PTR(-ENOMEM); } mutex_init(&tpg->tv_tpg_mutex); INIT_LIST_HEAD(&tpg->tv_tpg_list); tpg->tport = tport; tpg->tport_tpgt = tpgt; ret = core_tpg_register(&vhost_scsi_ops, wwn, &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); if (ret < 0) { kfree(tpg); return NULL; } mutex_lock(&vhost_scsi_mutex); list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list); mutex_unlock(&vhost_scsi_mutex); return &tpg->se_tpg; } static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg) { struct vhost_scsi_tpg *tpg = container_of(se_tpg, struct vhost_scsi_tpg, se_tpg); mutex_lock(&vhost_scsi_mutex); list_del(&tpg->tv_tpg_list); mutex_unlock(&vhost_scsi_mutex); /* * Release the virtual 
I_T Nexus for this vhost TPG */ vhost_scsi_drop_nexus(tpg); /* * Deregister the se_tpg from TCM.. */ core_tpg_deregister(se_tpg); kfree(tpg); } static struct se_wwn * vhost_scsi_make_tport(struct target_fabric_configfs *tf, struct config_group *group, const char *name) { struct vhost_scsi_tport *tport; char *ptr; u64 wwpn = 0; int off = 0; /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0) return ERR_PTR(-EINVAL); */ tport = kzalloc(sizeof(struct vhost_scsi_tport), GFP_KERNEL); if (!tport) { pr_err("Unable to allocate struct vhost_scsi_tport"); return ERR_PTR(-ENOMEM); } tport->tport_wwpn = wwpn; /* * Determine the emulated Protocol Identifier and Target Port Name * based on the incoming configfs directory name. */ ptr = strstr(name, "naa."); if (ptr) { tport->tport_proto_id = SCSI_PROTOCOL_SAS; goto check_len; } ptr = strstr(name, "fc."); if (ptr) { tport->tport_proto_id = SCSI_PROTOCOL_FCP; off = 3; /* Skip over "fc." */ goto check_len; } ptr = strstr(name, "iqn."); if (ptr) { tport->tport_proto_id = SCSI_PROTOCOL_ISCSI; goto check_len; } pr_err("Unable to locate prefix for emulated Target Port:" " %s\n", name); kfree(tport); return ERR_PTR(-EINVAL); check_len: if (strlen(name) >= VHOST_SCSI_NAMELEN) { pr_err("Emulated %s Address: %s, exceeds" " max: %d\n", name, vhost_scsi_dump_proto_id(tport), VHOST_SCSI_NAMELEN); kfree(tport); return ERR_PTR(-EINVAL); } snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]); pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target" " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name); return &tport->tport_wwn; } static void vhost_scsi_drop_tport(struct se_wwn *wwn) { struct vhost_scsi_tport *tport = container_of(wwn, struct vhost_scsi_tport, tport_wwn); pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target" " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), tport->tport_name); kfree(tport); } static ssize_t vhost_scsi_wwn_show_attr_version(struct target_fabric_configfs *tf, char *page) { return 
sprintf(page, "TCM_VHOST fabric module %s on %s/%s" "on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname, utsname()->machine); } TF_WWN_ATTR_RO(vhost_scsi, version); static struct configfs_attribute *vhost_scsi_wwn_attrs[] = { &vhost_scsi_wwn_version.attr, NULL, }; static struct target_core_fabric_ops vhost_scsi_ops = { .module = THIS_MODULE, .name = "vhost", .get_fabric_name = vhost_scsi_get_fabric_name, .get_fabric_proto_ident = vhost_scsi_get_fabric_proto_ident, .tpg_get_wwn = vhost_scsi_get_fabric_wwn, .tpg_get_tag = vhost_scsi_get_tpgt, .tpg_get_default_depth = vhost_scsi_get_default_depth, .tpg_get_pr_transport_id = vhost_scsi_get_pr_transport_id, .tpg_get_pr_transport_id_len = vhost_scsi_get_pr_transport_id_len, .tpg_parse_pr_out_transport_id = vhost_scsi_parse_pr_out_transport_id, .tpg_check_demo_mode = vhost_scsi_check_true, .tpg_check_demo_mode_cache = vhost_scsi_check_true, .tpg_check_demo_mode_write_protect = vhost_scsi_check_false, .tpg_check_prod_mode_write_protect = vhost_scsi_check_false, .tpg_check_prot_fabric_only = vhost_scsi_check_prot_fabric_only, .tpg_alloc_fabric_acl = vhost_scsi_alloc_fabric_acl, .tpg_release_fabric_acl = vhost_scsi_release_fabric_acl, .tpg_get_inst_index = vhost_scsi_tpg_get_inst_index, .release_cmd = vhost_scsi_release_cmd, .check_stop_free = vhost_scsi_check_stop_free, .shutdown_session = vhost_scsi_shutdown_session, .close_session = vhost_scsi_close_session, .sess_get_index = vhost_scsi_sess_get_index, .sess_get_initiator_sid = NULL, .write_pending = vhost_scsi_write_pending, .write_pending_status = vhost_scsi_write_pending_status, .set_default_node_attributes = vhost_scsi_set_default_node_attrs, .get_task_tag = vhost_scsi_get_task_tag, .get_cmd_state = vhost_scsi_get_cmd_state, .queue_data_in = vhost_scsi_queue_data_in, .queue_status = vhost_scsi_queue_status, .queue_tm_rsp = vhost_scsi_queue_tm_rsp, .aborted_task = vhost_scsi_aborted_task, /* * Setup callers for generic logic in target_core_fabric_configfs.c */ 
.fabric_make_wwn = vhost_scsi_make_tport, .fabric_drop_wwn = vhost_scsi_drop_tport, .fabric_make_tpg = vhost_scsi_make_tpg, .fabric_drop_tpg = vhost_scsi_drop_tpg, .fabric_post_link = vhost_scsi_port_link, .fabric_pre_unlink = vhost_scsi_port_unlink, .fabric_make_np = NULL, .fabric_drop_np = NULL, .fabric_make_nodeacl = vhost_scsi_make_nodeacl, .fabric_drop_nodeacl = vhost_scsi_drop_nodeacl, .tfc_wwn_attrs = vhost_scsi_wwn_attrs, .tfc_tpg_base_attrs = vhost_scsi_tpg_attrs, .tfc_tpg_attrib_attrs = vhost_scsi_tpg_attrib_attrs, }; static int __init vhost_scsi_init(void) { int ret = -ENOMEM; pr_debug("TCM_VHOST fabric module %s on %s/%s" " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname, utsname()->machine); /* * Use our own dedicated workqueue for submitting I/O into * target core to avoid contention within system_wq. */ vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0); if (!vhost_scsi_workqueue) goto out; ret = vhost_scsi_register(); if (ret < 0) goto out_destroy_workqueue; ret = target_register_template(&vhost_scsi_ops); if (ret < 0) goto out_vhost_scsi_deregister; return 0; out_vhost_scsi_deregister: vhost_scsi_deregister(); out_destroy_workqueue: destroy_workqueue(vhost_scsi_workqueue); out: return ret; }; static void vhost_scsi_exit(void) { target_unregister_template(&vhost_scsi_ops); vhost_scsi_deregister(); destroy_workqueue(vhost_scsi_workqueue); }; MODULE_DESCRIPTION("VHOST_SCSI series fabric driver"); MODULE_ALIAS("tcm_vhost"); MODULE_LICENSE("GPL"); module_init(vhost_scsi_init); module_exit(vhost_scsi_exit);
gpl-2.0
amitsirius/linux-3.2.71_sirius
drivers/video/backlight/adp5520_bl.c
162
10588
/* * Backlight driver for Analog Devices ADP5520/ADP5501 MFD PMICs * * Copyright 2009 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/fb.h> #include <linux/backlight.h> #include <linux/mfd/adp5520.h> #include <linux/slab.h> #include <linux/module.h> struct adp5520_bl { struct device *master; struct adp5520_backlight_platform_data *pdata; struct mutex lock; unsigned long cached_daylight_max; int id; int current_brightness; }; static int adp5520_bl_set(struct backlight_device *bl, int brightness) { struct adp5520_bl *data = bl_get_data(bl); struct device *master = data->master; int ret = 0; if (data->pdata->en_ambl_sens) { if ((brightness > 0) && (brightness < ADP5020_MAX_BRIGHTNESS)) { /* Disable Ambient Light auto adjust */ ret |= adp5520_clr_bits(master, ADP5520_BL_CONTROL, ADP5520_BL_AUTO_ADJ); ret |= adp5520_write(master, ADP5520_DAYLIGHT_MAX, brightness); } else { /* * MAX_BRIGHTNESS -> Enable Ambient Light auto adjust * restore daylight l3 sysfs brightness */ ret |= adp5520_write(master, ADP5520_DAYLIGHT_MAX, data->cached_daylight_max); ret |= adp5520_set_bits(master, ADP5520_BL_CONTROL, ADP5520_BL_AUTO_ADJ); } } else { ret |= adp5520_write(master, ADP5520_DAYLIGHT_MAX, brightness); } if (data->current_brightness && brightness == 0) ret |= adp5520_set_bits(master, ADP5520_MODE_STATUS, ADP5520_DIM_EN); else if (data->current_brightness == 0 && brightness) ret |= adp5520_clr_bits(master, ADP5520_MODE_STATUS, ADP5520_DIM_EN); if (!ret) data->current_brightness = brightness; return ret; } static int adp5520_bl_update_status(struct backlight_device *bl) { int brightness = bl->props.brightness; if (bl->props.power != FB_BLANK_UNBLANK) brightness = 0; if (bl->props.fb_blank != FB_BLANK_UNBLANK) brightness = 0; return adp5520_bl_set(bl, brightness); } static int adp5520_bl_get_brightness(struct backlight_device *bl) { struct adp5520_bl *data = 
bl_get_data(bl); int error; uint8_t reg_val; error = adp5520_read(data->master, ADP5520_BL_VALUE, &reg_val); return error ? data->current_brightness : reg_val; } static const struct backlight_ops adp5520_bl_ops = { .update_status = adp5520_bl_update_status, .get_brightness = adp5520_bl_get_brightness, }; static int adp5520_bl_setup(struct backlight_device *bl) { struct adp5520_bl *data = bl_get_data(bl); struct device *master = data->master; struct adp5520_backlight_platform_data *pdata = data->pdata; int ret = 0; ret |= adp5520_write(master, ADP5520_DAYLIGHT_MAX, pdata->l1_daylight_max); ret |= adp5520_write(master, ADP5520_DAYLIGHT_DIM, pdata->l1_daylight_dim); if (pdata->en_ambl_sens) { data->cached_daylight_max = pdata->l1_daylight_max; ret |= adp5520_write(master, ADP5520_OFFICE_MAX, pdata->l2_office_max); ret |= adp5520_write(master, ADP5520_OFFICE_DIM, pdata->l2_office_dim); ret |= adp5520_write(master, ADP5520_DARK_MAX, pdata->l3_dark_max); ret |= adp5520_write(master, ADP5520_DARK_DIM, pdata->l3_dark_dim); ret |= adp5520_write(master, ADP5520_L2_TRIP, pdata->l2_trip); ret |= adp5520_write(master, ADP5520_L2_HYS, pdata->l2_hyst); ret |= adp5520_write(master, ADP5520_L3_TRIP, pdata->l3_trip); ret |= adp5520_write(master, ADP5520_L3_HYS, pdata->l3_hyst); ret |= adp5520_write(master, ADP5520_ALS_CMPR_CFG, ALS_CMPR_CFG_VAL(pdata->abml_filt, ADP5520_L3_EN)); } ret |= adp5520_write(master, ADP5520_BL_CONTROL, BL_CTRL_VAL(pdata->fade_led_law, pdata->en_ambl_sens)); ret |= adp5520_write(master, ADP5520_BL_FADE, FADE_VAL(pdata->fade_in, pdata->fade_out)); ret |= adp5520_set_bits(master, ADP5520_MODE_STATUS, ADP5520_BL_EN | ADP5520_DIM_EN); return ret; } static ssize_t adp5520_show(struct device *dev, char *buf, int reg) { struct adp5520_bl *data = dev_get_drvdata(dev); int error; uint8_t reg_val; mutex_lock(&data->lock); error = adp5520_read(data->master, reg, &reg_val); mutex_unlock(&data->lock); return sprintf(buf, "%u\n", reg_val); } static ssize_t 
adp5520_store(struct device *dev, const char *buf, size_t count, int reg) { struct adp5520_bl *data = dev_get_drvdata(dev); unsigned long val; int ret; ret = strict_strtoul(buf, 10, &val); if (ret) return ret; mutex_lock(&data->lock); adp5520_write(data->master, reg, val); mutex_unlock(&data->lock); return count; } static ssize_t adp5520_bl_dark_max_show(struct device *dev, struct device_attribute *attr, char *buf) { return adp5520_show(dev, buf, ADP5520_DARK_MAX); } static ssize_t adp5520_bl_dark_max_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return adp5520_store(dev, buf, count, ADP5520_DARK_MAX); } static DEVICE_ATTR(dark_max, 0664, adp5520_bl_dark_max_show, adp5520_bl_dark_max_store); static ssize_t adp5520_bl_office_max_show(struct device *dev, struct device_attribute *attr, char *buf) { return adp5520_show(dev, buf, ADP5520_OFFICE_MAX); } static ssize_t adp5520_bl_office_max_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return adp5520_store(dev, buf, count, ADP5520_OFFICE_MAX); } static DEVICE_ATTR(office_max, 0664, adp5520_bl_office_max_show, adp5520_bl_office_max_store); static ssize_t adp5520_bl_daylight_max_show(struct device *dev, struct device_attribute *attr, char *buf) { return adp5520_show(dev, buf, ADP5520_DAYLIGHT_MAX); } static ssize_t adp5520_bl_daylight_max_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct adp5520_bl *data = dev_get_drvdata(dev); int ret; ret = strict_strtoul(buf, 10, &data->cached_daylight_max); if (ret < 0) return ret; return adp5520_store(dev, buf, count, ADP5520_DAYLIGHT_MAX); } static DEVICE_ATTR(daylight_max, 0664, adp5520_bl_daylight_max_show, adp5520_bl_daylight_max_store); static ssize_t adp5520_bl_dark_dim_show(struct device *dev, struct device_attribute *attr, char *buf) { return adp5520_show(dev, buf, ADP5520_DARK_DIM); } static ssize_t adp5520_bl_dark_dim_store(struct device *dev, 
struct device_attribute *attr, const char *buf, size_t count) { return adp5520_store(dev, buf, count, ADP5520_DARK_DIM); } static DEVICE_ATTR(dark_dim, 0664, adp5520_bl_dark_dim_show, adp5520_bl_dark_dim_store); static ssize_t adp5520_bl_office_dim_show(struct device *dev, struct device_attribute *attr, char *buf) { return adp5520_show(dev, buf, ADP5520_OFFICE_DIM); } static ssize_t adp5520_bl_office_dim_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return adp5520_store(dev, buf, count, ADP5520_OFFICE_DIM); } static DEVICE_ATTR(office_dim, 0664, adp5520_bl_office_dim_show, adp5520_bl_office_dim_store); static ssize_t adp5520_bl_daylight_dim_show(struct device *dev, struct device_attribute *attr, char *buf) { return adp5520_show(dev, buf, ADP5520_DAYLIGHT_DIM); } static ssize_t adp5520_bl_daylight_dim_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return adp5520_store(dev, buf, count, ADP5520_DAYLIGHT_DIM); } static DEVICE_ATTR(daylight_dim, 0664, adp5520_bl_daylight_dim_show, adp5520_bl_daylight_dim_store); static struct attribute *adp5520_bl_attributes[] = { &dev_attr_dark_max.attr, &dev_attr_dark_dim.attr, &dev_attr_office_max.attr, &dev_attr_office_dim.attr, &dev_attr_daylight_max.attr, &dev_attr_daylight_dim.attr, NULL }; static const struct attribute_group adp5520_bl_attr_group = { .attrs = adp5520_bl_attributes, }; static int __devinit adp5520_bl_probe(struct platform_device *pdev) { struct backlight_properties props; struct backlight_device *bl; struct adp5520_bl *data; int ret = 0; data = kzalloc(sizeof(*data), GFP_KERNEL); if (data == NULL) return -ENOMEM; data->master = pdev->dev.parent; data->pdata = pdev->dev.platform_data; if (data->pdata == NULL) { dev_err(&pdev->dev, "missing platform data\n"); kfree(data); return -ENODEV; } data->id = pdev->id; data->current_brightness = 0; mutex_init(&data->lock); memset(&props, 0, sizeof(struct backlight_properties)); props.type 
= BACKLIGHT_RAW; props.max_brightness = ADP5020_MAX_BRIGHTNESS; bl = backlight_device_register(pdev->name, data->master, data, &adp5520_bl_ops, &props); if (IS_ERR(bl)) { dev_err(&pdev->dev, "failed to register backlight\n"); kfree(data); return PTR_ERR(bl); } bl->props.brightness = ADP5020_MAX_BRIGHTNESS; if (data->pdata->en_ambl_sens) ret = sysfs_create_group(&bl->dev.kobj, &adp5520_bl_attr_group); if (ret) { dev_err(&pdev->dev, "failed to register sysfs\n"); backlight_device_unregister(bl); kfree(data); } platform_set_drvdata(pdev, bl); ret |= adp5520_bl_setup(bl); backlight_update_status(bl); return ret; } static int __devexit adp5520_bl_remove(struct platform_device *pdev) { struct backlight_device *bl = platform_get_drvdata(pdev); struct adp5520_bl *data = bl_get_data(bl); adp5520_clr_bits(data->master, ADP5520_MODE_STATUS, ADP5520_BL_EN); if (data->pdata->en_ambl_sens) sysfs_remove_group(&bl->dev.kobj, &adp5520_bl_attr_group); backlight_device_unregister(bl); kfree(data); return 0; } #ifdef CONFIG_PM static int adp5520_bl_suspend(struct platform_device *pdev, pm_message_t state) { struct backlight_device *bl = platform_get_drvdata(pdev); return adp5520_bl_set(bl, 0); } static int adp5520_bl_resume(struct platform_device *pdev) { struct backlight_device *bl = platform_get_drvdata(pdev); backlight_update_status(bl); return 0; } #else #define adp5520_bl_suspend NULL #define adp5520_bl_resume NULL #endif static struct platform_driver adp5520_bl_driver = { .driver = { .name = "adp5520-backlight", .owner = THIS_MODULE, }, .probe = adp5520_bl_probe, .remove = __devexit_p(adp5520_bl_remove), .suspend = adp5520_bl_suspend, .resume = adp5520_bl_resume, }; static int __init adp5520_bl_init(void) { return platform_driver_register(&adp5520_bl_driver); } module_init(adp5520_bl_init); static void __exit adp5520_bl_exit(void) { platform_driver_unregister(&adp5520_bl_driver); } module_exit(adp5520_bl_exit); MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); 
MODULE_DESCRIPTION("ADP5520(01) Backlight Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:adp5520-backlight");
gpl-2.0
bfg-repo-cleaner-demos/linux-original
kernel/irq/irqdomain.c
162
26063
#define pr_fmt(fmt) "irq: " fmt #include <linux/debugfs.h> #include <linux/hardirq.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqdesc.h> #include <linux/irqdomain.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/topology.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/smp.h> #include <linux/fs.h> #define IRQ_DOMAIN_MAP_LEGACY 0 /* driver allocated fixed range of irqs. * ie. legacy 8259, gets irqs 1..15 */ #define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */ #define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */ #define IRQ_DOMAIN_MAP_TREE 3 /* radix tree */ static LIST_HEAD(irq_domain_list); static DEFINE_MUTEX(irq_domain_mutex); static DEFINE_MUTEX(revmap_trees_mutex); static struct irq_domain *irq_default_domain; /** * irq_domain_alloc() - Allocate a new irq_domain data structure * @of_node: optional device-tree node of the interrupt controller * @revmap_type: type of reverse mapping to use * @ops: map/unmap domain callbacks * @host_data: Controller private data pointer * * Allocates and initialize and irq_domain structure. Caller is expected to * register allocated irq_domain with irq_domain_register(). Returns pointer * to IRQ domain, or NULL on failure. 
*/ static struct irq_domain *irq_domain_alloc(struct device_node *of_node, unsigned int revmap_type, const struct irq_domain_ops *ops, void *host_data) { struct irq_domain *domain; domain = kzalloc_node(sizeof(*domain), GFP_KERNEL, of_node_to_nid(of_node)); if (WARN_ON(!domain)) return NULL; /* Fill structure */ domain->revmap_type = revmap_type; domain->ops = ops; domain->host_data = host_data; domain->of_node = of_node_get(of_node); return domain; } static void irq_domain_free(struct irq_domain *domain) { of_node_put(domain->of_node); kfree(domain); } static void irq_domain_add(struct irq_domain *domain) { mutex_lock(&irq_domain_mutex); list_add(&domain->link, &irq_domain_list); mutex_unlock(&irq_domain_mutex); pr_debug("Allocated domain of type %d @0x%p\n", domain->revmap_type, domain); } /** * irq_domain_remove() - Remove an irq domain. * @domain: domain to remove * * This routine is used to remove an irq domain. The caller must ensure * that all mappings within the domain have been disposed of prior to * use, depending on the revmap type. */ void irq_domain_remove(struct irq_domain *domain) { mutex_lock(&irq_domain_mutex); switch (domain->revmap_type) { case IRQ_DOMAIN_MAP_LEGACY: /* * Legacy domains don't manage their own irq_desc * allocations, we expect the caller to handle irq_desc * freeing on their own. */ break; case IRQ_DOMAIN_MAP_TREE: /* * radix_tree_delete() takes care of destroying the root * node when all entries are removed. Shout if there are * any mappings left. */ WARN_ON(domain->revmap_data.tree.height); break; case IRQ_DOMAIN_MAP_LINEAR: kfree(domain->revmap_data.linear.revmap); domain->revmap_data.linear.size = 0; break; case IRQ_DOMAIN_MAP_NOMAP: break; } list_del(&domain->link); /* * If the going away domain is the default one, reset it. 
*/ if (unlikely(irq_default_domain == domain)) irq_set_default_host(NULL); mutex_unlock(&irq_domain_mutex); pr_debug("Removed domain of type %d @0x%p\n", domain->revmap_type, domain); irq_domain_free(domain); } EXPORT_SYMBOL_GPL(irq_domain_remove); static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain, irq_hw_number_t hwirq) { irq_hw_number_t first_hwirq = domain->revmap_data.legacy.first_hwirq; int size = domain->revmap_data.legacy.size; if (WARN_ON(hwirq < first_hwirq || hwirq >= first_hwirq + size)) return 0; return hwirq - first_hwirq + domain->revmap_data.legacy.first_irq; } /** * irq_domain_add_simple() - Allocate and register a simple irq_domain. * @of_node: pointer to interrupt controller's device tree node. * @size: total number of irqs in mapping * @first_irq: first number of irq block assigned to the domain * @ops: map/unmap domain callbacks * @host_data: Controller private data pointer * * Allocates a legacy irq_domain if irq_base is positive or a linear * domain otherwise. For the legacy domain, IRQ descriptors will also * be allocated. * * This is intended to implement the expected behaviour for most * interrupt controllers which is that a linear mapping should * normally be used unless the system requires a legacy mapping in * order to support supplying interrupt numbers during non-DT * registration of devices. */ struct irq_domain *irq_domain_add_simple(struct device_node *of_node, unsigned int size, unsigned int first_irq, const struct irq_domain_ops *ops, void *host_data) { if (first_irq > 0) { int irq_base; if (IS_ENABLED(CONFIG_SPARSE_IRQ)) { /* * Set the descriptor allocator to search for a * 1-to-1 mapping, such as irq_alloc_desc_at(). * Use of_node_to_nid() which is defined to * numa_node_id() on platforms that have no custom * implementation. 
*/ irq_base = irq_alloc_descs(first_irq, first_irq, size, of_node_to_nid(of_node)); if (irq_base < 0) { pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", first_irq); irq_base = first_irq; } } else irq_base = first_irq; return irq_domain_add_legacy(of_node, size, irq_base, 0, ops, host_data); } /* A linear domain is the default */ return irq_domain_add_linear(of_node, size, ops, host_data); } /** * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain. * @of_node: pointer to interrupt controller's device tree node. * @size: total number of irqs in legacy mapping * @first_irq: first number of irq block assigned to the domain * @first_hwirq: first hwirq number to use for the translation. Should normally * be '0', but a positive integer can be used if the effective * hwirqs numbering does not begin at zero. * @ops: map/unmap domain callbacks * @host_data: Controller private data pointer * * Note: the map() callback will be called before this function returns * for all legacy interrupts except 0 (which is always the invalid irq for * a legacy controller). 
*/ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, unsigned int size, unsigned int first_irq, irq_hw_number_t first_hwirq, const struct irq_domain_ops *ops, void *host_data) { struct irq_domain *domain; unsigned int i; domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LEGACY, ops, host_data); if (!domain) return NULL; domain->revmap_data.legacy.first_irq = first_irq; domain->revmap_data.legacy.first_hwirq = first_hwirq; domain->revmap_data.legacy.size = size; mutex_lock(&irq_domain_mutex); /* Verify that all the irqs are available */ for (i = 0; i < size; i++) { int irq = first_irq + i; struct irq_data *irq_data = irq_get_irq_data(irq); if (WARN_ON(!irq_data || irq_data->domain)) { mutex_unlock(&irq_domain_mutex); irq_domain_free(domain); return NULL; } } /* Claim all of the irqs before registering a legacy domain */ for (i = 0; i < size; i++) { struct irq_data *irq_data = irq_get_irq_data(first_irq + i); irq_data->hwirq = first_hwirq + i; irq_data->domain = domain; } mutex_unlock(&irq_domain_mutex); for (i = 0; i < size; i++) { int irq = first_irq + i; int hwirq = first_hwirq + i; /* IRQ0 gets ignored */ if (!irq) continue; /* Legacy flags are left to default at this point, * one can then use irq_create_mapping() to * explicitly change them */ if (ops->map) ops->map(domain, irq, hwirq); /* Clear norequest flags */ irq_clear_status_flags(irq, IRQ_NOREQUEST); } irq_domain_add(domain); return domain; } EXPORT_SYMBOL_GPL(irq_domain_add_legacy); /** * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain. * @of_node: pointer to interrupt controller's device tree node. * @size: Number of interrupts in the domain. 
* @ops: map/unmap domain callbacks * @host_data: Controller private data pointer */ struct irq_domain *irq_domain_add_linear(struct device_node *of_node, unsigned int size, const struct irq_domain_ops *ops, void *host_data) { struct irq_domain *domain; unsigned int *revmap; revmap = kzalloc_node(sizeof(*revmap) * size, GFP_KERNEL, of_node_to_nid(of_node)); if (WARN_ON(!revmap)) return NULL; domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, ops, host_data); if (!domain) { kfree(revmap); return NULL; } domain->revmap_data.linear.size = size; domain->revmap_data.linear.revmap = revmap; irq_domain_add(domain); return domain; } EXPORT_SYMBOL_GPL(irq_domain_add_linear); struct irq_domain *irq_domain_add_nomap(struct device_node *of_node, unsigned int max_irq, const struct irq_domain_ops *ops, void *host_data) { struct irq_domain *domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_NOMAP, ops, host_data); if (domain) { domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0; irq_domain_add(domain); } return domain; } EXPORT_SYMBOL_GPL(irq_domain_add_nomap); /** * irq_domain_add_tree() * @of_node: pointer to interrupt controller's device tree node. * @ops: map/unmap domain callbacks * * Note: The radix tree will be allocated later during boot automatically * (the reverse mapping will use the slow path until that happens). 
*/ struct irq_domain *irq_domain_add_tree(struct device_node *of_node, const struct irq_domain_ops *ops, void *host_data) { struct irq_domain *domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_TREE, ops, host_data); if (domain) { INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL); irq_domain_add(domain); } return domain; } EXPORT_SYMBOL_GPL(irq_domain_add_tree); /** * irq_find_host() - Locates a domain for a given device node * @node: device-tree node of the interrupt controller */ struct irq_domain *irq_find_host(struct device_node *node) { struct irq_domain *h, *found = NULL; int rc; /* We might want to match the legacy controller last since * it might potentially be set to match all interrupts in * the absence of a device node. This isn't a problem so far * yet though... */ mutex_lock(&irq_domain_mutex); list_for_each_entry(h, &irq_domain_list, link) { if (h->ops->match) rc = h->ops->match(h, node); else rc = (h->of_node != NULL) && (h->of_node == node); if (rc) { found = h; break; } } mutex_unlock(&irq_domain_mutex); return found; } EXPORT_SYMBOL_GPL(irq_find_host); /** * irq_set_default_host() - Set a "default" irq domain * @domain: default domain pointer * * For convenience, it's possible to set a "default" domain that will be used * whenever NULL is passed to irq_create_mapping(). It makes life easier for * platforms that want to manipulate a few hard coded interrupt numbers that * aren't properly represented in the device-tree. 
*/ void irq_set_default_host(struct irq_domain *domain) { pr_debug("Default domain set to @0x%p\n", domain); irq_default_domain = domain; } EXPORT_SYMBOL_GPL(irq_set_default_host); static void irq_domain_disassociate_many(struct irq_domain *domain, unsigned int irq_base, int count) { /* * disassociate in reverse order; * not strictly necessary, but nice for unwinding */ while (count--) { int irq = irq_base + count; struct irq_data *irq_data = irq_get_irq_data(irq); irq_hw_number_t hwirq = irq_data->hwirq; if (WARN_ON(!irq_data || irq_data->domain != domain)) continue; irq_set_status_flags(irq, IRQ_NOREQUEST); /* remove chip and handler */ irq_set_chip_and_handler(irq, NULL, NULL); /* Make sure it's completed */ synchronize_irq(irq); /* Tell the PIC about it */ if (domain->ops->unmap) domain->ops->unmap(domain, irq); smp_mb(); irq_data->domain = NULL; irq_data->hwirq = 0; /* Clear reverse map */ switch(domain->revmap_type) { case IRQ_DOMAIN_MAP_LINEAR: if (hwirq < domain->revmap_data.linear.size) domain->revmap_data.linear.revmap[hwirq] = 0; break; case IRQ_DOMAIN_MAP_TREE: mutex_lock(&revmap_trees_mutex); radix_tree_delete(&domain->revmap_data.tree, hwirq); mutex_unlock(&revmap_trees_mutex); break; } } } int irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base, irq_hw_number_t hwirq_base, int count) { unsigned int virq = irq_base; irq_hw_number_t hwirq = hwirq_base; int i, ret; pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__, of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count); for (i = 0; i < count; i++) { struct irq_data *irq_data = irq_get_irq_data(virq + i); if (WARN(!irq_data, "error: irq_desc not allocated; " "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i)) return -EINVAL; if (WARN(irq_data->domain, "error: irq_desc already associated; " "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i)) return -EINVAL; }; for (i = 0; i < count; i++, virq++, hwirq++) { struct irq_data *irq_data = irq_get_irq_data(virq); 
irq_data->hwirq = hwirq; irq_data->domain = domain; if (domain->ops->map) { ret = domain->ops->map(domain, virq, hwirq); if (ret != 0) { pr_err("irq-%i==>hwirq-0x%lx mapping failed: %d\n", virq, hwirq, ret); WARN_ON(1); irq_data->domain = NULL; irq_data->hwirq = 0; goto err_unmap; } } switch (domain->revmap_type) { case IRQ_DOMAIN_MAP_LINEAR: if (hwirq < domain->revmap_data.linear.size) domain->revmap_data.linear.revmap[hwirq] = virq; break; case IRQ_DOMAIN_MAP_TREE: mutex_lock(&revmap_trees_mutex); radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data); mutex_unlock(&revmap_trees_mutex); break; } irq_clear_status_flags(virq, IRQ_NOREQUEST); } return 0; err_unmap: irq_domain_disassociate_many(domain, irq_base, i); return -EINVAL; } EXPORT_SYMBOL_GPL(irq_domain_associate_many); /** * irq_create_direct_mapping() - Allocate an irq for direct mapping * @domain: domain to allocate the irq for or NULL for default domain * * This routine is used for irq controllers which can choose the hardware * interrupt numbers they generate. In such a case it's simplest to use * the linux irq as the hardware interrupt number. 
*/ unsigned int irq_create_direct_mapping(struct irq_domain *domain) { unsigned int virq; if (domain == NULL) domain = irq_default_domain; if (WARN_ON(!domain || domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP)) return 0; virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node)); if (!virq) { pr_debug("create_direct virq allocation failed\n"); return 0; } if (virq >= domain->revmap_data.nomap.max_irq) { pr_err("ERROR: no free irqs available below %i maximum\n", domain->revmap_data.nomap.max_irq); irq_free_desc(virq); return 0; } pr_debug("create_direct obtained virq %d\n", virq); if (irq_domain_associate(domain, virq, virq)) { irq_free_desc(virq); return 0; } return virq; } EXPORT_SYMBOL_GPL(irq_create_direct_mapping); /** * irq_create_mapping() - Map a hardware interrupt into linux irq space * @domain: domain owning this hardware interrupt or NULL for default domain * @hwirq: hardware irq number in that domain space * * Only one mapping per hardware interrupt is permitted. Returns a linux * irq number. * If the sense/trigger is to be specified, set_irq_type() should be called * on the number returned from that call. 
*/ unsigned int irq_create_mapping(struct irq_domain *domain, irq_hw_number_t hwirq) { unsigned int hint; int virq; pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq); /* Look for default domain if nececssary */ if (domain == NULL) domain = irq_default_domain; if (domain == NULL) { pr_warning("irq_create_mapping called for" " NULL domain, hwirq=%lx\n", hwirq); WARN_ON(1); return 0; } pr_debug("-> using domain @%p\n", domain); /* Check if mapping already exists */ virq = irq_find_mapping(domain, hwirq); if (virq) { pr_debug("-> existing mapping on virq %d\n", virq); return virq; } /* Get a virtual interrupt number */ if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY) return irq_domain_legacy_revmap(domain, hwirq); /* Allocate a virtual interrupt number */ hint = hwirq % nr_irqs; if (hint == 0) hint++; virq = irq_alloc_desc_from(hint, of_node_to_nid(domain->of_node)); if (virq <= 0) virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node)); if (virq <= 0) { pr_debug("-> virq allocation failed\n"); return 0; } if (irq_domain_associate(domain, virq, hwirq)) { irq_free_desc(virq); return 0; } pr_debug("irq %lu on domain %s mapped to virtual irq %u\n", hwirq, of_node_full_name(domain->of_node), virq); return virq; } EXPORT_SYMBOL_GPL(irq_create_mapping); /** * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs * @domain: domain owning the interrupt range * @irq_base: beginning of linux IRQ range * @hwirq_base: beginning of hardware IRQ range * @count: Number of interrupts to map * * This routine is used for allocating and mapping a range of hardware * irqs to linux irqs where the linux irq numbers are at pre-defined * locations. For use by controllers that already have static mappings * to insert in to the domain. * * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time * domain insertion. * * 0 is returned upon success, while any failure to establish a static * mapping is treated as an error. 
*/ int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base, irq_hw_number_t hwirq_base, int count) { int ret; ret = irq_alloc_descs(irq_base, irq_base, count, of_node_to_nid(domain->of_node)); if (unlikely(ret < 0)) return ret; ret = irq_domain_associate_many(domain, irq_base, hwirq_base, count); if (unlikely(ret < 0)) { irq_free_descs(irq_base, count); return ret; } return 0; } EXPORT_SYMBOL_GPL(irq_create_strict_mappings); unsigned int irq_create_of_mapping(struct device_node *controller, const u32 *intspec, unsigned int intsize) { struct irq_domain *domain; irq_hw_number_t hwirq; unsigned int type = IRQ_TYPE_NONE; unsigned int virq; domain = controller ? irq_find_host(controller) : irq_default_domain; if (!domain) { #ifdef CONFIG_MIPS /* * Workaround to avoid breaking interrupt controller drivers * that don't yet register an irq_domain. This is temporary * code. ~~~gcl, Feb 24, 2012 * * Scheduled for removal in Linux v3.6. That should be enough * time. */ if (intsize > 0) return intspec[0]; #endif pr_warning("no irq domain found for %s !\n", of_node_full_name(controller)); return 0; } /* If domain has no translation, then we assume interrupt line */ if (domain->ops->xlate == NULL) hwirq = intspec[0]; else { if (domain->ops->xlate(domain, controller, intspec, intsize, &hwirq, &type)) return 0; } /* Create mapping */ virq = irq_create_mapping(domain, hwirq); if (!virq) return virq; /* Set type if specified and different than the current one */ if (type != IRQ_TYPE_NONE && type != (irqd_get_trigger_type(irq_get_irq_data(virq)))) irq_set_irq_type(virq, type); return virq; } EXPORT_SYMBOL_GPL(irq_create_of_mapping); /** * irq_dispose_mapping() - Unmap an interrupt * @virq: linux irq number of the interrupt to unmap */ void irq_dispose_mapping(unsigned int virq) { struct irq_data *irq_data = irq_get_irq_data(virq); struct irq_domain *domain; if (!virq || !irq_data) return; domain = irq_data->domain; if (WARN_ON(domain == NULL)) return; /* Never 
unmap legacy interrupts */ if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY) return; irq_domain_disassociate_many(domain, virq, 1); irq_free_desc(virq); } EXPORT_SYMBOL_GPL(irq_dispose_mapping); /** * irq_find_mapping() - Find a linux irq from an hw irq number. * @domain: domain owning this hardware interrupt * @hwirq: hardware irq number in that domain space */ unsigned int irq_find_mapping(struct irq_domain *domain, irq_hw_number_t hwirq) { struct irq_data *data; /* Look for default domain if nececssary */ if (domain == NULL) domain = irq_default_domain; if (domain == NULL) return 0; switch (domain->revmap_type) { case IRQ_DOMAIN_MAP_LEGACY: return irq_domain_legacy_revmap(domain, hwirq); case IRQ_DOMAIN_MAP_LINEAR: return irq_linear_revmap(domain, hwirq); case IRQ_DOMAIN_MAP_TREE: rcu_read_lock(); data = radix_tree_lookup(&domain->revmap_data.tree, hwirq); rcu_read_unlock(); if (data) return data->irq; break; case IRQ_DOMAIN_MAP_NOMAP: data = irq_get_irq_data(hwirq); if (data && (data->domain == domain) && (data->hwirq == hwirq)) return hwirq; break; } return 0; } EXPORT_SYMBOL_GPL(irq_find_mapping); /** * irq_linear_revmap() - Find a linux irq from a hw irq number. * @domain: domain owning this hardware interrupt * @hwirq: hardware irq number in that domain space * * This is a fast path that can be called directly by irq controller code to * save a handful of instructions. 
*/ unsigned int irq_linear_revmap(struct irq_domain *domain, irq_hw_number_t hwirq) { BUG_ON(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR); /* Check revmap bounds; complain if exceeded */ if (WARN_ON(hwirq >= domain->revmap_data.linear.size)) return 0; return domain->revmap_data.linear.revmap[hwirq]; } EXPORT_SYMBOL_GPL(irq_linear_revmap); #ifdef CONFIG_IRQ_DOMAIN_DEBUG static int virq_debug_show(struct seq_file *m, void *private) { unsigned long flags; struct irq_desc *desc; const char *p; static const char none[] = "none"; void *data; int i; seq_printf(m, "%-5s %-7s %-15s %-*s %s\n", "irq", "hwirq", "chip name", (int)(2 * sizeof(void *) + 2), "chip data", "domain name"); for (i = 1; i < nr_irqs; i++) { desc = irq_to_desc(i); if (!desc) continue; raw_spin_lock_irqsave(&desc->lock, flags); if (desc->action && desc->action->handler) { struct irq_chip *chip; seq_printf(m, "%5d ", i); seq_printf(m, "0x%05lx ", desc->irq_data.hwirq); chip = irq_desc_get_chip(desc); if (chip && chip->name) p = chip->name; else p = none; seq_printf(m, "%-15s ", p); data = irq_desc_get_chip_data(desc); seq_printf(m, data ? 
"0x%p " : " %p ", data); if (desc->irq_data.domain) p = of_node_full_name(desc->irq_data.domain->of_node); else p = none; seq_printf(m, "%s\n", p); } raw_spin_unlock_irqrestore(&desc->lock, flags); } return 0; } static int virq_debug_open(struct inode *inode, struct file *file) { return single_open(file, virq_debug_show, inode->i_private); } static const struct file_operations virq_debug_fops = { .open = virq_debug_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init irq_debugfs_init(void) { if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL, NULL, &virq_debug_fops) == NULL) return -ENOMEM; return 0; } __initcall(irq_debugfs_init); #endif /* CONFIG_IRQ_DOMAIN_DEBUG */ /** * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings * * Device Tree IRQ specifier translation function which works with one cell * bindings where the cell value maps directly to the hwirq number. */ int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, unsigned long *out_hwirq, unsigned int *out_type) { if (WARN_ON(intsize < 1)) return -EINVAL; *out_hwirq = intspec[0]; *out_type = IRQ_TYPE_NONE; return 0; } EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell); /** * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings * * Device Tree IRQ specifier translation function which works with two cell * bindings where the cell values map directly to the hwirq number * and linux irq flags. 
*/ int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_type) { if (WARN_ON(intsize < 2)) return -EINVAL; *out_hwirq = intspec[0]; *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK; return 0; } EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell); /** * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings * * Device Tree IRQ specifier translation function which works with either one * or two cell bindings where the cell values map directly to the hwirq number * and linux irq flags. * * Note: don't use this function unless your interrupt controller explicitly * supports both one and two cell bindings. For the majority of controllers * the _onecell() or _twocell() variants above should be used. */ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, unsigned long *out_hwirq, unsigned int *out_type) { if (WARN_ON(intsize < 1)) return -EINVAL; *out_hwirq = intspec[0]; *out_type = (intsize > 1) ? intspec[1] : IRQ_TYPE_NONE; return 0; } EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell); const struct irq_domain_ops irq_domain_simple_ops = { .xlate = irq_domain_xlate_onetwocell, }; EXPORT_SYMBOL_GPL(irq_domain_simple_ops); #ifdef CONFIG_OF_IRQ void irq_domain_generate_simple(const struct of_device_id *match, u64 phys_base, unsigned int irq_start) { struct device_node *node; pr_debug("looking for phys_base=%llx, irq_start=%i\n", (unsigned long long) phys_base, (int) irq_start); node = of_find_matching_node_by_address(NULL, match, phys_base); if (node) irq_domain_add_legacy(node, 32, irq_start, 0, &irq_domain_simple_ops, NULL); } EXPORT_SYMBOL_GPL(irq_domain_generate_simple); #endif
gpl-2.0
derekhe/huawei-g330d-u8825d-kernel
drivers/usb/gadget/f_ncm.c
418
37546
/* * f_ncm.c -- USB CDC Network (NCM) link function driver * * Copyright (C) 2010 Nokia Corporation * Contact: Yauheni Kaliuta <yauheni.kaliuta@nokia.com> * * The driver borrows from f_ecm.c which is: * * Copyright (C) 2003-2005,2008 David Brownell * Copyright (C) 2008 Nokia Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/device.h> #include <linux/etherdevice.h> #include <linux/crc32.h> #include <linux/usb/cdc.h> #include "u_ether.h" /* * This function is a "CDC Network Control Model" (CDC NCM) Ethernet link. * NCM is intended to be used with high-speed network attachments. * * Note that NCM requires the use of "alternate settings" for its data * interface. This means that the set_alt() method has real work to do, * and also means that a get_alt() method is required. 
*/ /* to trigger crc/non-crc ndp signature */ #define NCM_NDP_HDR_CRC_MASK 0x01000000 #define NCM_NDP_HDR_CRC 0x01000000 #define NCM_NDP_HDR_NOCRC 0x00000000 enum ncm_notify_state { NCM_NOTIFY_NONE, /* don't notify */ NCM_NOTIFY_CONNECT, /* issue CONNECT next */ NCM_NOTIFY_SPEED, /* issue SPEED_CHANGE next */ }; struct f_ncm { struct gether port; u8 ctrl_id, data_id; char ethaddr[14]; struct usb_ep *notify; struct usb_request *notify_req; u8 notify_state; bool is_open; struct ndp_parser_opts *parser_opts; bool is_crc; /* * for notification, it is accessed from both * callback and ethernet open/close */ spinlock_t lock; }; static inline struct f_ncm *func_to_ncm(struct usb_function *f) { return container_of(f, struct f_ncm, port.func); } /* peak (theoretical) bulk transfer rate in bits-per-second */ static inline unsigned ncm_bitrate(struct usb_gadget *g) { if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) return 13 * 512 * 8 * 1000 * 8; else return 19 * 64 * 1 * 1000 * 8; } /*-------------------------------------------------------------------------*/ /* * We cannot group frames so use just the minimal size which ok to put * one max-size ethernet frame. 
* If the host can group frames, allow it to do that, 16K is selected, * because it's used by default by the current linux host driver */ #define NTB_DEFAULT_IN_SIZE USB_CDC_NCM_NTB_MIN_IN_SIZE #define NTB_OUT_SIZE 16384 /* * skbs of size less than that will not be aligned * to NCM's dwNtbInMaxSize to save bus bandwidth */ #define MAX_TX_NONFIXED (512 * 3) #define FORMATS_SUPPORTED (USB_CDC_NCM_NTB16_SUPPORTED | \ USB_CDC_NCM_NTB32_SUPPORTED) static struct usb_cdc_ncm_ntb_parameters ntb_parameters = { .wLength = sizeof ntb_parameters, .bmNtbFormatsSupported = cpu_to_le16(FORMATS_SUPPORTED), .dwNtbInMaxSize = cpu_to_le32(NTB_DEFAULT_IN_SIZE), .wNdpInDivisor = cpu_to_le16(4), .wNdpInPayloadRemainder = cpu_to_le16(0), .wNdpInAlignment = cpu_to_le16(4), .dwNtbOutMaxSize = cpu_to_le32(NTB_OUT_SIZE), .wNdpOutDivisor = cpu_to_le16(4), .wNdpOutPayloadRemainder = cpu_to_le16(0), .wNdpOutAlignment = cpu_to_le16(4), }; /* * Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one * packet, to simplify cancellation; and a big transfer interval, to * waste less bandwidth. 
 */

#define LOG2_STATUS_INTERVAL_MSEC	5	/* 1 << 5 == 32 msec */
#define NCM_STATUS_BYTECOUNT		16	/* 8 byte header + data */

/* interface association: binds the control + data interfaces as one function */
static struct usb_interface_assoc_descriptor ncm_iad_desc __initdata = {
	.bLength =		sizeof ncm_iad_desc,
	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,

	/* .bFirstInterface =	DYNAMIC, */
	.bInterfaceCount =	2,	/* control + data */
	.bFunctionClass =	USB_CLASS_COMM,
	.bFunctionSubClass =	USB_CDC_SUBCLASS_NCM,
	.bFunctionProtocol =	USB_CDC_PROTO_NONE,
	/* .iFunction =		DYNAMIC */
};

/* interface descriptor: */

static struct usb_interface_descriptor ncm_control_intf __initdata = {
	.bLength =		sizeof ncm_control_intf,
	.bDescriptorType =	USB_DT_INTERFACE,

	/* .bInterfaceNumber = DYNAMIC */
	.bNumEndpoints =	1,
	.bInterfaceClass =	USB_CLASS_COMM,
	.bInterfaceSubClass =	USB_CDC_SUBCLASS_NCM,
	.bInterfaceProtocol =	USB_CDC_PROTO_NONE,
	/* .iInterface = DYNAMIC */
};

static struct usb_cdc_header_desc ncm_header_desc __initdata = {
	.bLength =		sizeof ncm_header_desc,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,

	.bcdCDC =		cpu_to_le16(0x0110),
};

static struct usb_cdc_union_desc ncm_union_desc __initdata = {
	.bLength =		sizeof(ncm_union_desc),
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
	/* .bMasterInterface0 =	DYNAMIC */
	/* .bSlaveInterface0 =	DYNAMIC */
};

static struct usb_cdc_ether_desc ecm_desc __initdata = {
	.bLength =		sizeof ecm_desc,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_ETHERNET_TYPE,

	/* this descriptor actually adds value, surprise!
	 */
	/* .iMACAddress = DYNAMIC */
	.bmEthernetStatistics =	cpu_to_le32(0), /* no statistics */
	.wMaxSegmentSize =	cpu_to_le16(ETH_FRAME_LEN),
	.wNumberMCFilters =	cpu_to_le16(0),
	.bNumberPowerFilters =	0,
};

#define NCAPS	(USB_CDC_NCM_NCAP_ETH_FILTER | USB_CDC_NCM_NCAP_CRC_MODE)

static struct usb_cdc_ncm_desc ncm_desc __initdata = {
	.bLength =		sizeof ncm_desc,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_NCM_TYPE,

	.bcdNcmVersion =	cpu_to_le16(0x0100),
	/* can process SetEthernetPacketFilter */
	.bmNetworkCapabilities = NCAPS,
};

/* the default data interface has no endpoints ... */

static struct usb_interface_descriptor ncm_data_nop_intf __initdata = {
	.bLength =		sizeof ncm_data_nop_intf,
	.bDescriptorType =	USB_DT_INTERFACE,

	.bInterfaceNumber =	1,
	.bAlternateSetting =	0,
	.bNumEndpoints =	0,
	.bInterfaceClass =	USB_CLASS_CDC_DATA,
	.bInterfaceSubClass =	0,
	.bInterfaceProtocol =	USB_CDC_NCM_PROTO_NTB,
	/* .iInterface = DYNAMIC */
};

/* ... but the "real" data interface has two bulk endpoints */

static struct usb_interface_descriptor ncm_data_intf __initdata = {
	.bLength =		sizeof ncm_data_intf,
	.bDescriptorType =	USB_DT_INTERFACE,

	.bInterfaceNumber =	1,
	.bAlternateSetting =	1,
	.bNumEndpoints =	2,
	.bInterfaceClass =	USB_CLASS_CDC_DATA,
	.bInterfaceSubClass =	0,
	.bInterfaceProtocol =	USB_CDC_NCM_PROTO_NTB,
	/* .iInterface = DYNAMIC */
};

/* full speed support: */

static struct usb_endpoint_descriptor fs_ncm_notify_desc __initdata = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	cpu_to_le16(NCM_STATUS_BYTECOUNT),
	.bInterval =		1 << LOG2_STATUS_INTERVAL_MSEC,
};

static struct usb_endpoint_descriptor fs_ncm_in_desc __initdata = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor fs_ncm_out_desc __initdata = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
};

static struct usb_descriptor_header *ncm_fs_function[] __initdata = {
	(struct usb_descriptor_header *) &ncm_iad_desc,
	/* CDC NCM control descriptors */
	(struct usb_descriptor_header *) &ncm_control_intf,
	(struct usb_descriptor_header *) &ncm_header_desc,
	(struct usb_descriptor_header *) &ncm_union_desc,
	(struct usb_descriptor_header *) &ecm_desc,
	(struct usb_descriptor_header *) &ncm_desc,
	(struct usb_descriptor_header *) &fs_ncm_notify_desc,
	/* data interface, altsettings 0 and 1 */
	(struct usb_descriptor_header *) &ncm_data_nop_intf,
	(struct usb_descriptor_header *) &ncm_data_intf,
	(struct usb_descriptor_header *) &fs_ncm_in_desc,
	(struct usb_descriptor_header *) &fs_ncm_out_desc,
	NULL,
};

/* high speed support: */

static struct usb_endpoint_descriptor hs_ncm_notify_desc __initdata = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	cpu_to_le16(NCM_STATUS_BYTECOUNT),
	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
};
static struct usb_endpoint_descriptor hs_ncm_in_desc __initdata = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(512),
};

static struct usb_endpoint_descriptor hs_ncm_out_desc __initdata = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(512),
};

static struct usb_descriptor_header *ncm_hs_function[] __initdata = {
	(struct usb_descriptor_header *) &ncm_iad_desc,
	/* CDC NCM control descriptors */
	(struct usb_descriptor_header *) &ncm_control_intf,
	(struct usb_descriptor_header *) &ncm_header_desc,
	(struct usb_descriptor_header *) &ncm_union_desc,
	(struct usb_descriptor_header *) &ecm_desc,
	(struct usb_descriptor_header *) &ncm_desc,
	(struct usb_descriptor_header *) &hs_ncm_notify_desc,
	/* data interface, altsettings 0 and 1 */
	(struct usb_descriptor_header *) &ncm_data_nop_intf,
	(struct usb_descriptor_header *) &ncm_data_intf,
	(struct usb_descriptor_header *) &hs_ncm_in_desc,
	(struct usb_descriptor_header *) &hs_ncm_out_desc,
	NULL,
};

/* string descriptors: */

#define STRING_CTRL_IDX	0
#define STRING_MAC_IDX	1
#define STRING_DATA_IDX	2
#define STRING_IAD_IDX	3

static struct usb_string ncm_string_defs[] = {
	[STRING_CTRL_IDX].s = "CDC Network Control Model (NCM)",
	[STRING_MAC_IDX].s = NULL /* DYNAMIC */,
	[STRING_DATA_IDX].s = "CDC Network Data",
	[STRING_IAD_IDX].s = "CDC NCM",
	{ } /* end of list */
};

static struct usb_gadget_strings ncm_string_table = {
	.language =		0x0409,	/* en-us */
	.strings =		ncm_string_defs,
};

static struct usb_gadget_strings *ncm_strings[] = {
	&ncm_string_table,
	NULL,
};

/*
 * Here are options for NCM Datagram Pointer table (NDP) parser.
 * There are 2 different formats: NDP16 and NDP32 in the spec (ch. 3),
 * in NDP16 offsets and sizes fields are 1 16bit word wide,
 * in NDP32 -- 2 16bit words wide. Also signatures are different.
 * To make the parser code the same, put the differences in the structure,
 * and switch pointers to the structures when the format is changed.
 */

/* format-dependent NTB/NDP layout; all "unsigned" sizes are in u16 units */
struct ndp_parser_opts {
	u32		nth_sign;
	u32		ndp_sign;
	unsigned	nth_size;	/* bytes */
	unsigned	ndp_size;	/* bytes */
	unsigned	ndplen_align;
	/* sizes in u16 units */
	unsigned	dgram_item_len; /* index or length */
	unsigned	block_length;
	unsigned	fp_index;
	unsigned	reserved1;
	unsigned	reserved2;
	unsigned	next_fp_index;
};

#define INIT_NDP16_OPTS {				\
	.nth_sign = USB_CDC_NCM_NTH16_SIGN,		\
	.ndp_sign = USB_CDC_NCM_NDP16_NOCRC_SIGN,	\
	.nth_size = sizeof(struct usb_cdc_ncm_nth16),	\
	.ndp_size = sizeof(struct usb_cdc_ncm_ndp16),	\
	.ndplen_align = 4,				\
	.dgram_item_len = 1,				\
	.block_length = 1,				\
	.fp_index = 1,					\
	.reserved1 = 0,					\
	.reserved2 = 0,					\
	.next_fp_index = 1,				\
}

#define INIT_NDP32_OPTS {				\
	.nth_sign = USB_CDC_NCM_NTH32_SIGN,		\
	.ndp_sign = USB_CDC_NCM_NDP32_NOCRC_SIGN,	\
	.nth_size = sizeof(struct usb_cdc_ncm_nth32),	\
	.ndp_size = sizeof(struct usb_cdc_ncm_ndp32),	\
	.ndplen_align = 8,				\
	.dgram_item_len = 2,				\
	.block_length = 2,				\
	.fp_index = 2,					\
	.reserved1 = 1,					\
	.reserved2 = 2,					\
	.next_fp_index = 2,				\
}

static struct ndp_parser_opts ndp16_opts = INIT_NDP16_OPTS;
static struct ndp_parser_opts ndp32_opts = INIT_NDP32_OPTS;

/* store a 16- or 32-bit LE value at *p and advance p by 'size' u16 words */
static inline void put_ncm(__le16 **p, unsigned size, unsigned val)
{
	switch (size) {
	case 1:
		put_unaligned_le16((u16)val, *p);
		break;
	case 2:
		put_unaligned_le32((u32)val, *p);
		break;
	default:
		BUG();
	}
	*p += size;
}

/* read a 16- or 32-bit LE value from *p and advance p by 'size' u16 words */
static inline unsigned get_ncm(__le16 **p, unsigned size)
{
	unsigned tmp;

	switch (size) {
	case 1:
		tmp = get_unaligned_le16(*p);
		break;
	case 2:
		tmp = get_unaligned_le32(*p);
		break;
	default:
		BUG();
	}
	*p += size;
	return tmp;
}

/*-------------------------------------------------------------------------*/

/* restore the function to its power-on defaults (NDP16, no CRC) */
static inline void ncm_reset_values(struct f_ncm *ncm)
{
	ncm->parser_opts = &ndp16_opts;
	ncm->is_crc = false;
	ncm->port.cdc_filter = DEFAULT_FILTER;

	/* doesn't make sense for ncm, fixed size used */
	ncm->port.header_len = 0;

	ncm->port.fixed_out_len = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
	ncm->port.fixed_in_len = NTB_DEFAULT_IN_SIZE;
}

/*
 * Context: ncm->lock
 * held
 */
static void ncm_do_notify(struct f_ncm *ncm)
{
	struct usb_request		*req = ncm->notify_req;
	struct usb_cdc_notification	*event;
	struct usb_composite_dev	*cdev = ncm->port.func.config->cdev;
	__le32				*data;
	int				status;

	/* notification already in flight? */
	if (!req)
		return;

	event = req->buf;
	switch (ncm->notify_state) {
	case NCM_NOTIFY_NONE:
		return;

	case NCM_NOTIFY_CONNECT:
		event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
		if (ncm->is_open)
			event->wValue = cpu_to_le16(1);
		else
			event->wValue = cpu_to_le16(0);
		event->wLength = 0;
		req->length = sizeof *event;

		DBG(cdev, "notify connect %s\n",
		    ncm->is_open ? "true" : "false");
		ncm->notify_state = NCM_NOTIFY_NONE;
		break;

	case NCM_NOTIFY_SPEED:
		event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;
		event->wValue = cpu_to_le16(0);
		event->wLength = cpu_to_le16(8);
		req->length = NCM_STATUS_BYTECOUNT;

		/* SPEED_CHANGE data is up/down speeds in bits/sec */
		data = req->buf + sizeof *event;
		data[0] = cpu_to_le32(ncm_bitrate(cdev->gadget));
		data[1] = data[0];

		DBG(cdev, "notify speed %d\n", ncm_bitrate(cdev->gadget));
		/* CONNECT follows once this SPEED_CHANGE completes */
		ncm->notify_state = NCM_NOTIFY_CONNECT;
		break;
	}
	event->bmRequestType = 0xA1;	/* device-to-host | class | interface */
	event->wIndex = cpu_to_le16(ncm->ctrl_id);

	/* NULL marks "in flight"; completion handler restores it */
	ncm->notify_req = NULL;
	/*
	 * In double buffering if there is a space in FIFO,
	 * completion callback can be called right after the call,
	 * so unlocking
	 */
	spin_unlock(&ncm->lock);
	status = usb_ep_queue(ncm->notify, req, GFP_ATOMIC);
	spin_lock(&ncm->lock);
	if (status < 0) {
		ncm->notify_req = req;
		DBG(cdev, "notify --> %d\n", status);
	}
}

/*
 * Context: ncm->lock held
 */
static void ncm_notify(struct f_ncm *ncm)
{
	/*
	 * NOTE on most versions of Linux, host side cdc-ethernet
	 * won't listen for notifications until its netdevice opens.
	 * The first notification then sits in the FIFO for a long
	 * time, and the second one is queued.
	 *
	 * If ncm_notify() is called before the second (CONNECT)
	 * notification is sent, then it will reset to send the SPEED
	 * notificaion again (and again, and again), but it's not a problem
	 */
	ncm->notify_state = NCM_NOTIFY_SPEED;
	ncm_do_notify(ncm);
}

/* interrupt-EP completion: chain the next pending notification, if any */
static void ncm_notify_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_ncm			*ncm = req->context;
	struct usb_composite_dev	*cdev = ncm->port.func.config->cdev;
	struct usb_cdc_notification	*event = req->buf;

	spin_lock(&ncm->lock);
	switch (req->status) {
	case 0:
		VDBG(cdev, "Notification %02x sent\n",
		     event->bNotificationType);
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* endpoint was disabled/unplugged: stop the state machine */
		ncm->notify_state = NCM_NOTIFY_NONE;
		break;
	default:
		DBG(cdev, "event %02x --> %d\n",
		    event->bNotificationType, req->status);
		break;
	}
	ncm->notify_req = req;
	ncm_do_notify(ncm);
	spin_unlock(&ncm->lock);
}

/* ep0 OUT data stage for SET_NTB_INPUT_SIZE: validate and apply new size */
static void ncm_ep0out_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* now for SET_NTB_INPUT_SIZE only */
	unsigned		in_size;
	struct usb_function	*f = req->context;
	struct f_ncm		*ncm = func_to_ncm(f);
	struct usb_composite_dev *cdev = ep->driver_data;

	req->context = NULL;
	if (req->status || req->actual != req->length) {
		DBG(cdev, "Bad control-OUT transfer\n");
		goto invalid;
	}

	in_size = get_unaligned_le32(req->buf);
	if (in_size < USB_CDC_NCM_NTB_MIN_IN_SIZE ||
	    in_size > le32_to_cpu(ntb_parameters.dwNtbInMaxSize)) {
		DBG(cdev, "Got wrong INPUT SIZE (%d) from host\n", in_size);
		goto invalid;
	}

	ncm->port.fixed_in_len = in_size;
	VDBG(cdev, "Set NTB INPUT SIZE %d\n", in_size);
	return;

invalid:
	usb_ep_set_halt(ep);
	return;
}

/* handle the NCM-specific class control requests (NCM spec ch. 6) */
static int ncm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_ncm		*ncm = func_to_ncm(f);
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request	*req = cdev->req;
	int			value = -EOPNOTSUPP;
	u16			w_index = le16_to_cpu(ctrl->wIndex);
	u16			w_value = le16_to_cpu(ctrl->wValue);
	u16			w_length = le16_to_cpu(ctrl->wLength);

	/*
	 * composite driver infrastructure handles everything except
	 * CDC class messages; interface activation uses set_alt().
	 */
	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SET_ETHERNET_PACKET_FILTER:
		/*
		 * see 6.2.30: no data, wIndex = interface,
		 * wValue = packet filter bitmap
		 */
		if (w_length != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		DBG(cdev, "packet filter %02x\n", w_value);
		/*
		 * REVISIT locking of cdc_filter. This assumes the UDC
		 * driver won't have a concurrent packet TX irq running on
		 * another CPU; or that if it does, this write is atomic...
		 */
		ncm->port.cdc_filter = w_value;
		value = 0;
		break;
	/*
	 * and optionally:
	 * case USB_CDC_SEND_ENCAPSULATED_COMMAND:
	 * case USB_CDC_GET_ENCAPSULATED_RESPONSE:
	 * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS:
	 * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER:
	 * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER:
	 * case USB_CDC_GET_ETHERNET_STATISTIC:
	 */

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
		| USB_CDC_GET_NTB_PARAMETERS:

		if (w_length == 0 || w_value != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		/* truncate to the host-requested length if shorter */
		value = w_length > sizeof ntb_parameters ?
			sizeof ntb_parameters : w_length;
		memcpy(req->buf, &ntb_parameters, value);
		VDBG(cdev, "Host asked NTB parameters\n");
		break;

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
		| USB_CDC_GET_NTB_INPUT_SIZE:

		if (w_length < 4 || w_value != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		put_unaligned_le32(ncm->port.fixed_in_len, req->buf);
		value = 4;
		VDBG(cdev, "Host asked INPUT SIZE, sending %d\n",
		     ncm->port.fixed_in_len);
		break;

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
		| USB_CDC_SET_NTB_INPUT_SIZE:
	{
		if (w_length != 4 || w_value != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		/* value arrives in the data stage; applied in ep0out_complete */
		req->complete = ncm_ep0out_complete;
		req->length = w_length;
		req->context = f;

		value = req->length;
		break;
	}

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
		| USB_CDC_GET_NTB_FORMAT:
	{
		uint16_t format;

		if (w_length < 2 || w_value != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		format = (ncm->parser_opts == &ndp16_opts) ? 0x0000 : 0x0001;
		put_unaligned_le16(format, req->buf);
		value = 2;
		VDBG(cdev, "Host asked NTB FORMAT, sending %d\n", format);
		break;
	}

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
		| USB_CDC_SET_NTB_FORMAT:
	{
		if (w_length != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		switch (w_value) {
		case 0x0000:
			ncm->parser_opts = &ndp16_opts;
			DBG(cdev, "NCM16 selected\n");
			break;
		case 0x0001:
			ncm->parser_opts = &ndp32_opts;
			DBG(cdev, "NCM32 selected\n");
			break;
		default:
			goto invalid;
		}
		value = 0;
		break;
	}
	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
		| USB_CDC_GET_CRC_MODE:
	{
		uint16_t is_crc;

		if (w_length < 2 || w_value != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		is_crc = ncm->is_crc ? 0x0001 : 0x0000;
		put_unaligned_le16(is_crc, req->buf);
		value = 2;
		VDBG(cdev, "Host asked CRC MODE, sending %d\n", is_crc);
		break;
	}

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
		| USB_CDC_SET_CRC_MODE:
	{
		int ndp_hdr_crc = 0;

		if (w_length != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		switch (w_value) {
		case 0x0000:
			ncm->is_crc = false;
			ndp_hdr_crc = NCM_NDP_HDR_NOCRC;
			DBG(cdev, "non-CRC mode selected\n");
			break;
		case 0x0001:
			ncm->is_crc = true;
			ndp_hdr_crc = NCM_NDP_HDR_CRC;
			DBG(cdev, "CRC mode selected\n");
			break;
		default:
			goto invalid;
		}
		/* patch the CRC byte of the active NDP signature in place */
		ncm->parser_opts->ndp_sign &= ~NCM_NDP_HDR_CRC_MASK;
		ncm->parser_opts->ndp_sign |= ndp_hdr_crc;
		value = 0;
		break;
	}

	/* and disabled in ncm descriptor: */
	/* case USB_CDC_GET_NET_ADDRESS: */
	/* case USB_CDC_SET_NET_ADDRESS: */
	/* case USB_CDC_GET_MAX_DATAGRAM_SIZE: */
	/* case USB_CDC_SET_MAX_DATAGRAM_SIZE: */

	default:
invalid:
		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (value >= 0) {
		DBG(cdev, "ncm req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = 0;
		req->length = value;
		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (value < 0)
			ERROR(cdev, "ncm req %02x.%02x response err %d\n",
					ctrl->bRequestType, ctrl->bRequest,
					value);
	}

	/* device either stalls (value < 0) or reports success */
	return value;
}

/* activate/reset an altsetting on either the control or the data interface */
static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_ncm		*ncm = func_to_ncm(f);
	struct usb_composite_dev *cdev = f->config->cdev;

	/* Control interface has only altsetting 0 */
	if (intf == ncm->ctrl_id) {
		if (alt != 0)
			goto fail;

		if (ncm->notify->driver_data) {
			DBG(cdev, "reset ncm control %d\n", intf);
			usb_ep_disable(ncm->notify);
		}
		if (!(ncm->notify->desc)) {
			DBG(cdev, "init ncm ctrl %d\n", intf);
			if (config_ep_by_speed(cdev->gadget, f, ncm->notify))
				goto fail;
		}
		usb_ep_enable(ncm->notify);
		ncm->notify->driver_data = ncm;

	/* Data interface has two altsettings, 0 and 1 */
	} else if (intf == ncm->data_id) {
		if (alt > 1)
			goto fail;

		if (ncm->port.in_ep->driver_data) {
			DBG(cdev, "reset ncm\n");
			gether_disconnect(&ncm->port);
			ncm_reset_values(ncm);
		}

		/*
		 * CDC Network only sends data in non-default altsettings.
		 * Changing altsettings resets filters, statistics, etc.
		 */
		if (alt == 1) {
			struct net_device	*net;

			if (!ncm->port.in_ep->desc ||
			    !ncm->port.out_ep->desc) {
				DBG(cdev, "init ncm\n");
				if (config_ep_by_speed(cdev->gadget, f,
						       ncm->port.in_ep) ||
				    config_ep_by_speed(cdev->gadget, f,
						       ncm->port.out_ep)) {
					ncm->port.in_ep->desc = NULL;
					ncm->port.out_ep->desc = NULL;
					goto fail;
				}
			}

			/* TODO */
			/* Enable zlps by default for NCM conformance;
			 * override for musb_hdrc (avoids txdma ovhead)
			 */
			ncm->port.is_zlp_ok = !(
				gadget_is_musbhdrc(cdev->gadget)
				);
			ncm->port.cdc_filter = DEFAULT_FILTER;
			DBG(cdev, "activate ncm\n");
			net = gether_connect(&ncm->port);
			if (IS_ERR(net))
				return PTR_ERR(net);
		}

		spin_lock(&ncm->lock);
		ncm_notify(ncm);
		spin_unlock(&ncm->lock);
	} else
		goto fail;

	return 0;
fail:
	return -EINVAL;
}

/*
 * Because the data interface supports multiple altsettings,
 * this NCM function *MUST* implement a get_alt() method.
 */
static int ncm_get_alt(struct usb_function *f, unsigned intf)
{
	struct f_ncm		*ncm = func_to_ncm(f);

	if (intf == ncm->ctrl_id)
		return 0;
	/* data interface is "active" (alt 1) iff its IN endpoint is claimed */
	return ncm->port.in_ep->driver_data ? 1 : 0;
}

/*
 * TX path: wrap one Ethernet frame into a complete NTB (NTH + NDP +
 * single datagram entry + zero terminator entry).  Consumes 'skb' and
 * returns a freshly allocated buffer, or NULL on error/overflow.
 */
static struct sk_buff *ncm_wrap_ntb(struct gether *port,
				    struct sk_buff *skb)
{
	struct f_ncm	*ncm = func_to_ncm(&port->func);
	struct sk_buff	*skb2;
	int		ncb_len = 0;
	__le16		*tmp;
	/*
	 * NOTE(review): these fields were stored with cpu_to_le16(); reading
	 * them without le16_to_cpu() is correct only on little-endian - verify
	 */
	int		div = ntb_parameters.wNdpInDivisor;
	int		rem = ntb_parameters.wNdpInPayloadRemainder;
	int		pad;
	int		ndp_align = ntb_parameters.wNdpInAlignment;
	int		ndp_pad;
	unsigned	max_size = ncm->port.fixed_in_len;
	struct ndp_parser_opts *opts = ncm->parser_opts;
	unsigned	crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;

	/* compute total header length: NTH + padded NDP + 2 entries + pad */
	ncb_len += opts->nth_size;
	ndp_pad = ALIGN(ncb_len, ndp_align) - ncb_len;
	ncb_len += ndp_pad;
	ncb_len += opts->ndp_size;
	ncb_len += 2 * 2 * opts->dgram_item_len; /* Datagram entry */
	ncb_len += 2 * 2 * opts->dgram_item_len; /* Zero datagram entry */
	pad = ALIGN(ncb_len, div) + rem - ncb_len;
	ncb_len += pad;

	if (ncb_len + skb->len + crc_len > max_size) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	skb2 = skb_copy_expand(skb, ncb_len,
			       max_size - skb->len - ncb_len - crc_len,
			       GFP_ATOMIC);
	dev_kfree_skb_any(skb);
	if (!skb2)
		return NULL;

	skb = skb2;

	tmp = (void *) skb_push(skb, ncb_len);
	memset(tmp, 0, ncb_len);

	put_unaligned_le32(opts->nth_sign, tmp); /* dwSignature */
	tmp += 2;
	/* wHeaderLength */
	put_unaligned_le16(opts->nth_size, tmp++);
	tmp++; /* skip wSequence */
	put_ncm(&tmp, opts->block_length, skb->len); /* (d)wBlockLength */
	/* (d)wFpIndex */
	/* the first pointer is right after the NTH + align */
	put_ncm(&tmp, opts->fp_index, opts->nth_size + ndp_pad);

	tmp = (void *)tmp + ndp_pad;

	/* NDP */
	put_unaligned_le32(opts->ndp_sign, tmp); /* dwSignature */
	tmp += 2;
	/* wLength */
	put_unaligned_le16(ncb_len - opts->nth_size - pad, tmp++);

	tmp += opts->reserved1;
	tmp += opts->next_fp_index; /* skip reserved (d)wNextFpIndex */
	tmp += opts->reserved2;

	if (ncm->is_crc) {
		uint32_t crc;

		crc = ~crc32_le(~0,
				skb->data + ncb_len,
				skb->len - ncb_len);
		put_unaligned_le32(crc, skb->data + skb->len);
		skb_put(skb, crc_len);
	}

	/* (d)wDatagramIndex[0] */
	put_ncm(&tmp, opts->dgram_item_len, ncb_len);
	/* (d)wDatagramLength[0] */
	put_ncm(&tmp, opts->dgram_item_len, skb->len - ncb_len);
	/* (d)wDatagramIndex[1] and (d)wDatagramLength[1] already zeroed */

	/* pad small NTBs out to the fixed IN size (see MAX_TX_NONFIXED) */
	if (skb->len > MAX_TX_NONFIXED)
		memset(skb_put(skb, max_size - skb->len),
		       0, max_size - skb->len);

	return skb;
}

/*
 * RX path: validate one received NTB and split its datagrams onto 'list'.
 * On any parse error the whole NTB is dropped and a negative errno returned.
 */
static int ncm_unwrap_ntb(struct gether *port,
			  struct sk_buff *skb,
			  struct sk_buff_head *list)
{
	struct f_ncm	*ncm = func_to_ncm(&port->func);
	__le16		*tmp = (void *) skb->data;
	unsigned	index, index2;
	unsigned	dg_len, dg_len2;
	unsigned	ndp_len;
	struct sk_buff	*skb2;
	int		ret = -EINVAL;
	unsigned	max_size = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
	struct ndp_parser_opts *opts = ncm->parser_opts;
	unsigned	crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
	int		dgram_counter;

	/* dwSignature */
	if (get_unaligned_le32(tmp) != opts->nth_sign) {
		INFO(port->func.config->cdev, "Wrong NTH SIGN, skblen %d\n",
			skb->len);
		print_hex_dump(KERN_INFO, "HEAD:", DUMP_PREFIX_ADDRESS, 32, 1,
			       skb->data, 32, false);

		goto err;
	}
	tmp += 2;
	/* wHeaderLength */
	if (get_unaligned_le16(tmp++) != opts->nth_size) {
		INFO(port->func.config->cdev, "Wrong NTB headersize\n");
		goto err;
	}
	tmp++; /* skip wSequence */

	/* (d)wBlockLength */
	if (get_ncm(&tmp, opts->block_length) > max_size) {
		INFO(port->func.config->cdev, "OUT size exceeded\n");
		goto err;
	}

	index = get_ncm(&tmp, opts->fp_index);
	/* NCM 3.2 */
	if (((index % 4) != 0) && (index < opts->nth_size)) {
		INFO(port->func.config->cdev, "Bad index: %x\n",
			index);
		goto err;
	}

	/* walk through NDP */
	tmp = ((void *)skb->data) + index;
	if (get_unaligned_le32(tmp) != opts->ndp_sign) {
		INFO(port->func.config->cdev, "Wrong NDP SIGN\n");
		goto err;
	}
	tmp += 2;

	ndp_len = get_unaligned_le16(tmp++);
	/*
	 * NCM 3.3.1
	 * entry is 2 items
	 * item size is 16/32 bits, opts->dgram_item_len * 2 bytes
	 * minimal: struct usb_cdc_ncm_ndpX + normal entry + zero entry
	 */
	if ((ndp_len < opts->ndp_size + 2 * 2 * (opts->dgram_item_len * 2))
	    || (ndp_len % opts->ndplen_align != 0)) {
		INFO(port->func.config->cdev, "Bad NDP length: %x\n", ndp_len);
		goto err;
	}
	tmp += opts->reserved1;
	tmp += opts->next_fp_index; /* skip reserved (d)wNextFpIndex */
	tmp += opts->reserved2;

	ndp_len -= opts->ndp_size;
	/* read ahead one entry so the loop can detect the zero terminator */
	index2 = get_ncm(&tmp, opts->dgram_item_len);
	dg_len2 = get_ncm(&tmp, opts->dgram_item_len);
	dgram_counter = 0;

	do {
		index = index2;
		dg_len = dg_len2;
		if (dg_len < 14 + crc_len) { /* ethernet header + crc */
			INFO(port->func.config->cdev, "Bad dgram length: %x\n",
			     dg_len);
			goto err;
		}
		if (ncm->is_crc) {
			uint32_t crc, crc2;

			crc = get_unaligned_le32(skb->data +
						 index + dg_len - crc_len);
			crc2 = ~crc32_le(~0,
					 skb->data + index,
					 dg_len - crc_len);
			if (crc != crc2) {
				INFO(port->func.config->cdev, "Bad CRC\n");
				goto err;
			}
		}

		index2 = get_ncm(&tmp, opts->dgram_item_len);
		dg_len2 = get_ncm(&tmp, opts->dgram_item_len);

		/* last datagram reuses the original skb; others are clones */
		if (index2 == 0 || dg_len2 == 0) {
			skb2 = skb;
		} else {
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2 == NULL)
				goto err;
		}

		if (!skb_pull(skb2, index)) {
			ret = -EOVERFLOW;
			goto err;
		}

		skb_trim(skb2, dg_len - crc_len);
		skb_queue_tail(list, skb2);

		ndp_len -= 2 * (opts->dgram_item_len * 2);

		dgram_counter++;

		if (index2 == 0 || dg_len2 == 0)
			break;
	} while (ndp_len > 2 * (opts->dgram_item_len * 2)); /* zero entry */

	VDBG(port->func.config->cdev,
	     "Parsed NTB with %d frames\n", dgram_counter);
	return 0;
err:
	skb_queue_purge(list);
	dev_kfree_skb_any(skb);
	return ret;
}

/* deactivate both interfaces: drop the link and quiesce the notify EP */
static void ncm_disable(struct usb_function *f)
{
	struct f_ncm		*ncm = func_to_ncm(f);
	struct usb_composite_dev *cdev = f->config->cdev;

	DBG(cdev, "ncm deactivated\n");

	if (ncm->port.in_ep->driver_data)
		gether_disconnect(&ncm->port);

	if (ncm->notify->driver_data) {
		usb_ep_disable(ncm->notify);
		ncm->notify->driver_data = NULL;
		ncm->notify->desc = NULL;
	}
}

/*-------------------------------------------------------------------------*/

/*
 * Callbacks let us notify the host about connect/disconnect when the
 * net device is opened or closed.
 *
 * For testing, note that link states on this side include both opened
 * and closed variants of:
 *
 *   - disconnected/unconfigured
 *   - configured but inactive (data alt 0)
 *   - configured and active (data alt 1)
 *
 * Each needs to be tested with unplug, rmmod, SET_CONFIGURATION, and
 * SET_INTERFACE (altsetting). Remember also that "configured" doesn't
 * imply the host is actually polling the notification endpoint, and
 * likewise that "active" doesn't imply it's actually using the data
 * endpoints for traffic.
 */

/* net device opened: mark link up and kick the notification machine */
static void ncm_open(struct gether *geth)
{
	struct f_ncm		*ncm = func_to_ncm(&geth->func);

	DBG(ncm->port.func.config->cdev, "%s\n", __func__);

	spin_lock(&ncm->lock);
	ncm->is_open = true;
	ncm_notify(ncm);
	spin_unlock(&ncm->lock);
}

/* net device closed: mark link down and notify the host */
static void ncm_close(struct gether *geth)
{
	struct f_ncm		*ncm = func_to_ncm(&geth->func);

	DBG(ncm->port.func.config->cdev, "%s\n", __func__);

	spin_lock(&ncm->lock);
	ncm->is_open = false;
	ncm_notify(ncm);
	spin_unlock(&ncm->lock);
}

/*-------------------------------------------------------------------------*/

/* ethernet function driver setup/binding */

/*
 * Allocate interface IDs and endpoints, and prepare the notification
 * request.  Descriptor copies and callbacks are set up in the remainder
 * of this function (continues below).
 */
static int __init ncm_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct f_ncm		*ncm = func_to_ncm(f);
	int			status;
	struct usb_ep		*ep;

	/* allocate instance-specific interface IDs */
	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	ncm->ctrl_id = status;
	ncm_iad_desc.bFirstInterface = status;

	ncm_control_intf.bInterfaceNumber = status;
	ncm_union_desc.bMasterInterface0 = status;

	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	ncm->data_id = status;

	ncm_data_nop_intf.bInterfaceNumber = status;
	ncm_data_intf.bInterfaceNumber = status;
	ncm_union_desc.bSlaveInterface0 = status;

	status = -ENODEV;

	/* allocate instance-specific endpoints */
	ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_in_desc);
	if (!ep)
		goto fail;
	ncm->port.in_ep = ep;
	ep->driver_data = cdev;	/* claim */

	ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_out_desc);
	if (!ep)
		goto fail;
	ncm->port.out_ep = ep;
	ep->driver_data = cdev;	/* claim */

	ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_notify_desc);
	if (!ep)
		goto fail;
	ncm->notify = ep;
	ep->driver_data = cdev;	/* claim */

	status = -ENOMEM;

	/* allocate notification request and buffer */
	ncm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
	if (!ncm->notify_req)
		goto fail;
	ncm->notify_req->buf = kmalloc(NCM_STATUS_BYTECOUNT, GFP_KERNEL);
	if (!ncm->notify_req->buf)
		goto fail;
	ncm->notify_req->context = ncm;
	ncm->notify_req->complete = ncm_notify_complete;

	/* copy descriptors, and track endpoint copies */
	f->descriptors = usb_copy_descriptors(ncm_fs_function);
	if (!f->descriptors)
		goto fail;

	/*
	 * support all relevant hardware speeds... we expect that when
	 * hardware is dual speed, all bulk-capable endpoints work at
	 * both speeds
	 */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		/* HS endpoints reuse the addresses autoconfigured for FS */
		hs_ncm_in_desc.bEndpointAddress =
				fs_ncm_in_desc.bEndpointAddress;
		hs_ncm_out_desc.bEndpointAddress =
				fs_ncm_out_desc.bEndpointAddress;
		hs_ncm_notify_desc.bEndpointAddress =
				fs_ncm_notify_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->hs_descriptors = usb_copy_descriptors(ncm_hs_function);
		if (!f->hs_descriptors)
			goto fail;
	}

	/*
	 * NOTE: all that is done without knowing or caring about
	 * the network link ... which is unavailable to this code
	 * until we're activated via set_alt().
	 */

	ncm->port.open = ncm_open;
	ncm->port.close = ncm_close;

	DBG(cdev, "CDC Network: %s speed IN/%s OUT/%s NOTIFY/%s\n",
			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
			ncm->port.in_ep->name, ncm->port.out_ep->name,
			ncm->notify->name);
	return 0;

fail:
	/* unwind partial setup; safe because pointers start zeroed */
	if (f->descriptors)
		usb_free_descriptors(f->descriptors);

	if (ncm->notify_req) {
		kfree(ncm->notify_req->buf);
		usb_ep_free_request(ncm->notify, ncm->notify_req);
	}

	/* we might as well release our claims on endpoints */
	if (ncm->notify)
		ncm->notify->driver_data = NULL;
	if (ncm->port.out_ep->desc)
		ncm->port.out_ep->driver_data = NULL;
	if (ncm->port.in_ep->desc)
		ncm->port.in_ep->driver_data = NULL;

	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);

	return status;
}

/* reverse of ncm_bind(): free descriptors, the notify request, and 'ncm' */
static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_ncm	*ncm = func_to_ncm(f);

	DBG(c->cdev, "ncm unbind\n");

	if (gadget_is_dualspeed(c->cdev->gadget))
		usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->descriptors);

	kfree(ncm->notify_req->buf);
	usb_ep_free_request(ncm->notify, ncm->notify_req);

	/* MAC string pointed into the freed instance; clear it */
	ncm_string_defs[1].s = NULL;
	kfree(ncm);
}

/**
 * ncm_bind_config - add CDC Network link to a configuration
 * @c: the configuration to support the network link
 * @ethaddr: a buffer in which the ethernet address of the host side
 *	side of the link was recorded
 * Context: single threaded during gadget setup
 *
 * Returns zero on success, else negative errno.
 *
 * Caller must have called @gether_setup().  Caller is also responsible
 * for calling @gether_cleanup() before module unload.
*/ int __init ncm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]) { struct f_ncm *ncm; int status; if (!can_support_ecm(c->cdev->gadget) || !ethaddr) return -EINVAL; /* maybe allocate device-global string IDs */ if (ncm_string_defs[0].id == 0) { /* control interface label */ status = usb_string_id(c->cdev); if (status < 0) return status; ncm_string_defs[STRING_CTRL_IDX].id = status; ncm_control_intf.iInterface = status; /* data interface label */ status = usb_string_id(c->cdev); if (status < 0) return status; ncm_string_defs[STRING_DATA_IDX].id = status; ncm_data_nop_intf.iInterface = status; ncm_data_intf.iInterface = status; /* MAC address */ status = usb_string_id(c->cdev); if (status < 0) return status; ncm_string_defs[STRING_MAC_IDX].id = status; ecm_desc.iMACAddress = status; /* IAD */ status = usb_string_id(c->cdev); if (status < 0) return status; ncm_string_defs[STRING_IAD_IDX].id = status; ncm_iad_desc.iFunction = status; } /* allocate and initialize one new instance */ ncm = kzalloc(sizeof *ncm, GFP_KERNEL); if (!ncm) return -ENOMEM; /* export host's Ethernet address in CDC format */ snprintf(ncm->ethaddr, sizeof ncm->ethaddr, "%02X%02X%02X%02X%02X%02X", ethaddr[0], ethaddr[1], ethaddr[2], ethaddr[3], ethaddr[4], ethaddr[5]); ncm_string_defs[1].s = ncm->ethaddr; spin_lock_init(&ncm->lock); ncm_reset_values(ncm); ncm->port.is_fixed = true; ncm->port.func.name = "cdc_network"; ncm->port.func.strings = ncm_strings; /* descriptors are per-instance copies */ ncm->port.func.bind = ncm_bind; ncm->port.func.unbind = ncm_unbind; ncm->port.func.set_alt = ncm_set_alt; ncm->port.func.get_alt = ncm_get_alt; ncm->port.func.setup = ncm_setup; ncm->port.func.disable = ncm_disable; ncm->port.wrap = ncm_wrap_ntb; ncm->port.unwrap = ncm_unwrap_ntb; status = usb_add_function(c, &ncm->port.func); if (status) { ncm_string_defs[1].s = NULL; kfree(ncm); } return status; }
gpl-2.0
dwindsor/linux-next
kernel/debug/kdb/kdb_bp.c
418
11188
/*
 * Kernel Debugger Architecture Independent Breakpoint Handler
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
 * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
 */

#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kdb.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include "kdb_private.h"

/*
 * Table of kdb_breakpoints
 */
kdb_bp_t kdb_breakpoints[KDB_MAXBPT];

/* arm single-stepping for the next debugger resume */
static void kdb_setsinglestep(struct pt_regs *regs)
{
	KDB_STATE_SET(DOING_SS);
}

/* human-readable names indexed by bp_type (0..4) */
static char *kdb_rwtypes[] = {
	"Instruction(i)",
	"Instruction(Register)",
	"Data Write",
	"I/O",
	"Data Access"
};

/* map a breakpoint's type to its display name; "" if out of range */
static char *kdb_bptype(kdb_bp_t *bp)
{
	if (bp->bp_type < 0 || bp->bp_type > 4)
		return "";

	return kdb_rwtypes[bp->bp_type];
}

/*
 * Parse optional [datar|dataw|inst [length]] arguments of the bp/bph
 * commands into *bp; advances *nextargp past consumed arguments.
 * Returns 0 or a KDB_* diagnostic.
 */
static int kdb_parsebp(int argc, const char **argv, int *nextargp,
		       kdb_bp_t *bp)
{
	int nextarg = *nextargp;
	int diag;

	bp->bph_length = 1;
	if ((argc + 1) != nextarg) {
		if (strncasecmp(argv[nextarg], "datar", sizeof("datar")) == 0)
			bp->bp_type = BP_ACCESS_WATCHPOINT;
		else if (strncasecmp(argv[nextarg], "dataw",
				     sizeof("dataw")) == 0)
			bp->bp_type = BP_WRITE_WATCHPOINT;
		else if (strncasecmp(argv[nextarg], "inst",
				     sizeof("inst")) == 0)
			bp->bp_type = BP_HARDWARE_BREAKPOINT;
		else
			return KDB_ARGCOUNT;

		bp->bph_length = 1;

		nextarg++;

		if ((argc + 1) != nextarg) {
			unsigned long len;

			diag = kdbgetularg((char *)argv[nextarg], &len);
			if (diag)
				return diag;

			/* watchpoint length limited to 8 bytes */
			if (len > 8)
				return KDB_BADLENGTH;

			bp->bph_length = len;
			nextarg++;
		}

		if ((argc + 1) != nextarg)
			return KDB_ARGCOUNT;
	}

	*nextargp = nextarg;
	return 0;
}

/* deinstall one breakpoint (sw or hw); returns 0 on success, 1 otherwise */
static int _kdb_bp_remove(kdb_bp_t *bp)
{
	int ret = 1;
	if (!bp->bp_installed)
		return ret;
	if (!bp->bp_type)
		ret = dbg_remove_sw_break(bp->bp_addr);
	else
		ret = arch_kgdb_ops.remove_hw_breakpoint(bp->bp_addr,
							 bp->bph_length,
							 bp->bp_type);
	if (ret == 0)
		bp->bp_installed = 0;
	return ret;
}

/* defer a breakpoint past the instruction at the current PC via single-step */
static void kdb_handle_bp(struct pt_regs *regs, kdb_bp_t *bp)
{
	if (KDB_DEBUG(BP))
		kdb_printf("regs->ip = 0x%lx\n", instruction_pointer(regs));

	/*
	 * Setup single step
	 */
	kdb_setsinglestep(regs);

	/*
	 * Reset delay attribute
	 */
	bp->bp_delay = 0;
	bp->bp_delayed = 1;
}

/* install one breakpoint; returns 0 on success/deferral, 1 on failure */
static int _kdb_bp_install(struct pt_regs *regs, kdb_bp_t *bp)
{
	int ret;
	/*
	 * Install the breakpoint, if it is not already installed.
	 */

	if (KDB_DEBUG(BP))
		kdb_printf("%s: bp_installed %d\n",
			   __func__, bp->bp_installed);
	if (!KDB_STATE(SSBPT))
		bp->bp_delay = 0;
	if (bp->bp_installed)
		return 1;
	if (bp->bp_delay || (bp->bp_delayed && KDB_STATE(DOING_SS))) {
		if (KDB_DEBUG(BP))
			kdb_printf("%s: delayed bp\n", __func__);
		kdb_handle_bp(regs, bp);
		return 0;
	}
	if (!bp->bp_type)
		ret = dbg_set_sw_break(bp->bp_addr);
	else
		ret = arch_kgdb_ops.set_hw_breakpoint(bp->bp_addr,
						      bp->bph_length,
						      bp->bp_type);
	if (ret == 0) {
		bp->bp_installed = 1;
	} else {
		kdb_printf("%s: failed to set breakpoint at 0x%lx\n",
			   __func__, bp->bp_addr);
		if (!bp->bp_type) {
			kdb_printf("Software breakpoints are unavailable.\n"
				   " Boot the kernel with rodata=off\n"
				   " OR use hw breaks: help bph\n");
		}
		return 1;
	}
	return 0;
}

/*
 * kdb_bp_install
 *
 * Install kdb_breakpoints prior to returning from the
 * kernel debugger. This allows the kdb_breakpoints to be set
 * upon functions that are used internally by kdb, such as
 * printk(). This function is only called once per kdb session.
 */
void kdb_bp_install(struct pt_regs *regs)
{
	int i;

	for (i = 0; i < KDB_MAXBPT; i++) {
		kdb_bp_t *bp = &kdb_breakpoints[i];

		if (KDB_DEBUG(BP)) {
			kdb_printf("%s: bp %d bp_enabled %d\n",
				   __func__, i, bp->bp_enabled);
		}
		if (bp->bp_enabled)
			_kdb_bp_install(regs, bp);
	}
}

/*
 * kdb_bp_remove
 *
 * Remove kdb_breakpoints upon entry to the kernel debugger.
 *
 * Parameters:
 *	None.
 * Outputs:
 *	None.
 * Returns:
 *	None.
 * Locking:
 *	None.
 * Remarks:
 */
void kdb_bp_remove(void)
{
	int i;

	/* removed in reverse order of installation */
	for (i = KDB_MAXBPT - 1; i >= 0; i--) {
		kdb_bp_t *bp = &kdb_breakpoints[i];

		if (KDB_DEBUG(BP)) {
			kdb_printf("%s: bp %d bp_enabled %d\n",
				   __func__, i, bp->bp_enabled);
		}
		if (bp->bp_enabled)
			_kdb_bp_remove(bp);
	}
}

/*
 * kdb_printbp
 *
 * Internal function to format and print a breakpoint entry.
 *
 * Parameters:
 *	None.
 * Outputs:
 *	None.
 * Returns:
 *	None.
 * Locking:
 *	None.
 * Remarks:
 */
static void kdb_printbp(kdb_bp_t *bp, int i)
{
	kdb_printf("%s ", kdb_bptype(bp));
	kdb_printf("BP #%d at ", i);
	kdb_symbol_print(bp->bp_addr, NULL, KDB_SP_DEFAULT);

	if (bp->bp_enabled)
		kdb_printf("\n is enabled");
	else
		kdb_printf("\n is disabled");

	kdb_printf("\taddr at %016lx, hardtype=%d installed=%d\n",
		   bp->bp_addr, bp->bp_type, bp->bp_installed);

	kdb_printf("\n");
}

/*
 * kdb_bp
 *
 * Handle the bp commands.
 *
 * [bp|bph] <addr-expression> [DATAR|DATAW]
 *
 * Parameters:
 *	argc	Count of arguments in argv
 *	argv	Space delimited command line arguments
 * Outputs:
 *	None.
 * Returns:
 *	Zero for success, a kdb diagnostic if failure.
 * Locking:
 *	None.
 * Remarks:
 *
 *	bp	Set breakpoint on all cpus. Only use hardware assist if need.
 *	bph	Set breakpoint on all cpus.
Force hardware register */ static int kdb_bp(int argc, const char **argv) { int i, bpno; kdb_bp_t *bp, *bp_check; int diag; char *symname = NULL; long offset = 0ul; int nextarg; kdb_bp_t template = {0}; if (argc == 0) { /* * Display breakpoint table */ for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) { if (bp->bp_free) continue; kdb_printbp(bp, bpno); } return 0; } nextarg = 1; diag = kdbgetaddrarg(argc, argv, &nextarg, &template.bp_addr, &offset, &symname); if (diag) return diag; if (!template.bp_addr) return KDB_BADINT; /* * Find an empty bp structure to allocate */ for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) { if (bp->bp_free) break; } if (bpno == KDB_MAXBPT) return KDB_TOOMANYBPT; if (strcmp(argv[0], "bph") == 0) { template.bp_type = BP_HARDWARE_BREAKPOINT; diag = kdb_parsebp(argc, argv, &nextarg, &template); if (diag) return diag; } else { template.bp_type = BP_BREAKPOINT; } /* * Check for clashing breakpoints. * * Note, in this design we can't have hardware breakpoints * enabled for both read and write on the same address. */ for (i = 0, bp_check = kdb_breakpoints; i < KDB_MAXBPT; i++, bp_check++) { if (!bp_check->bp_free && bp_check->bp_addr == template.bp_addr) { kdb_printf("You already have a breakpoint at " kdb_bfd_vma_fmt0 "\n", template.bp_addr); return KDB_DUPBPT; } } template.bp_enabled = 1; /* * Actually allocate the breakpoint found earlier */ *bp = template; bp->bp_free = 0; kdb_printbp(bp, bpno); return 0; } /* * kdb_bc * * Handles the 'bc', 'be', and 'bd' commands * * [bd|bc|be] <breakpoint-number> * [bd|bc|be] * * * Parameters: * argc Count of arguments in argv * argv Space delimited command line arguments * Outputs: * None. * Returns: * Zero for success, a kdb diagnostic for failure * Locking: * None. * Remarks: */ static int kdb_bc(int argc, const char **argv) { unsigned long addr; kdb_bp_t *bp = NULL; int lowbp = KDB_MAXBPT; int highbp = 0; int done = 0; int i; int diag = 0; int cmd; /* KDBCMD_B? 
*/ #define KDBCMD_BC 0 #define KDBCMD_BE 1 #define KDBCMD_BD 2 if (strcmp(argv[0], "be") == 0) cmd = KDBCMD_BE; else if (strcmp(argv[0], "bd") == 0) cmd = KDBCMD_BD; else cmd = KDBCMD_BC; if (argc != 1) return KDB_ARGCOUNT; if (strcmp(argv[1], "*") == 0) { lowbp = 0; highbp = KDB_MAXBPT; } else { diag = kdbgetularg(argv[1], &addr); if (diag) return diag; /* * For addresses less than the maximum breakpoint number, * assume that the breakpoint number is desired. */ if (addr < KDB_MAXBPT) { bp = &kdb_breakpoints[addr]; lowbp = highbp = addr; highbp++; } else { for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) { if (bp->bp_addr == addr) { lowbp = highbp = i; highbp++; break; } } } } /* * Now operate on the set of breakpoints matching the input * criteria (either '*' for all, or an individual breakpoint). */ for (bp = &kdb_breakpoints[lowbp], i = lowbp; i < highbp; i++, bp++) { if (bp->bp_free) continue; done++; switch (cmd) { case KDBCMD_BC: bp->bp_enabled = 0; kdb_printf("Breakpoint %d at " kdb_bfd_vma_fmt " cleared\n", i, bp->bp_addr); bp->bp_addr = 0; bp->bp_free = 1; break; case KDBCMD_BE: bp->bp_enabled = 1; kdb_printf("Breakpoint %d at " kdb_bfd_vma_fmt " enabled", i, bp->bp_addr); kdb_printf("\n"); break; case KDBCMD_BD: if (!bp->bp_enabled) break; bp->bp_enabled = 0; kdb_printf("Breakpoint %d at " kdb_bfd_vma_fmt " disabled\n", i, bp->bp_addr); break; } if (bp->bp_delay && (cmd == KDBCMD_BC || cmd == KDBCMD_BD)) { bp->bp_delay = 0; KDB_STATE_CLEAR(SSBPT); } } return (!done) ? KDB_BPTNOTFOUND : 0; } /* * kdb_ss * * Process the 'ss' (Single Step) command. * * ss * * Parameters: * argc Argument count * argv Argument vector * Outputs: * None. * Returns: * KDB_CMD_SS for success, a kdb error if failure. * Locking: * None. * Remarks: * * Set the arch specific option to trigger a debug trap after the next * instruction. */ static int kdb_ss(int argc, const char **argv) { if (argc != 0) return KDB_ARGCOUNT; /* * Set trace flag and go. 
*/ KDB_STATE_SET(DOING_SS); return KDB_CMD_SS; } /* Initialize the breakpoint table and register breakpoint commands. */ void __init kdb_initbptab(void) { int i; kdb_bp_t *bp; /* * First time initialization. */ memset(&kdb_breakpoints, '\0', sizeof(kdb_breakpoints)); for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) bp->bp_free = 1; kdb_register_flags("bp", kdb_bp, "[<vaddr>]", "Set/Display breakpoints", 0, KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS); kdb_register_flags("bl", kdb_bp, "[<vaddr>]", "Display breakpoints", 0, KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS); if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) kdb_register_flags("bph", kdb_bp, "[<vaddr>]", "[datar [length]|dataw [length]] Set hw brk", 0, KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS); kdb_register_flags("bc", kdb_bc, "<bpnum>", "Clear Breakpoint", 0, KDB_ENABLE_FLOW_CTRL); kdb_register_flags("be", kdb_bc, "<bpnum>", "Enable Breakpoint", 0, KDB_ENABLE_FLOW_CTRL); kdb_register_flags("bd", kdb_bc, "<bpnum>", "Disable Breakpoint", 0, KDB_ENABLE_FLOW_CTRL); kdb_register_flags("ss", kdb_ss, "", "Single Step", 1, KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS); /* * Architecture dependent initialization. */ }
gpl-2.0
NETFORCE2/linux
arch/arm/mach-socfpga/socfpga.c
418
2965
/* * Copyright (C) 2012 Altera Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/irqchip.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/reboot.h> #include <asm/hardware/cache-l2x0.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include "core.h" void __iomem *socfpga_scu_base_addr = ((void __iomem *)(SOCFPGA_SCU_VIRT_BASE)); void __iomem *sys_manager_base_addr; void __iomem *rst_manager_base_addr; unsigned long socfpga_cpu1start_addr; static struct map_desc scu_io_desc __initdata = { .virtual = SOCFPGA_SCU_VIRT_BASE, .pfn = 0, /* run-time */ .length = SZ_8K, .type = MT_DEVICE, }; static struct map_desc uart_io_desc __initdata = { .virtual = 0xfec02000, .pfn = __phys_to_pfn(0xffc02000), .length = SZ_8K, .type = MT_DEVICE, }; static void __init socfpga_scu_map_io(void) { unsigned long base; /* Get SCU base */ asm("mrc p15, 4, %0, c15, c0, 0" : "=r" (base)); scu_io_desc.pfn = __phys_to_pfn(base); iotable_init(&scu_io_desc, 1); } static void __init socfpga_map_io(void) { socfpga_scu_map_io(); iotable_init(&uart_io_desc, 1); early_printk("Early printk initialized\n"); } void __init socfpga_sysmgr_init(void) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "altr,sys-mgr"); if (of_property_read_u32(np, "cpu1-start-addr", (u32 *) &socfpga_cpu1start_addr)) pr_err("SMP: Need 
cpu1-start-addr in device tree.\n"); sys_manager_base_addr = of_iomap(np, 0); np = of_find_compatible_node(NULL, NULL, "altr,rst-mgr"); rst_manager_base_addr = of_iomap(np, 0); } static void __init socfpga_init_irq(void) { irqchip_init(); socfpga_sysmgr_init(); } static void socfpga_cyclone5_restart(enum reboot_mode mode, const char *cmd) { u32 temp; temp = readl(rst_manager_base_addr + SOCFPGA_RSTMGR_CTRL); if (mode == REBOOT_HARD) temp |= RSTMGR_CTRL_SWCOLDRSTREQ; else temp |= RSTMGR_CTRL_SWWARMRSTREQ; writel(temp, rst_manager_base_addr + SOCFPGA_RSTMGR_CTRL); } static const char *altera_dt_match[] = { "altr,socfpga", NULL }; DT_MACHINE_START(SOCFPGA, "Altera SOCFPGA") .l2c_aux_val = 0, .l2c_aux_mask = ~0, .smp = smp_ops(socfpga_smp_ops), .map_io = socfpga_map_io, .init_irq = socfpga_init_irq, .restart = socfpga_cyclone5_restart, .dt_compat = altera_dt_match, MACHINE_END
gpl-2.0
anoever/thunderbolt
drivers/char/hangcheck-timer.c
418
5837
/* * hangcheck-timer.c * * Driver for a little io fencing timer. * * Copyright (C) 2002, 2003 Oracle. All rights reserved. * * Author: Joel Becker <joel.becker@oracle.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ /* * The hangcheck-timer driver uses the TSC to catch delays that * jiffies does not notice. A timer is set. When the timer fires, it * checks whether it was delayed and if that delay exceeds a given * margin of error. The hangcheck_tick module parameter takes the timer * duration in seconds. The hangcheck_margin parameter defines the * margin of error, in seconds. The defaults are 60 seconds for the * timer and 180 seconds for the margin of error. IOW, a timer is set * for 60 seconds. When the timer fires, the callback checks the * actual duration that the timer waited. If the duration exceeds the * alloted time and margin (here 60 + 180, or 240 seconds), the machine * is restarted. A healthy machine will have the duration match the * expected timeout very closely. 
*/ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/reboot.h> #include <linux/init.h> #include <linux/delay.h> #include <asm/uaccess.h> #include <linux/sysrq.h> #include <linux/timer.h> #include <linux/hrtimer.h> #define VERSION_STR "0.9.1" #define DEFAULT_IOFENCE_MARGIN 60 /* Default fudge factor, in seconds */ #define DEFAULT_IOFENCE_TICK 180 /* Default timer timeout, in seconds */ static int hangcheck_tick = DEFAULT_IOFENCE_TICK; static int hangcheck_margin = DEFAULT_IOFENCE_MARGIN; static int hangcheck_reboot; /* Defaults to not reboot */ static int hangcheck_dump_tasks; /* Defaults to not dumping SysRQ T */ /* options - modular */ module_param(hangcheck_tick, int, 0); MODULE_PARM_DESC(hangcheck_tick, "Timer delay."); module_param(hangcheck_margin, int, 0); MODULE_PARM_DESC(hangcheck_margin, "If the hangcheck timer has been delayed more than hangcheck_margin seconds, the driver will fire."); module_param(hangcheck_reboot, int, 0); MODULE_PARM_DESC(hangcheck_reboot, "If nonzero, the machine will reboot when the timer margin is exceeded."); module_param(hangcheck_dump_tasks, int, 0); MODULE_PARM_DESC(hangcheck_dump_tasks, "If nonzero, the machine will dump the system task state when the timer margin is exceeded."); MODULE_AUTHOR("Oracle"); MODULE_DESCRIPTION("Hangcheck-timer detects when the system has gone out to lunch past a certain margin."); MODULE_LICENSE("GPL"); MODULE_VERSION(VERSION_STR); /* options - nonmodular */ #ifndef MODULE static int __init hangcheck_parse_tick(char *str) { int par; if (get_option(&str,&par)) hangcheck_tick = par; return 1; } static int __init hangcheck_parse_margin(char *str) { int par; if (get_option(&str,&par)) hangcheck_margin = par; return 1; } static int __init hangcheck_parse_reboot(char *str) { int par; if (get_option(&str,&par)) hangcheck_reboot = par; return 1; } static int __init 
hangcheck_parse_dump_tasks(char *str) { int par; if (get_option(&str,&par)) hangcheck_dump_tasks = par; return 1; } __setup("hcheck_tick", hangcheck_parse_tick); __setup("hcheck_margin", hangcheck_parse_margin); __setup("hcheck_reboot", hangcheck_parse_reboot); __setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks); #endif /* not MODULE */ #define TIMER_FREQ 1000000000ULL /* Last time scheduled */ static unsigned long long hangcheck_tsc, hangcheck_tsc_margin; static void hangcheck_fire(unsigned long); static DEFINE_TIMER(hangcheck_ticktock, hangcheck_fire, 0, 0); static void hangcheck_fire(unsigned long data) { unsigned long long cur_tsc, tsc_diff; cur_tsc = ktime_get_ns(); if (cur_tsc > hangcheck_tsc) tsc_diff = cur_tsc - hangcheck_tsc; else tsc_diff = (cur_tsc + (~0ULL - hangcheck_tsc)); /* or something */ if (tsc_diff > hangcheck_tsc_margin) { if (hangcheck_dump_tasks) { printk(KERN_CRIT "Hangcheck: Task state:\n"); #ifdef CONFIG_MAGIC_SYSRQ handle_sysrq('t'); #endif /* CONFIG_MAGIC_SYSRQ */ } if (hangcheck_reboot) { printk(KERN_CRIT "Hangcheck: hangcheck is restarting the machine.\n"); emergency_restart(); } else { printk(KERN_CRIT "Hangcheck: hangcheck value past margin!\n"); } } #if 0 /* * Enable to investigate delays in detail */ printk("Hangcheck: called %Ld ns since last time (%Ld ns overshoot)\n", tsc_diff, tsc_diff - hangcheck_tick*TIMER_FREQ); #endif mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ)); hangcheck_tsc = ktime_get_ns(); } static int __init hangcheck_init(void) { printk("Hangcheck: starting hangcheck timer %s (tick is %d seconds, margin is %d seconds).\n", VERSION_STR, hangcheck_tick, hangcheck_margin); hangcheck_tsc_margin = (unsigned long long)(hangcheck_margin + hangcheck_tick); hangcheck_tsc_margin *= (unsigned long long)TIMER_FREQ; hangcheck_tsc = ktime_get_ns(); mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ)); return 0; } static void __exit hangcheck_exit(void) { del_timer_sync(&hangcheck_ticktock); 
printk("Hangcheck: Stopped hangcheck timer.\n"); } module_init(hangcheck_init); module_exit(hangcheck_exit);
gpl-2.0
mndza/linux-xlnx
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
418
6659
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include <core/gpuobj.h> #include <core/engine.h> #include <subdev/instmem.h> #include <subdev/bar.h> #include <subdev/mmu.h> /* fast-path, where backend is able to provide direct pointer to memory */ static u32 nvkm_gpuobj_rd32_fast(struct nvkm_gpuobj *gpuobj, u32 offset) { return ioread32_native(gpuobj->map + offset); } static void nvkm_gpuobj_wr32_fast(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data) { iowrite32_native(data, gpuobj->map + offset); } /* accessor functions for gpuobjs allocated directly from instmem */ static u32 nvkm_gpuobj_heap_rd32(struct nvkm_gpuobj *gpuobj, u32 offset) { return nvkm_ro32(gpuobj->memory, offset); } static void nvkm_gpuobj_heap_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data) { nvkm_wo32(gpuobj->memory, offset, data); } static const struct nvkm_gpuobj_func nvkm_gpuobj_heap; static void nvkm_gpuobj_heap_release(struct nvkm_gpuobj *gpuobj) { gpuobj->func = &nvkm_gpuobj_heap; nvkm_done(gpuobj->memory); } static const struct nvkm_gpuobj_func nvkm_gpuobj_heap_fast = { .release = nvkm_gpuobj_heap_release, .rd32 = nvkm_gpuobj_rd32_fast, .wr32 = nvkm_gpuobj_wr32_fast, }; static const struct nvkm_gpuobj_func nvkm_gpuobj_heap_slow = { .release = nvkm_gpuobj_heap_release, .rd32 = nvkm_gpuobj_heap_rd32, .wr32 = nvkm_gpuobj_heap_wr32, }; static void * nvkm_gpuobj_heap_acquire(struct nvkm_gpuobj *gpuobj) { gpuobj->map = nvkm_kmap(gpuobj->memory); if (likely(gpuobj->map)) gpuobj->func = &nvkm_gpuobj_heap_fast; else gpuobj->func = &nvkm_gpuobj_heap_slow; return gpuobj->map; } static const struct nvkm_gpuobj_func nvkm_gpuobj_heap = { .acquire = nvkm_gpuobj_heap_acquire, }; /* accessor functions for gpuobjs sub-allocated from a parent gpuobj */ static u32 nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset) { return nvkm_ro32(gpuobj->parent, gpuobj->node->offset + offset); } static void nvkm_gpuobj_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data) { nvkm_wo32(gpuobj->parent, gpuobj->node->offset + 
offset, data); } static const struct nvkm_gpuobj_func nvkm_gpuobj_func; static void nvkm_gpuobj_release(struct nvkm_gpuobj *gpuobj) { gpuobj->func = &nvkm_gpuobj_func; nvkm_done(gpuobj->parent); } static const struct nvkm_gpuobj_func nvkm_gpuobj_fast = { .release = nvkm_gpuobj_release, .rd32 = nvkm_gpuobj_rd32_fast, .wr32 = nvkm_gpuobj_wr32_fast, }; static const struct nvkm_gpuobj_func nvkm_gpuobj_slow = { .release = nvkm_gpuobj_release, .rd32 = nvkm_gpuobj_rd32, .wr32 = nvkm_gpuobj_wr32, }; static void * nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj) { gpuobj->map = nvkm_kmap(gpuobj->parent); if (likely(gpuobj->map)) { gpuobj->map = (u8 *)gpuobj->map + gpuobj->node->offset; gpuobj->func = &nvkm_gpuobj_fast; } else { gpuobj->func = &nvkm_gpuobj_slow; } return gpuobj->map; } static const struct nvkm_gpuobj_func nvkm_gpuobj_func = { .acquire = nvkm_gpuobj_acquire, }; static int nvkm_gpuobj_ctor(struct nvkm_device *device, u32 size, int align, bool zero, struct nvkm_gpuobj *parent, struct nvkm_gpuobj *gpuobj) { u32 offset; int ret; if (parent) { if (align >= 0) { ret = nvkm_mm_head(&parent->heap, 0, 1, size, size, max(align, 1), &gpuobj->node); } else { ret = nvkm_mm_tail(&parent->heap, 0, 1, size, size, -align, &gpuobj->node); } if (ret) return ret; gpuobj->parent = parent; gpuobj->func = &nvkm_gpuobj_func; gpuobj->addr = parent->addr + gpuobj->node->offset; gpuobj->size = gpuobj->node->length; if (zero) { nvkm_kmap(gpuobj); for (offset = 0; offset < gpuobj->size; offset += 4) nvkm_wo32(gpuobj, offset, 0x00000000); nvkm_done(gpuobj); } } else { ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, abs(align), zero, &gpuobj->memory); if (ret) return ret; gpuobj->func = &nvkm_gpuobj_heap; gpuobj->addr = nvkm_memory_addr(gpuobj->memory); gpuobj->size = nvkm_memory_size(gpuobj->memory); } return nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1); } void nvkm_gpuobj_del(struct nvkm_gpuobj **pgpuobj) { struct nvkm_gpuobj *gpuobj = *pgpuobj; if (gpuobj) { if 
(gpuobj->parent) nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node); nvkm_mm_fini(&gpuobj->heap); nvkm_memory_del(&gpuobj->memory); kfree(*pgpuobj); *pgpuobj = NULL; } } int nvkm_gpuobj_new(struct nvkm_device *device, u32 size, int align, bool zero, struct nvkm_gpuobj *parent, struct nvkm_gpuobj **pgpuobj) { struct nvkm_gpuobj *gpuobj; int ret; if (!(gpuobj = *pgpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL))) return -ENOMEM; ret = nvkm_gpuobj_ctor(device, size, align, zero, parent, gpuobj); if (ret) nvkm_gpuobj_del(pgpuobj); return ret; } int nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm, u32 access, struct nvkm_vma *vma) { struct nvkm_memory *memory = gpuobj->memory; int ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma); if (ret == 0) nvkm_memory_map(memory, vma, 0); return ret; } void nvkm_gpuobj_unmap(struct nvkm_vma *vma) { if (vma->node) { nvkm_vm_unmap(vma); nvkm_vm_put(vma); } } /* the below is basically only here to support sharing the paged dma object * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work * anywhere else. */ int nvkm_gpuobj_wrap(struct nvkm_memory *memory, struct nvkm_gpuobj **pgpuobj) { if (!(*pgpuobj = kzalloc(sizeof(**pgpuobj), GFP_KERNEL))) return -ENOMEM; (*pgpuobj)->addr = nvkm_memory_addr(memory); (*pgpuobj)->size = nvkm_memory_size(memory); return 0; }
gpl-2.0
simo97/linux
drivers/video/fbdev/mmp/fb/mmpfb.c
1442
19108
/* * linux/drivers/video/mmp/fb/mmpfb.c * Framebuffer driver for Marvell Display controller. * * Copyright (C) 2012 Marvell Technology Group Ltd. * Authors: Zhou Zhu <zzhu3@marvell.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. * */ #include <linux/module.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include "mmpfb.h" static int var_to_pixfmt(struct fb_var_screeninfo *var) { /* * Pseudocolor mode? */ if (var->bits_per_pixel == 8) return PIXFMT_PSEUDOCOLOR; /* * Check for YUV422PLANAR. */ if (var->bits_per_pixel == 16 && var->red.length == 8 && var->green.length == 4 && var->blue.length == 4) { if (var->green.offset >= var->blue.offset) return PIXFMT_YUV422P; else return PIXFMT_YVU422P; } /* * Check for YUV420PLANAR. */ if (var->bits_per_pixel == 12 && var->red.length == 8 && var->green.length == 2 && var->blue.length == 2) { if (var->green.offset >= var->blue.offset) return PIXFMT_YUV420P; else return PIXFMT_YVU420P; } /* * Check for YUV422PACK. */ if (var->bits_per_pixel == 16 && var->red.length == 16 && var->green.length == 16 && var->blue.length == 16) { if (var->red.offset == 0) return PIXFMT_YUYV; else if (var->green.offset >= var->blue.offset) return PIXFMT_UYVY; else return PIXFMT_VYUY; } /* * Check for 565/1555. 
*/ if (var->bits_per_pixel == 16 && var->red.length <= 5 && var->green.length <= 6 && var->blue.length <= 5) { if (var->transp.length == 0) { if (var->red.offset >= var->blue.offset) return PIXFMT_RGB565; else return PIXFMT_BGR565; } } /* * Check for 888/A888. */ if (var->bits_per_pixel <= 32 && var->red.length <= 8 && var->green.length <= 8 && var->blue.length <= 8) { if (var->bits_per_pixel == 24 && var->transp.length == 0) { if (var->red.offset >= var->blue.offset) return PIXFMT_RGB888PACK; else return PIXFMT_BGR888PACK; } if (var->bits_per_pixel == 32 && var->transp.offset == 24) { if (var->red.offset >= var->blue.offset) return PIXFMT_RGBA888; else return PIXFMT_BGRA888; } else { if (var->red.offset >= var->blue.offset) return PIXFMT_RGB888UNPACK; else return PIXFMT_BGR888UNPACK; } /* fall through */ } return -EINVAL; } static void pixfmt_to_var(struct fb_var_screeninfo *var, int pix_fmt) { switch (pix_fmt) { case PIXFMT_RGB565: var->bits_per_pixel = 16; var->red.offset = 11; var->red.length = 5; var->green.offset = 5; var->green.length = 6; var->blue.offset = 0; var->blue.length = 5; var->transp.offset = 0; var->transp.length = 0; break; case PIXFMT_BGR565: var->bits_per_pixel = 16; var->red.offset = 0; var->red.length = 5; var->green.offset = 5; var->green.length = 6; var->blue.offset = 11; var->blue.length = 5; var->transp.offset = 0; var->transp.length = 0; break; case PIXFMT_RGB888UNPACK: var->bits_per_pixel = 32; var->red.offset = 16; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 0; var->transp.length = 0; break; case PIXFMT_BGR888UNPACK: var->bits_per_pixel = 32; var->red.offset = 0; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 16; var->blue.length = 8; var->transp.offset = 0; var->transp.length = 0; break; case PIXFMT_RGBA888: var->bits_per_pixel = 32; var->red.offset = 16; var->red.length = 8; var->green.offset = 8; 
var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 24; var->transp.length = 8; break; case PIXFMT_BGRA888: var->bits_per_pixel = 32; var->red.offset = 0; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 16; var->blue.length = 8; var->transp.offset = 24; var->transp.length = 8; break; case PIXFMT_RGB888PACK: var->bits_per_pixel = 24; var->red.offset = 16; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 0; var->transp.length = 0; break; case PIXFMT_BGR888PACK: var->bits_per_pixel = 24; var->red.offset = 0; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 16; var->blue.length = 8; var->transp.offset = 0; var->transp.length = 0; break; case PIXFMT_YUV420P: var->bits_per_pixel = 12; var->red.offset = 4; var->red.length = 8; var->green.offset = 2; var->green.length = 2; var->blue.offset = 0; var->blue.length = 2; var->transp.offset = 0; var->transp.length = 0; break; case PIXFMT_YVU420P: var->bits_per_pixel = 12; var->red.offset = 4; var->red.length = 8; var->green.offset = 0; var->green.length = 2; var->blue.offset = 2; var->blue.length = 2; var->transp.offset = 0; var->transp.length = 0; break; case PIXFMT_YUV422P: var->bits_per_pixel = 16; var->red.offset = 8; var->red.length = 8; var->green.offset = 4; var->green.length = 4; var->blue.offset = 0; var->blue.length = 4; var->transp.offset = 0; var->transp.length = 0; break; case PIXFMT_YVU422P: var->bits_per_pixel = 16; var->red.offset = 8; var->red.length = 8; var->green.offset = 0; var->green.length = 4; var->blue.offset = 4; var->blue.length = 4; var->transp.offset = 0; var->transp.length = 0; break; case PIXFMT_UYVY: var->bits_per_pixel = 16; var->red.offset = 8; var->red.length = 16; var->green.offset = 4; var->green.length = 16; var->blue.offset = 0; var->blue.length = 16; var->transp.offset = 0; var->transp.length = 0; 
break; case PIXFMT_VYUY: var->bits_per_pixel = 16; var->red.offset = 8; var->red.length = 16; var->green.offset = 0; var->green.length = 16; var->blue.offset = 4; var->blue.length = 16; var->transp.offset = 0; var->transp.length = 0; break; case PIXFMT_YUYV: var->bits_per_pixel = 16; var->red.offset = 0; var->red.length = 16; var->green.offset = 4; var->green.length = 16; var->blue.offset = 8; var->blue.length = 16; var->transp.offset = 0; var->transp.length = 0; break; case PIXFMT_PSEUDOCOLOR: var->bits_per_pixel = 8; var->red.offset = 0; var->red.length = 8; var->green.offset = 0; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 0; var->transp.length = 0; break; } } /* * fb framework has its limitation: * 1. input color/output color is not seprated * 2. fb_videomode not include output color * so for fb usage, we keep a output format which is not changed * then it's added for mmpmode */ static void fbmode_to_mmpmode(struct mmp_mode *mode, struct fb_videomode *videomode, int output_fmt) { u64 div_result = 1000000000000ll; mode->name = videomode->name; mode->refresh = videomode->refresh; mode->xres = videomode->xres; mode->yres = videomode->yres; do_div(div_result, videomode->pixclock); mode->pixclock_freq = (u32)div_result; mode->left_margin = videomode->left_margin; mode->right_margin = videomode->right_margin; mode->upper_margin = videomode->upper_margin; mode->lower_margin = videomode->lower_margin; mode->hsync_len = videomode->hsync_len; mode->vsync_len = videomode->vsync_len; mode->hsync_invert = !!(videomode->sync & FB_SYNC_HOR_HIGH_ACT); mode->vsync_invert = !!(videomode->sync & FB_SYNC_VERT_HIGH_ACT); /* no defined flag in fb, use vmode>>3*/ mode->invert_pixclock = !!(videomode->vmode & 8); mode->pix_fmt_out = output_fmt; } static void mmpmode_to_fbmode(struct fb_videomode *videomode, struct mmp_mode *mode) { u64 div_result = 1000000000000ll; videomode->name = mode->name; videomode->refresh = mode->refresh; 
videomode->xres = mode->xres; videomode->yres = mode->yres; do_div(div_result, mode->pixclock_freq); videomode->pixclock = (u32)div_result; videomode->left_margin = mode->left_margin; videomode->right_margin = mode->right_margin; videomode->upper_margin = mode->upper_margin; videomode->lower_margin = mode->lower_margin; videomode->hsync_len = mode->hsync_len; videomode->vsync_len = mode->vsync_len; videomode->sync = (mode->hsync_invert ? FB_SYNC_HOR_HIGH_ACT : 0) | (mode->vsync_invert ? FB_SYNC_VERT_HIGH_ACT : 0); videomode->vmode = mode->invert_pixclock ? 8 : 0; } static int mmpfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct mmpfb_info *fbi = info->par; if (var->bits_per_pixel == 8) return -EINVAL; /* * Basic geometry sanity checks. */ if (var->xoffset + var->xres > var->xres_virtual) return -EINVAL; if (var->yoffset + var->yres > var->yres_virtual) return -EINVAL; /* * Check size of framebuffer. */ if (var->xres_virtual * var->yres_virtual * (var->bits_per_pixel >> 3) > fbi->fb_size) return -EINVAL; return 0; } static unsigned int chan_to_field(unsigned int chan, struct fb_bitfield *bf) { return ((chan & 0xffff) >> (16 - bf->length)) << bf->offset; } static u32 to_rgb(u16 red, u16 green, u16 blue) { red >>= 8; green >>= 8; blue >>= 8; return (red << 16) | (green << 8) | blue; } static int mmpfb_setcolreg(unsigned int regno, unsigned int red, unsigned int green, unsigned int blue, unsigned int trans, struct fb_info *info) { struct mmpfb_info *fbi = info->par; u32 val; if (info->fix.visual == FB_VISUAL_TRUECOLOR && regno < 16) { val = chan_to_field(red, &info->var.red); val |= chan_to_field(green, &info->var.green); val |= chan_to_field(blue , &info->var.blue); fbi->pseudo_palette[regno] = val; } if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR && regno < 256) { val = to_rgb(red, green, blue); /* TODO */ } return 0; } static int mmpfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct mmpfb_info *fbi = info->par; 
struct mmp_addr addr; memset(&addr, 0, sizeof(addr)); addr.phys[0] = (var->yoffset * var->xres_virtual + var->xoffset) * var->bits_per_pixel / 8 + fbi->fb_start_dma; mmp_overlay_set_addr(fbi->overlay, &addr); return 0; } static int var_update(struct fb_info *info) { struct mmpfb_info *fbi = info->par; struct fb_var_screeninfo *var = &info->var; struct fb_videomode *m; int pix_fmt; /* set pix_fmt */ pix_fmt = var_to_pixfmt(var); if (pix_fmt < 0) return -EINVAL; pixfmt_to_var(var, pix_fmt); fbi->pix_fmt = pix_fmt; /* set var according to best video mode*/ m = (struct fb_videomode *)fb_match_mode(var, &info->modelist); if (!m) { dev_err(fbi->dev, "set par: no match mode, use best mode\n"); m = (struct fb_videomode *)fb_find_best_mode(var, &info->modelist); fb_videomode_to_var(var, m); } memcpy(&fbi->mode, m, sizeof(struct fb_videomode)); /* fix to 2* yres */ var->yres_virtual = var->yres * 2; info->fix.visual = (pix_fmt == PIXFMT_PSEUDOCOLOR) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; info->fix.line_length = var->xres_virtual * var->bits_per_pixel / 8; info->fix.ypanstep = var->yres; return 0; } static void mmpfb_set_win(struct fb_info *info) { struct mmpfb_info *fbi = info->par; struct fb_var_screeninfo *var = &info->var; struct mmp_win win; u32 stride; memset(&win, 0, sizeof(win)); win.xsrc = win.xdst = fbi->mode.xres; win.ysrc = win.ydst = fbi->mode.yres; win.pix_fmt = fbi->pix_fmt; stride = pixfmt_to_stride(win.pix_fmt); win.pitch[0] = var->xres_virtual * stride; win.pitch[1] = win.pitch[2] = (stride == 1) ? 
(var->xres_virtual >> 1) : 0; mmp_overlay_set_win(fbi->overlay, &win); } static int mmpfb_set_par(struct fb_info *info) { struct mmpfb_info *fbi = info->par; struct fb_var_screeninfo *var = &info->var; struct mmp_addr addr; struct mmp_mode mode; int ret; ret = var_update(info); if (ret != 0) return ret; /* set window/path according to new videomode */ fbmode_to_mmpmode(&mode, &fbi->mode, fbi->output_fmt); mmp_path_set_mode(fbi->path, &mode); /* set window related info */ mmpfb_set_win(info); /* set address always */ memset(&addr, 0, sizeof(addr)); addr.phys[0] = (var->yoffset * var->xres_virtual + var->xoffset) * var->bits_per_pixel / 8 + fbi->fb_start_dma; mmp_overlay_set_addr(fbi->overlay, &addr); return 0; } static void mmpfb_power(struct mmpfb_info *fbi, int power) { struct mmp_addr addr; struct fb_var_screeninfo *var = &fbi->fb_info->var; /* for power on, always set address/window again */ if (power) { /* set window related info */ mmpfb_set_win(fbi->fb_info); /* set address always */ memset(&addr, 0, sizeof(addr)); addr.phys[0] = fbi->fb_start_dma + (var->yoffset * var->xres_virtual + var->xoffset) * var->bits_per_pixel / 8; mmp_overlay_set_addr(fbi->overlay, &addr); } mmp_overlay_set_onoff(fbi->overlay, power); } static int mmpfb_blank(int blank, struct fb_info *info) { struct mmpfb_info *fbi = info->par; mmpfb_power(fbi, (blank == FB_BLANK_UNBLANK)); return 0; } static struct fb_ops mmpfb_ops = { .owner = THIS_MODULE, .fb_blank = mmpfb_blank, .fb_check_var = mmpfb_check_var, .fb_set_par = mmpfb_set_par, .fb_setcolreg = mmpfb_setcolreg, .fb_pan_display = mmpfb_pan_display, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; static int modes_setup(struct mmpfb_info *fbi) { struct fb_videomode *videomodes; struct mmp_mode *mmp_modes; struct fb_info *info = fbi->fb_info; int videomode_num, i; /* get videomodes from path */ videomode_num = mmp_path_get_modelist(fbi->path, &mmp_modes); if (!videomode_num) { 
dev_warn(fbi->dev, "can't get videomode num\n"); return 0; } /* put videomode list to info structure */ videomodes = kzalloc(sizeof(struct fb_videomode) * videomode_num, GFP_KERNEL); if (!videomodes) { dev_err(fbi->dev, "can't malloc video modes\n"); return -ENOMEM; } for (i = 0; i < videomode_num; i++) mmpmode_to_fbmode(&videomodes[i], &mmp_modes[i]); fb_videomode_to_modelist(videomodes, videomode_num, &info->modelist); /* set videomode[0] as default mode */ memcpy(&fbi->mode, &videomodes[0], sizeof(struct fb_videomode)); fbi->output_fmt = mmp_modes[0].pix_fmt_out; fb_videomode_to_var(&info->var, &fbi->mode); mmp_path_set_mode(fbi->path, &mmp_modes[0]); kfree(videomodes); return videomode_num; } static int fb_info_setup(struct fb_info *info, struct mmpfb_info *fbi) { int ret = 0; /* Initialise static fb parameters.*/ info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN; info->node = -1; strcpy(info->fix.id, fbi->name); info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.type_aux = 0; info->fix.xpanstep = 0; info->fix.ypanstep = info->var.yres; info->fix.ywrapstep = 0; info->fix.accel = FB_ACCEL_NONE; info->fix.smem_start = fbi->fb_start_dma; info->fix.smem_len = fbi->fb_size; info->fix.visual = (fbi->pix_fmt == PIXFMT_PSEUDOCOLOR) ? 
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; info->fix.line_length = info->var.xres_virtual * info->var.bits_per_pixel / 8; info->fbops = &mmpfb_ops; info->pseudo_palette = fbi->pseudo_palette; info->screen_base = fbi->fb_start; info->screen_size = fbi->fb_size; /* For FB framework: Allocate color map and Register framebuffer*/ if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) ret = -ENOMEM; return ret; } static void fb_info_clear(struct fb_info *info) { fb_dealloc_cmap(&info->cmap); } static int mmpfb_probe(struct platform_device *pdev) { struct mmp_buffer_driver_mach_info *mi; struct fb_info *info; struct mmpfb_info *fbi; int ret, modes_num; mi = pdev->dev.platform_data; if (mi == NULL) { dev_err(&pdev->dev, "no platform data defined\n"); return -EINVAL; } /* initialize fb */ info = framebuffer_alloc(sizeof(struct mmpfb_info), &pdev->dev); if (info == NULL) return -ENOMEM; fbi = info->par; /* init fb */ fbi->fb_info = info; platform_set_drvdata(pdev, fbi); fbi->dev = &pdev->dev; fbi->name = mi->name; fbi->pix_fmt = mi->default_pixfmt; pixfmt_to_var(&info->var, fbi->pix_fmt); mutex_init(&fbi->access_ok); /* get display path by name */ fbi->path = mmp_get_path(mi->path_name); if (!fbi->path) { dev_err(&pdev->dev, "can't get the path %s\n", mi->path_name); ret = -EINVAL; goto failed_destroy_mutex; } dev_info(fbi->dev, "path %s get\n", fbi->path->name); /* get overlay */ fbi->overlay = mmp_path_get_overlay(fbi->path, mi->overlay_id); if (!fbi->overlay) { ret = -EINVAL; goto failed_destroy_mutex; } /* set fetch used */ mmp_overlay_set_fetch(fbi->overlay, mi->dmafetch_id); modes_num = modes_setup(fbi); if (modes_num < 0) { ret = modes_num; goto failed_destroy_mutex; } /* * if get modes success, means not hotplug panels, use caculated buffer * or use default size */ if (modes_num > 0) { /* fix to 2* yres */ info->var.yres_virtual = info->var.yres * 2; /* Allocate framebuffer memory: size = modes xy *4 */ fbi->fb_size = info->var.xres_virtual * info->var.yres_virtual * 
info->var.bits_per_pixel / 8; } else { fbi->fb_size = MMPFB_DEFAULT_SIZE; } fbi->fb_start = dma_alloc_coherent(&pdev->dev, PAGE_ALIGN(fbi->fb_size), &fbi->fb_start_dma, GFP_KERNEL); if (fbi->fb_start == NULL) { dev_err(&pdev->dev, "can't alloc framebuffer\n"); ret = -ENOMEM; goto failed_destroy_mutex; } memset(fbi->fb_start, 0, fbi->fb_size); dev_info(fbi->dev, "fb %dk allocated\n", fbi->fb_size/1024); /* fb power on */ if (modes_num > 0) mmpfb_power(fbi, 1); ret = fb_info_setup(info, fbi); if (ret < 0) goto failed_free_buff; ret = register_framebuffer(info); if (ret < 0) { dev_err(&pdev->dev, "Failed to register fb: %d\n", ret); ret = -ENXIO; goto failed_clear_info; } dev_info(fbi->dev, "loaded to /dev/fb%d <%s>.\n", info->node, info->fix.id); #ifdef CONFIG_LOGO if (fbi->fb_start) { fb_prepare_logo(info, 0); fb_show_logo(info, 0); } #endif return 0; failed_clear_info: fb_info_clear(info); failed_free_buff: dma_free_coherent(&pdev->dev, PAGE_ALIGN(fbi->fb_size), fbi->fb_start, fbi->fb_start_dma); failed_destroy_mutex: mutex_destroy(&fbi->access_ok); dev_err(fbi->dev, "mmp-fb: frame buffer device init failed\n"); framebuffer_release(info); return ret; } static struct platform_driver mmpfb_driver = { .driver = { .name = "mmp-fb", }, .probe = mmpfb_probe, }; static int mmpfb_init(void) { return platform_driver_register(&mmpfb_driver); } module_init(mmpfb_init); MODULE_AUTHOR("Zhou Zhu <zhou.zhu@marvell.com>"); MODULE_DESCRIPTION("Framebuffer driver for Marvell displays"); MODULE_LICENSE("GPL");
gpl-2.0
woodbunny/JIT-ASLR-kernel
arch/mips/math-emu/dsemul.c
2466
5190
#include <linux/compiler.h> #include <linux/mm.h> #include <linux/signal.h> #include <linux/smp.h> #include <asm/asm.h> #include <asm/bootinfo.h> #include <asm/byteorder.h> #include <asm/cpu.h> #include <asm/inst.h> #include <asm/processor.h> #include <asm/uaccess.h> #include <asm/branch.h> #include <asm/mipsregs.h> #include <asm/cacheflush.h> #include <asm/fpu_emulator.h> #include "ieee754.h" /* Strap kernel emulator for full MIPS IV emulation */ #ifdef __mips #undef __mips #endif #define __mips 4 /* * Emulate the arbritrary instruction ir at xcp->cp0_epc. Required when * we have to emulate the instruction in a COP1 branch delay slot. Do * not change cp0_epc due to the instruction * * According to the spec: * 1) it shouldn't be a branch :-) * 2) it can be a COP instruction :-( * 3) if we are tring to run a protected memory space we must take * special care on memory access instructions :-( */ /* * "Trampoline" return routine to catch exception following * execution of delay-slot instruction execution. */ struct emuframe { mips_instruction emul; mips_instruction badinst; mips_instruction cookie; unsigned long epc; }; int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc) { extern asmlinkage void handle_dsemulret(void); struct emuframe __user *fr; int err; if ((get_isa16_mode(regs->cp0_epc) && ((ir >> 16) == MM_NOP16)) || (ir == 0)) { /* NOP is easy */ regs->cp0_epc = cpc; regs->cp0_cause &= ~CAUSEF_BD; return 0; } #ifdef DSEMUL_TRACE printk("dsemul %lx %lx\n", regs->cp0_epc, cpc); #endif /* * The strategy is to push the instruction onto the user stack * and put a trap after it which we can catch and jump to * the required address any alternative apart from full * instruction emulation!!. * * Algorithmics used a system call instruction, and * borrowed that vector. MIPS/Linux version is a bit * more heavyweight in the interests of portability and * multiprocessor support. 
For Linux we generate a * an unaligned access and force an address error exception. * * For embedded systems (stand-alone) we prefer to use a * non-existing CP1 instruction. This prevents us from emulating * branches, but gives us a cleaner interface to the exception * handler (single entry point). */ /* Ensure that the two instructions are in the same cache line */ fr = (struct emuframe __user *) ((regs->regs[29] - sizeof(struct emuframe)) & ~0x7); /* Verify that the stack pointer is not competely insane */ if (unlikely(!access_ok(VERIFY_WRITE, fr, sizeof(struct emuframe)))) return SIGBUS; if (get_isa16_mode(regs->cp0_epc)) { err = __put_user(ir >> 16, (u16 __user *)(&fr->emul)); err |= __put_user(ir & 0xffff, (u16 __user *)((long)(&fr->emul) + 2)); err |= __put_user(BREAK_MATH >> 16, (u16 __user *)(&fr->badinst)); err |= __put_user(BREAK_MATH & 0xffff, (u16 __user *)((long)(&fr->badinst) + 2)); } else { err = __put_user(ir, &fr->emul); err |= __put_user((mips_instruction)BREAK_MATH, &fr->badinst); } err |= __put_user((mips_instruction)BD_COOKIE, &fr->cookie); err |= __put_user(cpc, &fr->epc); if (unlikely(err)) { MIPS_FPU_EMU_INC_STATS(errors); return SIGBUS; } regs->cp0_epc = ((unsigned long) &fr->emul) | get_isa16_mode(regs->cp0_epc); flush_cache_sigtramp((unsigned long)&fr->badinst); return SIGILL; /* force out of emulation loop */ } int do_dsemulret(struct pt_regs *xcp) { struct emuframe __user *fr; unsigned long epc; u32 insn, cookie; int err = 0; u16 instr[2]; fr = (struct emuframe __user *) (msk_isa16_mode(xcp->cp0_epc) - sizeof(mips_instruction)); /* * If we can't even access the area, something is very wrong, but we'll * leave that to the default handling */ if (!access_ok(VERIFY_READ, fr, sizeof(struct emuframe))) return 0; /* * Do some sanity checking on the stackframe: * * - Is the instruction pointed to by the EPC an BREAK_MATH? * - Is the following memory word the BD_COOKIE? 
*/ if (get_isa16_mode(xcp->cp0_epc)) { err = __get_user(instr[0], (u16 __user *)(&fr->badinst)); err |= __get_user(instr[1], (u16 __user *)((long)(&fr->badinst) + 2)); insn = (instr[0] << 16) | instr[1]; } else { err = __get_user(insn, &fr->badinst); } err |= __get_user(cookie, &fr->cookie); if (unlikely(err || (insn != BREAK_MATH) || (cookie != BD_COOKIE))) { MIPS_FPU_EMU_INC_STATS(errors); return 0; } /* * At this point, we are satisfied that it's a BD emulation trap. Yes, * a user might have deliberately put two malformed and useless * instructions in a row in his program, in which case he's in for a * nasty surprise - the next instruction will be treated as a * continuation address! Alas, this seems to be the only way that we * can handle signals, recursion, and longjmps() in the context of * emulating the branch delay instruction. */ #ifdef DSEMUL_TRACE printk("dsemulret\n"); #endif if (__get_user(epc, &fr->epc)) { /* Saved EPC */ /* This is not a good situation to be in */ force_sig(SIGBUS, current); return 0; } /* Set EPC to return to post-branch instruction */ xcp->cp0_epc = epc; return 1; }
gpl-2.0
Ki113R/android_kernel_samsung_golden
kernel/freezer.c
2722
4109
/* * kernel/freezer.c - Function to freeze a process * * Originally from kernel/power/process.c */ #include <linux/interrupt.h> #include <linux/suspend.h> #include <linux/module.h> #include <linux/syscalls.h> #include <linux/freezer.h> /* * freezing is complete, mark current process as frozen */ static inline void frozen_process(void) { if (!unlikely(current->flags & PF_NOFREEZE)) { current->flags |= PF_FROZEN; smp_wmb(); } clear_freeze_flag(current); } /* Refrigerator is place where frozen processes are stored :-). */ void refrigerator(void) { /* Hmm, should we be allowed to suspend when there are realtime processes around? */ long save; task_lock(current); if (freezing(current)) { frozen_process(); task_unlock(current); } else { task_unlock(current); return; } save = current->state; pr_debug("%s entered refrigerator\n", current->comm); spin_lock_irq(&current->sighand->siglock); recalc_sigpending(); /* We sent fake signal, clean it up */ spin_unlock_irq(&current->sighand->siglock); /* prevent accounting of that task to load */ current->flags |= PF_FREEZING; for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); if (!frozen(current)) break; schedule(); } /* Remove the accounting blocker */ current->flags &= ~PF_FREEZING; pr_debug("%s left refrigerator\n", current->comm); __set_current_state(save); } EXPORT_SYMBOL(refrigerator); static void fake_signal_wake_up(struct task_struct *p) { unsigned long flags; spin_lock_irqsave(&p->sighand->siglock, flags); signal_wake_up(p, 0); spin_unlock_irqrestore(&p->sighand->siglock, flags); } /** * freeze_task - send a freeze request to given task * @p: task to send the request to * @sig_only: if set, the request will only be sent if the task has the * PF_FREEZER_NOSIG flag unset * Return value: 'false', if @sig_only is set and the task has * PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise * * The freeze request is sent by setting the tasks's TIF_FREEZE flag and * either sending a fake signal to it or waking it up, 
depending on whether * or not it has PF_FREEZER_NOSIG set. If @sig_only is set and the task * has PF_FREEZER_NOSIG set (ie. it is a typical kernel thread), its * TIF_FREEZE flag will not be set. */ bool freeze_task(struct task_struct *p, bool sig_only) { /* * We first check if the task is freezing and next if it has already * been frozen to avoid the race with frozen_process() which first marks * the task as frozen and next clears its TIF_FREEZE. */ if (!freezing(p)) { smp_rmb(); if (frozen(p)) return false; if (!sig_only || should_send_signal(p)) set_freeze_flag(p); else return false; } if (should_send_signal(p)) { fake_signal_wake_up(p); /* * fake_signal_wake_up() goes through p's scheduler * lock and guarantees that TASK_STOPPED/TRACED -> * TASK_RUNNING transition can't race with task state * testing in try_to_freeze_tasks(). */ } else if (sig_only) { return false; } else { wake_up_state(p, TASK_INTERRUPTIBLE); } return true; } void cancel_freezing(struct task_struct *p) { unsigned long flags; if (freezing(p)) { pr_debug(" clean up: %s\n", p->comm); clear_freeze_flag(p); spin_lock_irqsave(&p->sighand->siglock, flags); recalc_sigpending_and_wake(p); spin_unlock_irqrestore(&p->sighand->siglock, flags); } } static int __thaw_process(struct task_struct *p) { if (frozen(p)) { p->flags &= ~PF_FROZEN; return 1; } clear_freeze_flag(p); return 0; } /* * Wake up a frozen process * * task_lock() is needed to prevent the race with refrigerator() which may * occur if the freezing of tasks fails. Namely, without the lock, if the * freezing of tasks failed, thaw_tasks() might have run before a task in * refrigerator() could call frozen_process(), in which case the task would be * frozen and no one would thaw it. */ int thaw_process(struct task_struct *p) { task_lock(p); if (__thaw_process(p) == 1) { task_unlock(p); wake_up_process(p); return 1; } task_unlock(p); return 0; } EXPORT_SYMBOL(thaw_process);
gpl-2.0
eebssk1/CAF_MSM_Kernel_msm8916_64
sound/isa/ad1816a/ad1816a.c
2722
8987
/* card-ad1816a.c - driver for ADI SoundPort AD1816A based soundcards. Copyright (C) 2000 by Massimo Piccioni <dafastidio@libero.it> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/time.h> #include <linux/wait.h> #include <linux/pnp.h> #include <linux/module.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/ad1816a.h> #include <sound/mpu401.h> #include <sound/opl3.h> #define PFX "ad1816a: " MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>"); MODULE_DESCRIPTION("AD1816A, AD1815"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Highscreen,Sound-Boostar 16 3D}," "{Analog Devices,AD1815}," "{Analog Devices,AD1816A}," "{TerraTec,Base 64}," "{TerraTec,AudioSystem EWS64S}," "{Aztech/Newcom SC-16 3D}," "{Shark Predator ISA}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 1-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_ISAPNP; /* Enable this card */ static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static long mpu_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static long fm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* Pnp setup */ static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* Pnp setup */ 
static int dma1[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* PnP setup */ static int dma2[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* PnP setup */ static int clockfreq[SNDRV_CARDS]; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for ad1816a based soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for ad1816a based soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable ad1816a based soundcard."); module_param_array(clockfreq, int, NULL, 0444); MODULE_PARM_DESC(clockfreq, "Clock frequency for ad1816a driver (default = 0)."); static struct pnp_card_device_id snd_ad1816a_pnpids[] = { /* Analog Devices AD1815 */ { .id = "ADS7150", .devs = { { .id = "ADS7150" }, { .id = "ADS7151" } } }, /* Analog Device AD1816? */ { .id = "ADS7180", .devs = { { .id = "ADS7180" }, { .id = "ADS7181" } } }, /* Analog Devices AD1816A - added by Kenneth Platz <kxp@atl.hp.com> */ { .id = "ADS7181", .devs = { { .id = "ADS7180" }, { .id = "ADS7181" } } }, /* Analog Devices AD1816A - Aztech/Newcom SC-16 3D */ { .id = "AZT1022", .devs = { { .id = "AZT1018" }, { .id = "AZT2002" } } }, /* Highscreen Sound-Boostar 16 3D - added by Stefan Behnel */ { .id = "LWC1061", .devs = { { .id = "ADS7180" }, { .id = "ADS7181" } } }, /* Highscreen Sound-Boostar 16 3D */ { .id = "MDK1605", .devs = { { .id = "ADS7180" }, { .id = "ADS7181" } } }, /* Shark Predator ISA - added by Ken Arromdee */ { .id = "SMM7180", .devs = { { .id = "ADS7180" }, { .id = "ADS7181" } } }, /* Analog Devices AD1816A - Terratec AudioSystem EWS64 S */ { .id = "TER1112", .devs = { { .id = "ADS7180" }, { .id = "ADS7181" } } }, /* Analog Devices AD1816A - Terratec AudioSystem EWS64 S */ { .id = "TER1112", .devs = { { .id = "TER1100" }, { .id = "TER1101" } } }, /* Analog Devices AD1816A - Terratec Base 64 */ { .id = "TER1411", .devs = { { .id = "ADS7180" }, { .id = "ADS7181" } } }, /* end */ { .id = "" } }; MODULE_DEVICE_TABLE(pnp_card, 
snd_ad1816a_pnpids); #define DRIVER_NAME "snd-card-ad1816a" static int snd_card_ad1816a_pnp(int dev, struct pnp_card_link *card, const struct pnp_card_device_id *id) { struct pnp_dev *pdev; int err; pdev = pnp_request_card_device(card, id->devs[0].id, NULL); if (pdev == NULL) return -EBUSY; err = pnp_activate_dev(pdev); if (err < 0) { printk(KERN_ERR PFX "AUDIO PnP configure failure\n"); return -EBUSY; } port[dev] = pnp_port_start(pdev, 2); fm_port[dev] = pnp_port_start(pdev, 1); dma1[dev] = pnp_dma(pdev, 0); dma2[dev] = pnp_dma(pdev, 1); irq[dev] = pnp_irq(pdev, 0); pdev = pnp_request_card_device(card, id->devs[1].id, NULL); if (pdev == NULL) { mpu_port[dev] = -1; snd_printk(KERN_WARNING PFX "MPU401 device busy, skipping.\n"); return 0; } err = pnp_activate_dev(pdev); if (err < 0) { printk(KERN_ERR PFX "MPU401 PnP configure failure\n"); mpu_port[dev] = -1; } else { mpu_port[dev] = pnp_port_start(pdev, 0); mpu_irq[dev] = pnp_irq(pdev, 0); } return 0; } static int snd_card_ad1816a_probe(int dev, struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { int error; struct snd_card *card; struct snd_ad1816a *chip; struct snd_opl3 *opl3; struct snd_timer *timer; error = snd_card_create(index[dev], id[dev], THIS_MODULE, sizeof(struct snd_ad1816a), &card); if (error < 0) return error; chip = card->private_data; if ((error = snd_card_ad1816a_pnp(dev, pcard, pid))) { snd_card_free(card); return error; } snd_card_set_dev(card, &pcard->card->dev); if ((error = snd_ad1816a_create(card, port[dev], irq[dev], dma1[dev], dma2[dev], chip)) < 0) { snd_card_free(card); return error; } if (clockfreq[dev] >= 5000 && clockfreq[dev] <= 100000) chip->clock_freq = clockfreq[dev]; strcpy(card->driver, "AD1816A"); strcpy(card->shortname, "ADI SoundPort AD1816A"); sprintf(card->longname, "%s, SS at 0x%lx, irq %d, dma %d&%d", card->shortname, chip->port, irq[dev], dma1[dev], dma2[dev]); if ((error = snd_ad1816a_pcm(chip, 0, NULL)) < 0) { snd_card_free(card); return error; } if 
((error = snd_ad1816a_mixer(chip)) < 0) { snd_card_free(card); return error; } error = snd_ad1816a_timer(chip, 0, &timer); if (error < 0) { snd_card_free(card); return error; } if (mpu_port[dev] > 0) { if (snd_mpu401_uart_new(card, 0, MPU401_HW_MPU401, mpu_port[dev], 0, mpu_irq[dev], NULL) < 0) printk(KERN_ERR PFX "no MPU-401 device at 0x%lx.\n", mpu_port[dev]); } if (fm_port[dev] > 0) { if (snd_opl3_create(card, fm_port[dev], fm_port[dev] + 2, OPL3_HW_AUTO, 0, &opl3) < 0) { printk(KERN_ERR PFX "no OPL device at 0x%lx-0x%lx.\n", fm_port[dev], fm_port[dev] + 2); } else { error = snd_opl3_hwdep_new(opl3, 0, 1, NULL); if (error < 0) { snd_card_free(card); return error; } } } if ((error = snd_card_register(card)) < 0) { snd_card_free(card); return error; } pnp_set_card_drvdata(pcard, card); return 0; } static unsigned int ad1816a_devices; static int snd_ad1816a_pnp_detect(struct pnp_card_link *card, const struct pnp_card_device_id *id) { static int dev; int res; for ( ; dev < SNDRV_CARDS; dev++) { if (!enable[dev]) continue; res = snd_card_ad1816a_probe(dev, card, id); if (res < 0) return res; dev++; ad1816a_devices++; return 0; } return -ENODEV; } static void snd_ad1816a_pnp_remove(struct pnp_card_link *pcard) { snd_card_free(pnp_get_card_drvdata(pcard)); pnp_set_card_drvdata(pcard, NULL); } #ifdef CONFIG_PM static int snd_ad1816a_pnp_suspend(struct pnp_card_link *pcard, pm_message_t state) { struct snd_card *card = pnp_get_card_drvdata(pcard); snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); snd_ad1816a_suspend(card->private_data); return 0; } static int snd_ad1816a_pnp_resume(struct pnp_card_link *pcard) { struct snd_card *card = pnp_get_card_drvdata(pcard); snd_ad1816a_resume(card->private_data); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif static struct pnp_card_driver ad1816a_pnpc_driver = { .flags = PNP_DRIVER_RES_DISABLE, .name = "ad1816a", .id_table = snd_ad1816a_pnpids, .probe = snd_ad1816a_pnp_detect, .remove = 
snd_ad1816a_pnp_remove, #ifdef CONFIG_PM .suspend = snd_ad1816a_pnp_suspend, .resume = snd_ad1816a_pnp_resume, #endif }; static int __init alsa_card_ad1816a_init(void) { int err; err = pnp_register_card_driver(&ad1816a_pnpc_driver); if (err) return err; if (!ad1816a_devices) { pnp_unregister_card_driver(&ad1816a_pnpc_driver); #ifdef MODULE printk(KERN_ERR "no AD1816A based soundcards found.\n"); #endif /* MODULE */ return -ENODEV; } return 0; } static void __exit alsa_card_ad1816a_exit(void) { pnp_unregister_card_driver(&ad1816a_pnpc_driver); } module_init(alsa_card_ad1816a_init) module_exit(alsa_card_ad1816a_exit)
gpl-2.0
TheMeddlingMonk/android_kernel_toshiba_tostab03
lib/locking-selftest.c
3490
29316
/* * lib/locking-selftest.c * * Testsuite for various locking APIs: spinlocks, rwlocks, * mutexes and rw-semaphores. * * It is checking both false positives and false negatives. * * Started by Ingo Molnar: * * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> */ #include <linux/rwsem.h> #include <linux/mutex.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/lockdep.h> #include <linux/spinlock.h> #include <linux/kallsyms.h> #include <linux/interrupt.h> #include <linux/debug_locks.h> #include <linux/irqflags.h> /* * Change this to 1 if you want to see the failure printouts: */ static unsigned int debug_locks_verbose; static int __init setup_debug_locks_verbose(char *str) { get_option(&str, &debug_locks_verbose); return 1; } __setup("debug_locks_verbose=", setup_debug_locks_verbose); #define FAILURE 0 #define SUCCESS 1 #define LOCKTYPE_SPIN 0x1 #define LOCKTYPE_RWLOCK 0x2 #define LOCKTYPE_MUTEX 0x4 #define LOCKTYPE_RWSEM 0x8 /* * Normal standalone locks, for the circular and irq-context * dependency tests: */ static DEFINE_SPINLOCK(lock_A); static DEFINE_SPINLOCK(lock_B); static DEFINE_SPINLOCK(lock_C); static DEFINE_SPINLOCK(lock_D); static DEFINE_RWLOCK(rwlock_A); static DEFINE_RWLOCK(rwlock_B); static DEFINE_RWLOCK(rwlock_C); static DEFINE_RWLOCK(rwlock_D); static DEFINE_MUTEX(mutex_A); static DEFINE_MUTEX(mutex_B); static DEFINE_MUTEX(mutex_C); static DEFINE_MUTEX(mutex_D); static DECLARE_RWSEM(rwsem_A); static DECLARE_RWSEM(rwsem_B); static DECLARE_RWSEM(rwsem_C); static DECLARE_RWSEM(rwsem_D); /* * Locks that we initialize dynamically as well so that * e.g. X1 and X2 becomes two instances of the same class, * but X* and Y* are different classes. 
We do this so that * we do not trigger a real lockup: */ static DEFINE_SPINLOCK(lock_X1); static DEFINE_SPINLOCK(lock_X2); static DEFINE_SPINLOCK(lock_Y1); static DEFINE_SPINLOCK(lock_Y2); static DEFINE_SPINLOCK(lock_Z1); static DEFINE_SPINLOCK(lock_Z2); static DEFINE_RWLOCK(rwlock_X1); static DEFINE_RWLOCK(rwlock_X2); static DEFINE_RWLOCK(rwlock_Y1); static DEFINE_RWLOCK(rwlock_Y2); static DEFINE_RWLOCK(rwlock_Z1); static DEFINE_RWLOCK(rwlock_Z2); static DEFINE_MUTEX(mutex_X1); static DEFINE_MUTEX(mutex_X2); static DEFINE_MUTEX(mutex_Y1); static DEFINE_MUTEX(mutex_Y2); static DEFINE_MUTEX(mutex_Z1); static DEFINE_MUTEX(mutex_Z2); static DECLARE_RWSEM(rwsem_X1); static DECLARE_RWSEM(rwsem_X2); static DECLARE_RWSEM(rwsem_Y1); static DECLARE_RWSEM(rwsem_Y2); static DECLARE_RWSEM(rwsem_Z1); static DECLARE_RWSEM(rwsem_Z2); /* * non-inlined runtime initializers, to let separate locks share * the same lock-class: */ #define INIT_CLASS_FUNC(class) \ static noinline void \ init_class_##class(spinlock_t *lock, rwlock_t *rwlock, struct mutex *mutex, \ struct rw_semaphore *rwsem) \ { \ spin_lock_init(lock); \ rwlock_init(rwlock); \ mutex_init(mutex); \ init_rwsem(rwsem); \ } INIT_CLASS_FUNC(X) INIT_CLASS_FUNC(Y) INIT_CLASS_FUNC(Z) static void init_shared_classes(void) { init_class_X(&lock_X1, &rwlock_X1, &mutex_X1, &rwsem_X1); init_class_X(&lock_X2, &rwlock_X2, &mutex_X2, &rwsem_X2); init_class_Y(&lock_Y1, &rwlock_Y1, &mutex_Y1, &rwsem_Y1); init_class_Y(&lock_Y2, &rwlock_Y2, &mutex_Y2, &rwsem_Y2); init_class_Z(&lock_Z1, &rwlock_Z1, &mutex_Z1, &rwsem_Z1); init_class_Z(&lock_Z2, &rwlock_Z2, &mutex_Z2, &rwsem_Z2); } /* * For spinlocks and rwlocks we also do hardirq-safe / softirq-safe tests. 
* The following functions use a lock from a simulated hardirq/softirq * context, causing the locks to be marked as hardirq-safe/softirq-safe: */ #define HARDIRQ_DISABLE local_irq_disable #define HARDIRQ_ENABLE local_irq_enable #define HARDIRQ_ENTER() \ local_irq_disable(); \ __irq_enter(); \ WARN_ON(!in_irq()); #define HARDIRQ_EXIT() \ __irq_exit(); \ local_irq_enable(); #define SOFTIRQ_DISABLE local_bh_disable #define SOFTIRQ_ENABLE local_bh_enable #define SOFTIRQ_ENTER() \ local_bh_disable(); \ local_irq_disable(); \ lockdep_softirq_enter(); \ WARN_ON(!in_softirq()); #define SOFTIRQ_EXIT() \ lockdep_softirq_exit(); \ local_irq_enable(); \ local_bh_enable(); /* * Shortcuts for lock/unlock API variants, to keep * the testcases compact: */ #define L(x) spin_lock(&lock_##x) #define U(x) spin_unlock(&lock_##x) #define LU(x) L(x); U(x) #define SI(x) spin_lock_init(&lock_##x) #define WL(x) write_lock(&rwlock_##x) #define WU(x) write_unlock(&rwlock_##x) #define WLU(x) WL(x); WU(x) #define RL(x) read_lock(&rwlock_##x) #define RU(x) read_unlock(&rwlock_##x) #define RLU(x) RL(x); RU(x) #define RWI(x) rwlock_init(&rwlock_##x) #define ML(x) mutex_lock(&mutex_##x) #define MU(x) mutex_unlock(&mutex_##x) #define MI(x) mutex_init(&mutex_##x) #define WSL(x) down_write(&rwsem_##x) #define WSU(x) up_write(&rwsem_##x) #define RSL(x) down_read(&rwsem_##x) #define RSU(x) up_read(&rwsem_##x) #define RWSI(x) init_rwsem(&rwsem_##x) #define LOCK_UNLOCK_2(x,y) LOCK(x); LOCK(y); UNLOCK(y); UNLOCK(x) /* * Generate different permutations of the same testcase, using * the same basic lock-dependency/state events: */ #define GENERATE_TESTCASE(name) \ \ static void name(void) { E(); } #define GENERATE_PERMUTATIONS_2_EVENTS(name) \ \ static void name##_12(void) { E1(); E2(); } \ static void name##_21(void) { E2(); E1(); } #define GENERATE_PERMUTATIONS_3_EVENTS(name) \ \ static void name##_123(void) { E1(); E2(); E3(); } \ static void name##_132(void) { E1(); E3(); E2(); } \ static void 
name##_213(void) { E2(); E1(); E3(); } \ static void name##_231(void) { E2(); E3(); E1(); } \ static void name##_312(void) { E3(); E1(); E2(); } \ static void name##_321(void) { E3(); E2(); E1(); } /* * AA deadlock: */ #define E() \ \ LOCK(X1); \ LOCK(X2); /* this one should fail */ /* * 6 testcases: */ #include "locking-selftest-spin.h" GENERATE_TESTCASE(AA_spin) #include "locking-selftest-wlock.h" GENERATE_TESTCASE(AA_wlock) #include "locking-selftest-rlock.h" GENERATE_TESTCASE(AA_rlock) #include "locking-selftest-mutex.h" GENERATE_TESTCASE(AA_mutex) #include "locking-selftest-wsem.h" GENERATE_TESTCASE(AA_wsem) #include "locking-selftest-rsem.h" GENERATE_TESTCASE(AA_rsem) #undef E /* * Special-case for read-locking, they are * allowed to recurse on the same lock class: */ static void rlock_AA1(void) { RL(X1); RL(X1); // this one should NOT fail } static void rlock_AA1B(void) { RL(X1); RL(X2); // this one should NOT fail } static void rsem_AA1(void) { RSL(X1); RSL(X1); // this one should fail } static void rsem_AA1B(void) { RSL(X1); RSL(X2); // this one should fail } /* * The mixing of read and write locks is not allowed: */ static void rlock_AA2(void) { RL(X1); WL(X2); // this one should fail } static void rsem_AA2(void) { RSL(X1); WSL(X2); // this one should fail } static void rlock_AA3(void) { WL(X1); RL(X2); // this one should fail } static void rsem_AA3(void) { WSL(X1); RSL(X2); // this one should fail } /* * ABBA deadlock: */ #define E() \ \ LOCK_UNLOCK_2(A, B); \ LOCK_UNLOCK_2(B, A); /* fail */ /* * 6 testcases: */ #include "locking-selftest-spin.h" GENERATE_TESTCASE(ABBA_spin) #include "locking-selftest-wlock.h" GENERATE_TESTCASE(ABBA_wlock) #include "locking-selftest-rlock.h" GENERATE_TESTCASE(ABBA_rlock) #include "locking-selftest-mutex.h" GENERATE_TESTCASE(ABBA_mutex) #include "locking-selftest-wsem.h" GENERATE_TESTCASE(ABBA_wsem) #include "locking-selftest-rsem.h" GENERATE_TESTCASE(ABBA_rsem) #undef E /* * AB BC CA deadlock: */ #define E() \ \ 
LOCK_UNLOCK_2(A, B); \ LOCK_UNLOCK_2(B, C); \ LOCK_UNLOCK_2(C, A); /* fail */ /* * 6 testcases: */ #include "locking-selftest-spin.h" GENERATE_TESTCASE(ABBCCA_spin) #include "locking-selftest-wlock.h" GENERATE_TESTCASE(ABBCCA_wlock) #include "locking-selftest-rlock.h" GENERATE_TESTCASE(ABBCCA_rlock) #include "locking-selftest-mutex.h" GENERATE_TESTCASE(ABBCCA_mutex) #include "locking-selftest-wsem.h" GENERATE_TESTCASE(ABBCCA_wsem) #include "locking-selftest-rsem.h" GENERATE_TESTCASE(ABBCCA_rsem) #undef E /* * AB CA BC deadlock: */ #define E() \ \ LOCK_UNLOCK_2(A, B); \ LOCK_UNLOCK_2(C, A); \ LOCK_UNLOCK_2(B, C); /* fail */ /* * 6 testcases: */ #include "locking-selftest-spin.h" GENERATE_TESTCASE(ABCABC_spin) #include "locking-selftest-wlock.h" GENERATE_TESTCASE(ABCABC_wlock) #include "locking-selftest-rlock.h" GENERATE_TESTCASE(ABCABC_rlock) #include "locking-selftest-mutex.h" GENERATE_TESTCASE(ABCABC_mutex) #include "locking-selftest-wsem.h" GENERATE_TESTCASE(ABCABC_wsem) #include "locking-selftest-rsem.h" GENERATE_TESTCASE(ABCABC_rsem) #undef E /* * AB BC CD DA deadlock: */ #define E() \ \ LOCK_UNLOCK_2(A, B); \ LOCK_UNLOCK_2(B, C); \ LOCK_UNLOCK_2(C, D); \ LOCK_UNLOCK_2(D, A); /* fail */ /* * 6 testcases: */ #include "locking-selftest-spin.h" GENERATE_TESTCASE(ABBCCDDA_spin) #include "locking-selftest-wlock.h" GENERATE_TESTCASE(ABBCCDDA_wlock) #include "locking-selftest-rlock.h" GENERATE_TESTCASE(ABBCCDDA_rlock) #include "locking-selftest-mutex.h" GENERATE_TESTCASE(ABBCCDDA_mutex) #include "locking-selftest-wsem.h" GENERATE_TESTCASE(ABBCCDDA_wsem) #include "locking-selftest-rsem.h" GENERATE_TESTCASE(ABBCCDDA_rsem) #undef E /* * AB CD BD DA deadlock: */ #define E() \ \ LOCK_UNLOCK_2(A, B); \ LOCK_UNLOCK_2(C, D); \ LOCK_UNLOCK_2(B, D); \ LOCK_UNLOCK_2(D, A); /* fail */ /* * 6 testcases: */ #include "locking-selftest-spin.h" GENERATE_TESTCASE(ABCDBDDA_spin) #include "locking-selftest-wlock.h" GENERATE_TESTCASE(ABCDBDDA_wlock) #include "locking-selftest-rlock.h" 
GENERATE_TESTCASE(ABCDBDDA_rlock) #include "locking-selftest-mutex.h" GENERATE_TESTCASE(ABCDBDDA_mutex) #include "locking-selftest-wsem.h" GENERATE_TESTCASE(ABCDBDDA_wsem) #include "locking-selftest-rsem.h" GENERATE_TESTCASE(ABCDBDDA_rsem) #undef E /* * AB CD BC DA deadlock: */ #define E() \ \ LOCK_UNLOCK_2(A, B); \ LOCK_UNLOCK_2(C, D); \ LOCK_UNLOCK_2(B, C); \ LOCK_UNLOCK_2(D, A); /* fail */ /* * 6 testcases: */ #include "locking-selftest-spin.h" GENERATE_TESTCASE(ABCDBCDA_spin) #include "locking-selftest-wlock.h" GENERATE_TESTCASE(ABCDBCDA_wlock) #include "locking-selftest-rlock.h" GENERATE_TESTCASE(ABCDBCDA_rlock) #include "locking-selftest-mutex.h" GENERATE_TESTCASE(ABCDBCDA_mutex) #include "locking-selftest-wsem.h" GENERATE_TESTCASE(ABCDBCDA_wsem) #include "locking-selftest-rsem.h" GENERATE_TESTCASE(ABCDBCDA_rsem) #undef E /* * Double unlock: */ #define E() \ \ LOCK(A); \ UNLOCK(A); \ UNLOCK(A); /* fail */ /* * 6 testcases: */ #include "locking-selftest-spin.h" GENERATE_TESTCASE(double_unlock_spin) #include "locking-selftest-wlock.h" GENERATE_TESTCASE(double_unlock_wlock) #include "locking-selftest-rlock.h" GENERATE_TESTCASE(double_unlock_rlock) #include "locking-selftest-mutex.h" GENERATE_TESTCASE(double_unlock_mutex) #include "locking-selftest-wsem.h" GENERATE_TESTCASE(double_unlock_wsem) #include "locking-selftest-rsem.h" GENERATE_TESTCASE(double_unlock_rsem) #undef E /* * Bad unlock ordering: */ #define E() \ \ LOCK(A); \ LOCK(B); \ UNLOCK(A); /* fail */ \ UNLOCK(B); /* * 6 testcases: */ #include "locking-selftest-spin.h" GENERATE_TESTCASE(bad_unlock_order_spin) #include "locking-selftest-wlock.h" GENERATE_TESTCASE(bad_unlock_order_wlock) #include "locking-selftest-rlock.h" GENERATE_TESTCASE(bad_unlock_order_rlock) #include "locking-selftest-mutex.h" GENERATE_TESTCASE(bad_unlock_order_mutex) #include "locking-selftest-wsem.h" GENERATE_TESTCASE(bad_unlock_order_wsem) #include "locking-selftest-rsem.h" GENERATE_TESTCASE(bad_unlock_order_rsem) #undef E /* * 
initializing a held lock: */ #define E() \ \ LOCK(A); \ INIT(A); /* fail */ /* * 6 testcases: */ #include "locking-selftest-spin.h" GENERATE_TESTCASE(init_held_spin) #include "locking-selftest-wlock.h" GENERATE_TESTCASE(init_held_wlock) #include "locking-selftest-rlock.h" GENERATE_TESTCASE(init_held_rlock) #include "locking-selftest-mutex.h" GENERATE_TESTCASE(init_held_mutex) #include "locking-selftest-wsem.h" GENERATE_TESTCASE(init_held_wsem) #include "locking-selftest-rsem.h" GENERATE_TESTCASE(init_held_rsem) #undef E /* * locking an irq-safe lock with irqs enabled: */ #define E1() \ \ IRQ_ENTER(); \ LOCK(A); \ UNLOCK(A); \ IRQ_EXIT(); #define E2() \ \ LOCK(A); \ UNLOCK(A); /* * Generate 24 testcases: */ #include "locking-selftest-spin-hardirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin) #include "locking-selftest-rlock-hardirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock) #include "locking-selftest-wlock-hardirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_wlock) #include "locking-selftest-spin-softirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_spin) #include "locking-selftest-rlock-softirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock) #include "locking-selftest-wlock-softirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock) #undef E1 #undef E2 /* * Enabling hardirqs with a softirq-safe lock held: */ #define E1() \ \ SOFTIRQ_ENTER(); \ LOCK(A); \ UNLOCK(A); \ SOFTIRQ_EXIT(); #define E2() \ \ HARDIRQ_DISABLE(); \ LOCK(A); \ HARDIRQ_ENABLE(); \ UNLOCK(A); /* * Generate 12 testcases: */ #include "locking-selftest-spin.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_spin) #include "locking-selftest-wlock.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_wlock) #include "locking-selftest-rlock.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock) #undef E1 #undef E2 /* * Enabling irqs with an irq-safe lock held: */ #define E1() \ \ IRQ_ENTER(); \ LOCK(A); \ UNLOCK(A); \ IRQ_EXIT(); #define E2() \ \ IRQ_DISABLE(); \ LOCK(A); \ IRQ_ENABLE(); 
\ UNLOCK(A); /* * Generate 24 testcases: */ #include "locking-selftest-spin-hardirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin) #include "locking-selftest-rlock-hardirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock) #include "locking-selftest-wlock-hardirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_wlock) #include "locking-selftest-spin-softirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_spin) #include "locking-selftest-rlock-softirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock) #include "locking-selftest-wlock-softirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock) #undef E1 #undef E2 /* * Acquiring a irq-unsafe lock while holding an irq-safe-lock: */ #define E1() \ \ LOCK(A); \ LOCK(B); \ UNLOCK(B); \ UNLOCK(A); \ #define E2() \ \ LOCK(B); \ UNLOCK(B); #define E3() \ \ IRQ_ENTER(); \ LOCK(A); \ UNLOCK(A); \ IRQ_EXIT(); /* * Generate 36 testcases: */ #include "locking-selftest-spin-hardirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin) #include "locking-selftest-rlock-hardirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock) #include "locking-selftest-wlock-hardirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_wlock) #include "locking-selftest-spin-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_spin) #include "locking-selftest-rlock-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock) #include "locking-selftest-wlock-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock) #undef E1 #undef E2 #undef E3 /* * If a lock turns into softirq-safe, but earlier it took * a softirq-unsafe lock: */ #define E1() \ IRQ_DISABLE(); \ LOCK(A); \ LOCK(B); \ UNLOCK(B); \ UNLOCK(A); \ IRQ_ENABLE(); #define E2() \ LOCK(B); \ UNLOCK(B); #define E3() \ IRQ_ENTER(); \ LOCK(A); \ UNLOCK(A); \ IRQ_EXIT(); /* * Generate 36 testcases: */ #include "locking-selftest-spin-hardirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin) #include "locking-selftest-rlock-hardirq.h" 
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock) #include "locking-selftest-wlock-hardirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_wlock) #include "locking-selftest-spin-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_spin) #include "locking-selftest-rlock-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock) #include "locking-selftest-wlock-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock) #undef E1 #undef E2 #undef E3 /* * read-lock / write-lock irq inversion. * * Deadlock scenario: * * CPU#1 is at #1, i.e. it has write-locked A, but has not * taken B yet. * * CPU#2 is at #2, i.e. it has locked B. * * Hardirq hits CPU#2 at point #2 and is trying to read-lock A. * * The deadlock occurs because CPU#1 will spin on B, and CPU#2 * will spin on A. */ #define E1() \ \ IRQ_DISABLE(); \ WL(A); \ LOCK(B); \ UNLOCK(B); \ WU(A); \ IRQ_ENABLE(); #define E2() \ \ LOCK(B); \ UNLOCK(B); #define E3() \ \ IRQ_ENTER(); \ RL(A); \ RU(A); \ IRQ_EXIT(); /* * Generate 36 testcases: */ #include "locking-selftest-spin-hardirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_spin) #include "locking-selftest-rlock-hardirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_rlock) #include "locking-selftest-wlock-hardirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_wlock) #include "locking-selftest-spin-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_spin) #include "locking-selftest-rlock-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_rlock) #include "locking-selftest-wlock-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock) #undef E1 #undef E2 #undef E3 /* * read-lock / write-lock recursion that is actually safe. 
*/ #define E1() \ \ IRQ_DISABLE(); \ WL(A); \ WU(A); \ IRQ_ENABLE(); #define E2() \ \ RL(A); \ RU(A); \ #define E3() \ \ IRQ_ENTER(); \ RL(A); \ L(B); \ U(B); \ RU(A); \ IRQ_EXIT(); /* * Generate 12 testcases: */ #include "locking-selftest-hardirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard) #include "locking-selftest-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft) #undef E1 #undef E2 #undef E3 /* * read-lock / write-lock recursion that is unsafe. */ #define E1() \ \ IRQ_DISABLE(); \ L(B); \ WL(A); \ WU(A); \ U(B); \ IRQ_ENABLE(); #define E2() \ \ RL(A); \ RU(A); \ #define E3() \ \ IRQ_ENTER(); \ L(B); \ U(B); \ IRQ_EXIT(); /* * Generate 12 testcases: */ #include "locking-selftest-hardirq.h" // GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard) #include "locking-selftest-softirq.h" // GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft) #ifdef CONFIG_DEBUG_LOCK_ALLOC # define I_SPINLOCK(x) lockdep_reset_lock(&lock_##x.dep_map) # define I_RWLOCK(x) lockdep_reset_lock(&rwlock_##x.dep_map) # define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map) # define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map) #else # define I_SPINLOCK(x) # define I_RWLOCK(x) # define I_MUTEX(x) # define I_RWSEM(x) #endif #define I1(x) \ do { \ I_SPINLOCK(x); \ I_RWLOCK(x); \ I_MUTEX(x); \ I_RWSEM(x); \ } while (0) #define I2(x) \ do { \ spin_lock_init(&lock_##x); \ rwlock_init(&rwlock_##x); \ mutex_init(&mutex_##x); \ init_rwsem(&rwsem_##x); \ } while (0) static void reset_locks(void) { local_irq_disable(); I1(A); I1(B); I1(C); I1(D); I1(X1); I1(X2); I1(Y1); I1(Y2); I1(Z1); I1(Z2); lockdep_reset(); I2(A); I2(B); I2(C); I2(D); init_shared_classes(); local_irq_enable(); } #undef I static int testcase_total; static int testcase_successes; static int expected_testcase_failures; static int unexpected_testcase_failures; static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask) { unsigned long saved_preempt_count = preempt_count(); 
int expected_failure = 0; WARN_ON(irqs_disabled()); testcase_fn(); /* * Filter out expected failures: */ #ifndef CONFIG_PROVE_LOCKING if ((lockclass_mask & LOCKTYPE_SPIN) && debug_locks != expected) expected_failure = 1; if ((lockclass_mask & LOCKTYPE_RWLOCK) && debug_locks != expected) expected_failure = 1; if ((lockclass_mask & LOCKTYPE_MUTEX) && debug_locks != expected) expected_failure = 1; if ((lockclass_mask & LOCKTYPE_RWSEM) && debug_locks != expected) expected_failure = 1; #endif if (debug_locks != expected) { if (expected_failure) { expected_testcase_failures++; printk("failed|"); } else { unexpected_testcase_failures++; printk("FAILED|"); dump_stack(); } } else { testcase_successes++; printk(" ok |"); } testcase_total++; if (debug_locks_verbose) printk(" lockclass mask: %x, debug_locks: %d, expected: %d\n", lockclass_mask, debug_locks, expected); /* * Some tests (e.g. double-unlock) might corrupt the preemption * count, so restore it: */ preempt_count() = saved_preempt_count; #ifdef CONFIG_TRACE_IRQFLAGS if (softirq_count()) current->softirqs_enabled = 0; else current->softirqs_enabled = 1; #endif reset_locks(); } static inline void print_testname(const char *testname) { printk("%33s:", testname); } #define DO_TESTCASE_1(desc, name, nr) \ print_testname(desc"/"#nr); \ dotest(name##_##nr, SUCCESS, LOCKTYPE_RWLOCK); \ printk("\n"); #define DO_TESTCASE_1B(desc, name, nr) \ print_testname(desc"/"#nr); \ dotest(name##_##nr, FAILURE, LOCKTYPE_RWLOCK); \ printk("\n"); #define DO_TESTCASE_3(desc, name, nr) \ print_testname(desc"/"#nr); \ dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN); \ dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \ dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \ printk("\n"); #define DO_TESTCASE_3RW(desc, name, nr) \ print_testname(desc"/"#nr); \ dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN|LOCKTYPE_RWLOCK);\ dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \ dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \ 
printk("\n"); #define DO_TESTCASE_6(desc, name) \ print_testname(desc); \ dotest(name##_spin, FAILURE, LOCKTYPE_SPIN); \ dotest(name##_wlock, FAILURE, LOCKTYPE_RWLOCK); \ dotest(name##_rlock, FAILURE, LOCKTYPE_RWLOCK); \ dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \ dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \ dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \ printk("\n"); #define DO_TESTCASE_6_SUCCESS(desc, name) \ print_testname(desc); \ dotest(name##_spin, SUCCESS, LOCKTYPE_SPIN); \ dotest(name##_wlock, SUCCESS, LOCKTYPE_RWLOCK); \ dotest(name##_rlock, SUCCESS, LOCKTYPE_RWLOCK); \ dotest(name##_mutex, SUCCESS, LOCKTYPE_MUTEX); \ dotest(name##_wsem, SUCCESS, LOCKTYPE_RWSEM); \ dotest(name##_rsem, SUCCESS, LOCKTYPE_RWSEM); \ printk("\n"); /* * 'read' variant: rlocks must not trigger. */ #define DO_TESTCASE_6R(desc, name) \ print_testname(desc); \ dotest(name##_spin, FAILURE, LOCKTYPE_SPIN); \ dotest(name##_wlock, FAILURE, LOCKTYPE_RWLOCK); \ dotest(name##_rlock, SUCCESS, LOCKTYPE_RWLOCK); \ dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \ dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \ dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \ printk("\n"); #define DO_TESTCASE_2I(desc, name, nr) \ DO_TESTCASE_1("hard-"desc, name##_hard, nr); \ DO_TESTCASE_1("soft-"desc, name##_soft, nr); #define DO_TESTCASE_2IB(desc, name, nr) \ DO_TESTCASE_1B("hard-"desc, name##_hard, nr); \ DO_TESTCASE_1B("soft-"desc, name##_soft, nr); #define DO_TESTCASE_6I(desc, name, nr) \ DO_TESTCASE_3("hard-"desc, name##_hard, nr); \ DO_TESTCASE_3("soft-"desc, name##_soft, nr); #define DO_TESTCASE_6IRW(desc, name, nr) \ DO_TESTCASE_3RW("hard-"desc, name##_hard, nr); \ DO_TESTCASE_3RW("soft-"desc, name##_soft, nr); #define DO_TESTCASE_2x3(desc, name) \ DO_TESTCASE_3(desc, name, 12); \ DO_TESTCASE_3(desc, name, 21); #define DO_TESTCASE_2x6(desc, name) \ DO_TESTCASE_6I(desc, name, 12); \ DO_TESTCASE_6I(desc, name, 21); #define DO_TESTCASE_6x2(desc, name) \ DO_TESTCASE_2I(desc, name, 123); \ 
DO_TESTCASE_2I(desc, name, 132); \ DO_TESTCASE_2I(desc, name, 213); \ DO_TESTCASE_2I(desc, name, 231); \ DO_TESTCASE_2I(desc, name, 312); \ DO_TESTCASE_2I(desc, name, 321); #define DO_TESTCASE_6x2B(desc, name) \ DO_TESTCASE_2IB(desc, name, 123); \ DO_TESTCASE_2IB(desc, name, 132); \ DO_TESTCASE_2IB(desc, name, 213); \ DO_TESTCASE_2IB(desc, name, 231); \ DO_TESTCASE_2IB(desc, name, 312); \ DO_TESTCASE_2IB(desc, name, 321); #define DO_TESTCASE_6x6(desc, name) \ DO_TESTCASE_6I(desc, name, 123); \ DO_TESTCASE_6I(desc, name, 132); \ DO_TESTCASE_6I(desc, name, 213); \ DO_TESTCASE_6I(desc, name, 231); \ DO_TESTCASE_6I(desc, name, 312); \ DO_TESTCASE_6I(desc, name, 321); #define DO_TESTCASE_6x6RW(desc, name) \ DO_TESTCASE_6IRW(desc, name, 123); \ DO_TESTCASE_6IRW(desc, name, 132); \ DO_TESTCASE_6IRW(desc, name, 213); \ DO_TESTCASE_6IRW(desc, name, 231); \ DO_TESTCASE_6IRW(desc, name, 312); \ DO_TESTCASE_6IRW(desc, name, 321); void locking_selftest(void) { /* * Got a locking failure before the selftest ran? 
*/ if (!debug_locks) { printk("----------------------------------\n"); printk("| Locking API testsuite disabled |\n"); printk("----------------------------------\n"); return; } /* * Run the testsuite: */ printk("------------------------\n"); printk("| Locking API testsuite:\n"); printk("----------------------------------------------------------------------------\n"); printk(" | spin |wlock |rlock |mutex | wsem | rsem |\n"); printk(" --------------------------------------------------------------------------\n"); init_shared_classes(); debug_locks_silent = !debug_locks_verbose; DO_TESTCASE_6R("A-A deadlock", AA); DO_TESTCASE_6R("A-B-B-A deadlock", ABBA); DO_TESTCASE_6R("A-B-B-C-C-A deadlock", ABBCCA); DO_TESTCASE_6R("A-B-C-A-B-C deadlock", ABCABC); DO_TESTCASE_6R("A-B-B-C-C-D-D-A deadlock", ABBCCDDA); DO_TESTCASE_6R("A-B-C-D-B-D-D-A deadlock", ABCDBDDA); DO_TESTCASE_6R("A-B-C-D-B-C-D-A deadlock", ABCDBCDA); DO_TESTCASE_6("double unlock", double_unlock); DO_TESTCASE_6("initialize held", init_held); DO_TESTCASE_6_SUCCESS("bad unlock order", bad_unlock_order); printk(" --------------------------------------------------------------------------\n"); print_testname("recursive read-lock"); printk(" |"); dotest(rlock_AA1, SUCCESS, LOCKTYPE_RWLOCK); printk(" |"); dotest(rsem_AA1, FAILURE, LOCKTYPE_RWSEM); printk("\n"); print_testname("recursive read-lock #2"); printk(" |"); dotest(rlock_AA1B, SUCCESS, LOCKTYPE_RWLOCK); printk(" |"); dotest(rsem_AA1B, FAILURE, LOCKTYPE_RWSEM); printk("\n"); print_testname("mixed read-write-lock"); printk(" |"); dotest(rlock_AA2, FAILURE, LOCKTYPE_RWLOCK); printk(" |"); dotest(rsem_AA2, FAILURE, LOCKTYPE_RWSEM); printk("\n"); print_testname("mixed write-read-lock"); printk(" |"); dotest(rlock_AA3, FAILURE, LOCKTYPE_RWLOCK); printk(" |"); dotest(rsem_AA3, FAILURE, LOCKTYPE_RWSEM); printk("\n"); printk(" --------------------------------------------------------------------------\n"); /* * irq-context testcases: */ DO_TESTCASE_2x6("irqs-on + 
irq-safe-A", irqsafe1); DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A); DO_TESTCASE_2x6("safe-A + irqs-on", irqsafe2B); DO_TESTCASE_6x6("safe-A + unsafe-B #1", irqsafe3); DO_TESTCASE_6x6("safe-A + unsafe-B #2", irqsafe4); DO_TESTCASE_6x6RW("irq lock-inversion", irq_inversion); DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion); // DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2); if (unexpected_testcase_failures) { printk("-----------------------------------------------------------------\n"); debug_locks = 0; printk("BUG: %3d unexpected failures (out of %3d) - debugging disabled! |\n", unexpected_testcase_failures, testcase_total); printk("-----------------------------------------------------------------\n"); } else if (expected_testcase_failures && testcase_successes) { printk("--------------------------------------------------------\n"); printk("%3d out of %3d testcases failed, as expected. |\n", expected_testcase_failures, testcase_total); printk("----------------------------------------------------\n"); debug_locks = 1; } else if (expected_testcase_failures && !testcase_successes) { printk("--------------------------------------------------------\n"); printk("All %3d testcases failed, as expected. |\n", expected_testcase_failures); printk("----------------------------------------\n"); debug_locks = 1; } else { printk("-------------------------------------------------------\n"); printk("Good, all %3d testcases passed! |\n", testcase_successes); printk("---------------------------------\n"); debug_locks = 1; } debug_locks_silent = 0; }
gpl-2.0
ali-filth/android_kernel_samsung_arubaslim
sound/soc/fsl/mpc5200_psc_ac97.c
5026
8807
/* * linux/sound/mpc5200-ac97.c -- AC97 support for the Freescale MPC52xx chip. * * Copyright (C) 2009 Jon Smirl, Digispeaker * Author: Jon Smirl <jonsmirl@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/delay.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <asm/time.h> #include <asm/delay.h> #include <asm/mpc52xx.h> #include <asm/mpc52xx_psc.h> #include "mpc5200_dma.h" #include "mpc5200_psc_ac97.h" #define DRV_NAME "mpc5200-psc-ac97" /* ALSA only supports a single AC97 device so static is recommend here */ static struct psc_dma *psc_dma; static unsigned short psc_ac97_read(struct snd_ac97 *ac97, unsigned short reg) { int status; unsigned int val; mutex_lock(&psc_dma->mutex); /* Wait for command send status zero = ready */ status = spin_event_timeout(!(in_be16(&psc_dma->psc_regs->sr_csr.status) & MPC52xx_PSC_SR_CMDSEND), 100, 0); if (status == 0) { pr_err("timeout on ac97 bus (rdy)\n"); mutex_unlock(&psc_dma->mutex); return -ENODEV; } /* Force clear the data valid bit */ in_be32(&psc_dma->psc_regs->ac97_data); /* Send the read */ out_be32(&psc_dma->psc_regs->ac97_cmd, (1<<31) | ((reg & 0x7f) << 24)); /* Wait for the answer */ status = spin_event_timeout((in_be16(&psc_dma->psc_regs->sr_csr.status) & MPC52xx_PSC_SR_DATA_VAL), 100, 0); if (status == 0) { pr_err("timeout on ac97 read (val) %x\n", in_be16(&psc_dma->psc_regs->sr_csr.status)); mutex_unlock(&psc_dma->mutex); return -ENODEV; } /* Get the data */ val = in_be32(&psc_dma->psc_regs->ac97_data); if (((val >> 24) & 0x7f) != reg) { pr_err("reg echo error on ac97 read\n"); mutex_unlock(&psc_dma->mutex); return -ENODEV; } val = (val >> 8) & 0xffff; mutex_unlock(&psc_dma->mutex); return (unsigned short) val; } static void 
psc_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { int status; mutex_lock(&psc_dma->mutex); /* Wait for command status zero = ready */ status = spin_event_timeout(!(in_be16(&psc_dma->psc_regs->sr_csr.status) & MPC52xx_PSC_SR_CMDSEND), 100, 0); if (status == 0) { pr_err("timeout on ac97 bus (write)\n"); goto out; } /* Write data */ out_be32(&psc_dma->psc_regs->ac97_cmd, ((reg & 0x7f) << 24) | (val << 8)); out: mutex_unlock(&psc_dma->mutex); } static void psc_ac97_warm_reset(struct snd_ac97 *ac97) { struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs; mutex_lock(&psc_dma->mutex); out_be32(&regs->sicr, psc_dma->sicr | MPC52xx_PSC_SICR_AWR); udelay(3); out_be32(&regs->sicr, psc_dma->sicr); mutex_unlock(&psc_dma->mutex); } static void psc_ac97_cold_reset(struct snd_ac97 *ac97) { struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs; mutex_lock(&psc_dma->mutex); dev_dbg(psc_dma->dev, "cold reset\n"); mpc5200_psc_ac97_gpio_reset(psc_dma->id); /* Notify the PSC that a reset has occurred */ out_be32(&regs->sicr, psc_dma->sicr | MPC52xx_PSC_SICR_ACRB); /* Re-enable RX and TX */ out_8(&regs->command, MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE); mutex_unlock(&psc_dma->mutex); msleep(1); psc_ac97_warm_reset(ac97); } struct snd_ac97_bus_ops soc_ac97_ops = { .read = psc_ac97_read, .write = psc_ac97_write, .reset = psc_ac97_cold_reset, .warm_reset = psc_ac97_warm_reset, }; EXPORT_SYMBOL_GPL(soc_ac97_ops); static int psc_ac97_hw_analog_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *cpu_dai) { struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(cpu_dai); struct psc_dma_stream *s = to_psc_dma_stream(substream, psc_dma); dev_dbg(psc_dma->dev, "%s(substream=%p) p_size=%i p_bytes=%i" " periods=%i buffer_size=%i buffer_bytes=%i channels=%i" " rate=%i format=%i\n", __func__, substream, params_period_size(params), params_period_bytes(params), params_periods(params), params_buffer_size(params), 
params_buffer_bytes(params), params_channels(params), params_rate(params), params_format(params)); /* Determine the set of enable bits to turn on */ s->ac97_slot_bits = (params_channels(params) == 1) ? 0x100 : 0x300; if (substream->pstr->stream != SNDRV_PCM_STREAM_CAPTURE) s->ac97_slot_bits <<= 16; return 0; } static int psc_ac97_hw_digital_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *cpu_dai) { struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(cpu_dai); dev_dbg(psc_dma->dev, "%s(substream=%p)\n", __func__, substream); if (params_channels(params) == 1) out_be32(&psc_dma->psc_regs->ac97_slots, 0x01000000); else out_be32(&psc_dma->psc_regs->ac97_slots, 0x03000000); return 0; } static int psc_ac97_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(dai); struct psc_dma_stream *s = to_psc_dma_stream(substream, psc_dma); switch (cmd) { case SNDRV_PCM_TRIGGER_START: dev_dbg(psc_dma->dev, "AC97 START: stream=%i\n", substream->pstr->stream); /* Set the slot enable bits */ psc_dma->slots |= s->ac97_slot_bits; out_be32(&psc_dma->psc_regs->ac97_slots, psc_dma->slots); break; case SNDRV_PCM_TRIGGER_STOP: dev_dbg(psc_dma->dev, "AC97 STOP: stream=%i\n", substream->pstr->stream); /* Clear the slot enable bits */ psc_dma->slots &= ~(s->ac97_slot_bits); out_be32(&psc_dma->psc_regs->ac97_slots, psc_dma->slots); break; } return 0; } static int psc_ac97_probe(struct snd_soc_dai *cpu_dai) { struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(cpu_dai); struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs; /* Go */ out_8(&regs->command, MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE); return 0; } /* --------------------------------------------------------------------- * ALSA SoC Bindings * * - Digital Audio Interface (DAI) template * - create/destroy dai hooks */ /** * psc_ac97_dai_template: template CPU Digital Audio Interface */ static const struct 
snd_soc_dai_ops psc_ac97_analog_ops = { .hw_params = psc_ac97_hw_analog_params, .trigger = psc_ac97_trigger, }; static const struct snd_soc_dai_ops psc_ac97_digital_ops = { .hw_params = psc_ac97_hw_digital_params, }; static struct snd_soc_dai_driver psc_ac97_dai[] = { { .ac97_control = 1, .probe = psc_ac97_probe, .playback = { .channels_min = 1, .channels_max = 6, .rates = SNDRV_PCM_RATE_8000_48000, .formats = SNDRV_PCM_FMTBIT_S32_BE, }, .capture = { .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = SNDRV_PCM_FMTBIT_S32_BE, }, .ops = &psc_ac97_analog_ops, }, { .ac97_control = 1, .playback = { .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_32000 | \ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000, .formats = SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE, }, .ops = &psc_ac97_digital_ops, } }; /* --------------------------------------------------------------------- * OF platform bus binding code: * - Probe/remove operations * - OF device match table */ static int __devinit psc_ac97_of_probe(struct platform_device *op) { int rc; struct snd_ac97 ac97; struct mpc52xx_psc __iomem *regs; rc = snd_soc_register_dais(&op->dev, psc_ac97_dai, ARRAY_SIZE(psc_ac97_dai)); if (rc != 0) { dev_err(&op->dev, "Failed to register DAI\n"); return rc; } psc_dma = dev_get_drvdata(&op->dev); regs = psc_dma->psc_regs; ac97.private_data = psc_dma; psc_dma->imr = 0; out_be16(&psc_dma->psc_regs->isr_imr.imr, psc_dma->imr); /* Configure the serial interface mode to AC97 */ psc_dma->sicr = MPC52xx_PSC_SICR_SIM_AC97 | MPC52xx_PSC_SICR_ENAC97; out_be32(&regs->sicr, psc_dma->sicr); /* No slots active */ out_be32(&regs->ac97_slots, 0x00000000); return 0; } static int __devexit psc_ac97_of_remove(struct platform_device *op) { snd_soc_unregister_dais(&op->dev, ARRAY_SIZE(psc_ac97_dai)); return 0; } /* Match table for of_platform binding */ static struct of_device_id psc_ac97_match[] __devinitdata = { { .compatible = "fsl,mpc5200-psc-ac97", }, { .compatible = 
"fsl,mpc5200b-psc-ac97", }, {} }; MODULE_DEVICE_TABLE(of, psc_ac97_match); static struct platform_driver psc_ac97_driver = { .probe = psc_ac97_of_probe, .remove = __devexit_p(psc_ac97_of_remove), .driver = { .name = "mpc5200-psc-ac97", .owner = THIS_MODULE, .of_match_table = psc_ac97_match, }, }; module_platform_driver(psc_ac97_driver); MODULE_AUTHOR("Jon Smirl <jonsmirl@gmail.com>"); MODULE_DESCRIPTION("mpc5200 AC97 module"); MODULE_LICENSE("GPL");
gpl-2.0
alvinhochun/sony-xperia-m-kernel
drivers/staging/ramster/cluster/masklog.c
7330
3944
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * Copyright (C) 2004, 2005, 2012 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/string.h> #include <linux/uaccess.h> #include "masklog.h" struct mlog_bits r2_mlog_and_bits = MLOG_BITS_RHS(MLOG_INITIAL_AND_MASK); EXPORT_SYMBOL_GPL(r2_mlog_and_bits); struct mlog_bits r2_mlog_not_bits = MLOG_BITS_RHS(0); EXPORT_SYMBOL_GPL(r2_mlog_not_bits); static ssize_t mlog_mask_show(u64 mask, char *buf) { char *state; if (__mlog_test_u64(mask, r2_mlog_and_bits)) state = "allow"; else if (__mlog_test_u64(mask, r2_mlog_not_bits)) state = "deny"; else state = "off"; return snprintf(buf, PAGE_SIZE, "%s\n", state); } static ssize_t mlog_mask_store(u64 mask, const char *buf, size_t count) { if (!strnicmp(buf, "allow", 5)) { __mlog_set_u64(mask, r2_mlog_and_bits); __mlog_clear_u64(mask, r2_mlog_not_bits); } else if (!strnicmp(buf, "deny", 4)) { __mlog_set_u64(mask, r2_mlog_not_bits); __mlog_clear_u64(mask, r2_mlog_and_bits); } else if (!strnicmp(buf, "off", 3)) { __mlog_clear_u64(mask, r2_mlog_not_bits); __mlog_clear_u64(mask, r2_mlog_and_bits); } else return -EINVAL; return count; } struct mlog_attribute { struct 
attribute attr; u64 mask; }; #define to_mlog_attr(_attr) container_of(_attr, struct mlog_attribute, attr) #define define_mask(_name) { \ .attr = { \ .name = #_name, \ .mode = S_IRUGO | S_IWUSR, \ }, \ .mask = ML_##_name, \ } static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = { define_mask(TCP), define_mask(MSG), define_mask(SOCKET), define_mask(HEARTBEAT), define_mask(HB_BIO), define_mask(DLMFS), define_mask(DLM), define_mask(DLM_DOMAIN), define_mask(DLM_THREAD), define_mask(DLM_MASTER), define_mask(DLM_RECOVERY), define_mask(DLM_GLUE), define_mask(VOTE), define_mask(CONN), define_mask(QUORUM), define_mask(BASTS), define_mask(CLUSTER), define_mask(ERROR), define_mask(NOTICE), define_mask(KTHREAD), }; static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, }; static ssize_t mlog_show(struct kobject *obj, struct attribute *attr, char *buf) { struct mlog_attribute *mlog_attr = to_mlog_attr(attr); return mlog_mask_show(mlog_attr->mask, buf); } static ssize_t mlog_store(struct kobject *obj, struct attribute *attr, const char *buf, size_t count) { struct mlog_attribute *mlog_attr = to_mlog_attr(attr); return mlog_mask_store(mlog_attr->mask, buf, count); } static const struct sysfs_ops mlog_attr_ops = { .show = mlog_show, .store = mlog_store, }; static struct kobj_type mlog_ktype = { .default_attrs = mlog_attr_ptrs, .sysfs_ops = &mlog_attr_ops, }; static struct kset mlog_kset = { .kobj = {.ktype = &mlog_ktype}, }; int r2_mlog_sys_init(struct kset *r2cb_kset) { int i = 0; while (mlog_attrs[i].attr.mode) { mlog_attr_ptrs[i] = &mlog_attrs[i].attr; i++; } mlog_attr_ptrs[i] = NULL; kobject_set_name(&mlog_kset.kobj, "logmask"); mlog_kset.kobj.kset = r2cb_kset; return kset_register(&mlog_kset); } void r2_mlog_sys_shutdown(void) { kset_unregister(&mlog_kset); }
gpl-2.0
prototype-U/gcore_kernel
drivers/scsi/aic94xx/aic94xx_seq.c
8098
47441
/* * Aic94xx SAS/SATA driver sequencer interface. * * Copyright (C) 2005 Adaptec, Inc. All rights reserved. * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> * * Parts of this code adapted from David Chaw's adp94xx_seq.c. * * This file is licensed under GPLv2. * * This file is part of the aic94xx driver. * * The aic94xx driver is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; version 2 of the * License. * * The aic94xx driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with the aic94xx driver; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/delay.h> #include <linux/gfp.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/firmware.h> #include "aic94xx_reg.h" #include "aic94xx_hwi.h" #include "aic94xx_seq.h" #include "aic94xx_dump.h" /* It takes no more than 0.05 us for an instruction * to complete. So waiting for 1 us should be more than * plenty. */ #define PAUSE_DELAY 1 #define PAUSE_TRIES 1000 static const struct firmware *sequencer_fw; static u16 cseq_vecs[CSEQ_NUM_VECS], lseq_vecs[LSEQ_NUM_VECS], mode2_task, cseq_idle_loop, lseq_idle_loop; static const u8 *cseq_code, *lseq_code; static u32 cseq_code_size, lseq_code_size; static u16 first_scb_site_no = 0xFFFF; static u16 last_scb_site_no; /* ---------- Pause/Unpause CSEQ/LSEQ ---------- */ /** * asd_pause_cseq - pause the central sequencer * @asd_ha: pointer to host adapter structure * * Return 0 on success, negative on failure. 
*/ static int asd_pause_cseq(struct asd_ha_struct *asd_ha) { int count = PAUSE_TRIES; u32 arp2ctl; arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL); if (arp2ctl & PAUSED) return 0; asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl | EPAUSE); do { arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL); if (arp2ctl & PAUSED) return 0; udelay(PAUSE_DELAY); } while (--count > 0); ASD_DPRINTK("couldn't pause CSEQ\n"); return -1; } /** * asd_unpause_cseq - unpause the central sequencer. * @asd_ha: pointer to host adapter structure. * * Return 0 on success, negative on error. */ static int asd_unpause_cseq(struct asd_ha_struct *asd_ha) { u32 arp2ctl; int count = PAUSE_TRIES; arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL); if (!(arp2ctl & PAUSED)) return 0; asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl & ~EPAUSE); do { arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL); if (!(arp2ctl & PAUSED)) return 0; udelay(PAUSE_DELAY); } while (--count > 0); ASD_DPRINTK("couldn't unpause the CSEQ\n"); return -1; } /** * asd_seq_pause_lseq - pause a link sequencer * @asd_ha: pointer to a host adapter structure * @lseq: link sequencer of interest * * Return 0 on success, negative on error. */ static int asd_seq_pause_lseq(struct asd_ha_struct *asd_ha, int lseq) { u32 arp2ctl; int count = PAUSE_TRIES; arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq)); if (arp2ctl & PAUSED) return 0; asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl | EPAUSE); do { arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq)); if (arp2ctl & PAUSED) return 0; udelay(PAUSE_DELAY); } while (--count > 0); ASD_DPRINTK("couldn't pause LSEQ %d\n", lseq); return -1; } /** * asd_pause_lseq - pause the link sequencer(s) * @asd_ha: pointer to host adapter structure * @lseq_mask: mask of link sequencers of interest * * Return 0 on success, negative on failure. 
*/ static int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask) { int lseq; int err = 0; for_each_sequencer(lseq_mask, lseq_mask, lseq) { err = asd_seq_pause_lseq(asd_ha, lseq); if (err) return err; } return err; } /** * asd_seq_unpause_lseq - unpause a link sequencer * @asd_ha: pointer to host adapter structure * @lseq: link sequencer of interest * * Return 0 on success, negative on error. */ static int asd_seq_unpause_lseq(struct asd_ha_struct *asd_ha, int lseq) { u32 arp2ctl; int count = PAUSE_TRIES; arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq)); if (!(arp2ctl & PAUSED)) return 0; asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl & ~EPAUSE); do { arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq)); if (!(arp2ctl & PAUSED)) return 0; udelay(PAUSE_DELAY); } while (--count > 0); ASD_DPRINTK("couldn't unpause LSEQ %d\n", lseq); return 0; } /* ---------- Downloading CSEQ/LSEQ microcode ---------- */ static int asd_verify_cseq(struct asd_ha_struct *asd_ha, const u8 *_prog, u32 size) { u32 addr = CSEQ_RAM_REG_BASE_ADR; const u32 *prog = (u32 *) _prog; u32 i; for (i = 0; i < size; i += 4, prog++, addr += 4) { u32 val = asd_read_reg_dword(asd_ha, addr); if (le32_to_cpu(*prog) != val) { asd_printk("%s: cseq verify failed at %u " "read:0x%x, wanted:0x%x\n", pci_name(asd_ha->pcidev), i, val, le32_to_cpu(*prog)); return -1; } } ASD_DPRINTK("verified %d bytes, passed\n", size); return 0; } /** * asd_verify_lseq - verify the microcode of a link sequencer * @asd_ha: pointer to host adapter structure * @_prog: pointer to the microcode * @size: size of the microcode in bytes * @lseq: link sequencer of interest * * The link sequencer code is accessed in 4 KB pages, which are selected * by setting LmRAMPAGE (bits 8 and 9) of the LmBISTCTL1 register. * The 10 KB LSEQm instruction code is mapped, page at a time, at * LmSEQRAM address. 
*/ static int asd_verify_lseq(struct asd_ha_struct *asd_ha, const u8 *_prog, u32 size, int lseq) { #define LSEQ_CODEPAGE_SIZE 4096 int pages = (size + LSEQ_CODEPAGE_SIZE - 1) / LSEQ_CODEPAGE_SIZE; u32 page; const u32 *prog = (u32 *) _prog; for (page = 0; page < pages; page++) { u32 i; asd_write_reg_dword(asd_ha, LmBISTCTL1(lseq), page << LmRAMPAGE_LSHIFT); for (i = 0; size > 0 && i < LSEQ_CODEPAGE_SIZE; i += 4, prog++, size-=4) { u32 val = asd_read_reg_dword(asd_ha, LmSEQRAM(lseq)+i); if (le32_to_cpu(*prog) != val) { asd_printk("%s: LSEQ%d verify failed " "page:%d, offs:%d\n", pci_name(asd_ha->pcidev), lseq, page, i); return -1; } } } ASD_DPRINTK("LSEQ%d verified %d bytes, passed\n", lseq, (int)((u8 *)prog-_prog)); return 0; } /** * asd_verify_seq -- verify CSEQ/LSEQ microcode * @asd_ha: pointer to host adapter structure * @prog: pointer to microcode * @size: size of the microcode * @lseq_mask: if 0, verify CSEQ microcode, else mask of LSEQs of interest * * Return 0 if microcode is correct, negative on mismatch. 
*/ static int asd_verify_seq(struct asd_ha_struct *asd_ha, const u8 *prog, u32 size, u8 lseq_mask) { if (lseq_mask == 0) return asd_verify_cseq(asd_ha, prog, size); else { int lseq, err; for_each_sequencer(lseq_mask, lseq_mask, lseq) { err = asd_verify_lseq(asd_ha, prog, size, lseq); if (err) return err; } } return 0; } #define ASD_DMA_MODE_DOWNLOAD #ifdef ASD_DMA_MODE_DOWNLOAD /* This is the size of the CSEQ Mapped instruction page */ #define MAX_DMA_OVLY_COUNT ((1U << 14)-1) static int asd_download_seq(struct asd_ha_struct *asd_ha, const u8 * const prog, u32 size, u8 lseq_mask) { u32 comstaten; u32 reg; int page; const int pages = (size + MAX_DMA_OVLY_COUNT - 1) / MAX_DMA_OVLY_COUNT; struct asd_dma_tok *token; int err = 0; if (size % 4) { asd_printk("sequencer program not multiple of 4\n"); return -1; } asd_pause_cseq(asd_ha); asd_pause_lseq(asd_ha, 0xFF); /* save, disable and clear interrupts */ comstaten = asd_read_reg_dword(asd_ha, COMSTATEN); asd_write_reg_dword(asd_ha, COMSTATEN, 0); asd_write_reg_dword(asd_ha, COMSTAT, COMSTAT_MASK); asd_write_reg_dword(asd_ha, CHIMINTEN, RST_CHIMINTEN); asd_write_reg_dword(asd_ha, CHIMINT, CHIMINT_MASK); token = asd_alloc_coherent(asd_ha, MAX_DMA_OVLY_COUNT, GFP_KERNEL); if (!token) { asd_printk("out of memory for dma SEQ download\n"); err = -ENOMEM; goto out; } ASD_DPRINTK("dma-ing %d bytes\n", size); for (page = 0; page < pages; page++) { int i; u32 left = min(size-page*MAX_DMA_OVLY_COUNT, (u32)MAX_DMA_OVLY_COUNT); memcpy(token->vaddr, prog + page*MAX_DMA_OVLY_COUNT, left); asd_write_reg_addr(asd_ha, OVLYDMAADR, token->dma_handle); asd_write_reg_dword(asd_ha, OVLYDMACNT, left); reg = !page ? RESETOVLYDMA : 0; reg |= (STARTOVLYDMA | OVLYHALTERR); reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ); /* Start DMA. 
*/ asd_write_reg_dword(asd_ha, OVLYDMACTL, reg); for (i = PAUSE_TRIES*100; i > 0; i--) { u32 dmadone = asd_read_reg_dword(asd_ha, OVLYDMACTL); if (!(dmadone & OVLYDMAACT)) break; udelay(PAUSE_DELAY); } } reg = asd_read_reg_dword(asd_ha, COMSTAT); if (!(reg & OVLYDMADONE) || (reg & OVLYERR) || (asd_read_reg_dword(asd_ha, CHIMINT) & DEVEXCEPT_MASK)){ asd_printk("%s: error DMA-ing sequencer code\n", pci_name(asd_ha->pcidev)); err = -ENODEV; } asd_free_coherent(asd_ha, token); out: asd_write_reg_dword(asd_ha, COMSTATEN, comstaten); return err ? : asd_verify_seq(asd_ha, prog, size, lseq_mask); } #else /* ASD_DMA_MODE_DOWNLOAD */ static int asd_download_seq(struct asd_ha_struct *asd_ha, const u8 *_prog, u32 size, u8 lseq_mask) { int i; u32 reg = 0; const u32 *prog = (u32 *) _prog; if (size % 4) { asd_printk("sequencer program not multiple of 4\n"); return -1; } asd_pause_cseq(asd_ha); asd_pause_lseq(asd_ha, 0xFF); reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ); reg |= PIOCMODE; asd_write_reg_dword(asd_ha, OVLYDMACNT, size); asd_write_reg_dword(asd_ha, OVLYDMACTL, reg); ASD_DPRINTK("downloading %s sequencer%s in PIO mode...\n", lseq_mask ? "LSEQ" : "CSEQ", lseq_mask ? "s" : ""); for (i = 0; i < size; i += 4, prog++) asd_write_reg_dword(asd_ha, SPIODATA, *prog); reg = (reg & ~PIOCMODE) | OVLYHALTERR; asd_write_reg_dword(asd_ha, OVLYDMACTL, reg); return asd_verify_seq(asd_ha, _prog, size, lseq_mask); } #endif /* ASD_DMA_MODE_DOWNLOAD */ /** * asd_seq_download_seqs - download the sequencer microcode * @asd_ha: pointer to host adapter structure * * Download the central and link sequencer microcode. 
*/ static int asd_seq_download_seqs(struct asd_ha_struct *asd_ha) { int err; if (!asd_ha->hw_prof.enabled_phys) { asd_printk("%s: no enabled phys!\n", pci_name(asd_ha->pcidev)); return -ENODEV; } /* Download the CSEQ */ ASD_DPRINTK("downloading CSEQ...\n"); err = asd_download_seq(asd_ha, cseq_code, cseq_code_size, 0); if (err) { asd_printk("CSEQ download failed:%d\n", err); return err; } /* Download the Link Sequencers code. All of the Link Sequencers * microcode can be downloaded at the same time. */ ASD_DPRINTK("downloading LSEQs...\n"); err = asd_download_seq(asd_ha, lseq_code, lseq_code_size, asd_ha->hw_prof.enabled_phys); if (err) { /* Try it one at a time */ u8 lseq; u8 lseq_mask = asd_ha->hw_prof.enabled_phys; for_each_sequencer(lseq_mask, lseq_mask, lseq) { err = asd_download_seq(asd_ha, lseq_code, lseq_code_size, 1<<lseq); if (err) break; } } if (err) asd_printk("LSEQs download failed:%d\n", err); return err; } /* ---------- Initializing the chip, chip memory, etc. ---------- */ /** * asd_init_cseq_mip - initialize CSEQ mode independent pages 4-7 * @asd_ha: pointer to host adapter structure */ static void asd_init_cseq_mip(struct asd_ha_struct *asd_ha) { /* CSEQ Mode Independent, page 4 setup. 
*/ asd_write_reg_word(asd_ha, CSEQ_Q_EXE_HEAD, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_Q_EXE_TAIL, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_Q_DONE_HEAD, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_Q_DONE_TAIL, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_Q_SEND_HEAD, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_Q_SEND_TAIL, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_Q_DMA2CHIM_HEAD, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_Q_DMA2CHIM_TAIL, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_Q_COPY_HEAD, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_Q_COPY_TAIL, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_REG0, 0); asd_write_reg_word(asd_ha, CSEQ_REG1, 0); asd_write_reg_dword(asd_ha, CSEQ_REG2, 0); asd_write_reg_byte(asd_ha, CSEQ_LINK_CTL_Q_MAP, 0); { u8 con = asd_read_reg_byte(asd_ha, CCONEXIST); u8 val = hweight8(con); asd_write_reg_byte(asd_ha, CSEQ_MAX_CSEQ_MODE, (val<<4)|val); } asd_write_reg_word(asd_ha, CSEQ_FREE_LIST_HACK_COUNT, 0); /* CSEQ Mode independent, page 5 setup. */ asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_QUEUE, 0); asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_QUEUE+4, 0); asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_COUNT, 0); asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_COUNT+4, 0); asd_write_reg_word(asd_ha, CSEQ_Q_EST_NEXUS_HEAD, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_Q_EST_NEXUS_TAIL, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_NEED_EST_NEXUS_SCB, 0); asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_REQ_HEAD, 0); asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_REQ_TAIL, 0); asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_SCB_OFFSET, 0); /* CSEQ Mode independent, page 6 setup. 
*/ asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_RET_ADDR0, 0); asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_RET_ADDR1, 0); asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_SCBPTR, 0); asd_write_reg_byte(asd_ha, CSEQ_INT_ROUT_MODE, 0); asd_write_reg_byte(asd_ha, CSEQ_ISR_SCRATCH_FLAGS, 0); asd_write_reg_word(asd_ha, CSEQ_ISR_SAVE_SINDEX, 0); asd_write_reg_word(asd_ha, CSEQ_ISR_SAVE_DINDEX, 0); asd_write_reg_word(asd_ha, CSEQ_Q_MONIRTT_HEAD, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_Q_MONIRTT_TAIL, 0xFFFF); /* Calculate the free scb mask. */ { u16 cmdctx = asd_get_cmdctx_size(asd_ha); cmdctx = (~((cmdctx/128)-1)) >> 8; asd_write_reg_byte(asd_ha, CSEQ_FREE_SCB_MASK, (u8)cmdctx); } asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_HEAD, first_scb_site_no); asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_TAIL, last_scb_site_no); asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_HEAD, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_TAIL, 0xFFFF); /* CSEQ Mode independent, page 7 setup. */ asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE, 0); asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE+4, 0); asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT, 0); asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT+4, 0); asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_HEAD, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_TAIL, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_NEED_EMPTY_SCB, 0); asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_HEAD, 0); asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_TAIL, 0); asd_write_reg_byte(asd_ha, CSEQ_EMPTY_SCB_OFFSET, 0); asd_write_reg_word(asd_ha, CSEQ_PRIMITIVE_DATA, 0); asd_write_reg_dword(asd_ha, CSEQ_TIMEOUT_CONST, 0); } /** * asd_init_cseq_mdp - initialize CSEQ Mode dependent pages * @asd_ha: pointer to host adapter structure */ static void asd_init_cseq_mdp(struct asd_ha_struct *asd_ha) { int i; int moffs; moffs = CSEQ_PAGE_SIZE * 2; /* CSEQ Mode dependent, modes 0-7, page 0 setup. 
*/ for (i = 0; i < 8; i++) { asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SINDEX, 0); asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCBPTR, 0); asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_HEAD, 0xFFFF); asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_TAIL, 0xFFFF); asd_write_reg_byte(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCRPAGE, 0); } /* CSEQ Mode dependent, mode 0-7, page 1 and 2 shall be ignored. */ /* CSEQ Mode dependent, mode 8, page 0 setup. */ asd_write_reg_word(asd_ha, CSEQ_RET_ADDR, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_RET_SCBPTR, 0); asd_write_reg_word(asd_ha, CSEQ_SAVE_SCBPTR, 0); asd_write_reg_word(asd_ha, CSEQ_EMPTY_TRANS_CTX, 0); asd_write_reg_word(asd_ha, CSEQ_RESP_LEN, 0); asd_write_reg_word(asd_ha, CSEQ_TMF_SCBPTR, 0); asd_write_reg_word(asd_ha, CSEQ_GLOBAL_PREV_SCB, 0); asd_write_reg_word(asd_ha, CSEQ_GLOBAL_HEAD, 0); asd_write_reg_word(asd_ha, CSEQ_CLEAR_LU_HEAD, 0); asd_write_reg_byte(asd_ha, CSEQ_TMF_OPCODE, 0); asd_write_reg_byte(asd_ha, CSEQ_SCRATCH_FLAGS, 0); asd_write_reg_word(asd_ha, CSEQ_HSB_SITE, 0); asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_SCB_SITE, (u16)last_scb_site_no+1); asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_DDB_SITE, (u16)asd_ha->hw_prof.max_ddbs); /* CSEQ Mode dependent, mode 8, page 1 setup. */ asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR, 0); asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR + 4, 0); asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK, 0); asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK + 4, 0); /* CSEQ Mode dependent, mode 8, page 2 setup. */ /* Tell the sequencer the bus address of the first SCB. */ asd_write_reg_addr(asd_ha, CSEQ_HQ_NEW_POINTER, asd_ha->seq.next_scb.dma_handle); ASD_DPRINTK("First SCB dma_handle: 0x%llx\n", (unsigned long long)asd_ha->seq.next_scb.dma_handle); /* Tell the sequencer the first Done List entry address. 
*/ asd_write_reg_addr(asd_ha, CSEQ_HQ_DONE_BASE, asd_ha->seq.actual_dl->dma_handle); /* Initialize the Q_DONE_POINTER with the least significant * 4 bytes of the first Done List address. */ asd_write_reg_dword(asd_ha, CSEQ_HQ_DONE_POINTER, ASD_BUSADDR_LO(asd_ha->seq.actual_dl->dma_handle)); asd_write_reg_byte(asd_ha, CSEQ_HQ_DONE_PASS, ASD_DEF_DL_TOGGLE); /* CSEQ Mode dependent, mode 8, page 3 shall be ignored. */ } /** * asd_init_cseq_scratch -- setup and init CSEQ * @asd_ha: pointer to host adapter structure * * Setup and initialize Central sequencers. Initialize the mode * independent and dependent scratch page to the default settings. */ static void asd_init_cseq_scratch(struct asd_ha_struct *asd_ha) { asd_init_cseq_mip(asd_ha); asd_init_cseq_mdp(asd_ha); } /** * asd_init_lseq_mip -- initialize LSEQ Mode independent pages 0-3 * @asd_ha: pointer to host adapter structure */ static void asd_init_lseq_mip(struct asd_ha_struct *asd_ha, u8 lseq) { int i; /* LSEQ Mode independent page 0 setup. */ asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_HEAD(lseq), 0xFFFF); asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_TAIL(lseq), 0xFFFF); asd_write_reg_byte(asd_ha, LmSEQ_LINK_NUMBER(lseq), lseq); asd_write_reg_byte(asd_ha, LmSEQ_SCRATCH_FLAGS(lseq), ASD_NOTIFY_ENABLE_SPINUP); asd_write_reg_dword(asd_ha, LmSEQ_CONNECTION_STATE(lseq),0x08000000); asd_write_reg_word(asd_ha, LmSEQ_CONCTL(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_CONSTAT(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_CONNECTION_MODES(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_REG1_ISR(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_REG2_ISR(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_REG3_ISR(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq)+4, 0); /* LSEQ Mode independent page 1 setup. 
*/ asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR0(lseq), 0xFFFF); asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR1(lseq), 0xFFFF); asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR2(lseq), 0xFFFF); asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR3(lseq), 0xFFFF); asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE0(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE1(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE2(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE3(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_HEAD(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_TAIL(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_BUF_AVAIL(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_TIMEOUT_CONST(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_SINDEX(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_DINDEX(lseq), 0); /* LSEQ Mode Independent page 2 setup. */ asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR0(lseq), 0xFFFF); asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR1(lseq), 0xFFFF); asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR2(lseq), 0xFFFF); asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR3(lseq), 0xFFFF); asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD0(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD1(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD2(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD3(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_HEAD(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_TAIL(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_BUFS_AVAIL(lseq), 0); for (i = 0; i < 12; i += 4) asd_write_reg_dword(asd_ha, LmSEQ_ATA_SCR_REGS(lseq) + i, 0); /* LSEQ Mode Independent page 3 setup. 
*/ /* Device present timer timeout */ asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TMR_TOUT_CONST(lseq), ASD_DEV_PRESENT_TIMEOUT); /* SATA interlock timer disabled */ asd_write_reg_dword(asd_ha, LmSEQ_SATA_INTERLOCK_TIMEOUT(lseq), ASD_SATA_INTERLOCK_TIMEOUT); /* STP shutdown timer timeout constant, IGNORED by the sequencer, * always 0. */ asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMEOUT(lseq), ASD_STP_SHUTDOWN_TIMEOUT); asd_write_reg_dword(asd_ha, LmSEQ_SRST_ASSERT_TIMEOUT(lseq), ASD_SRST_ASSERT_TIMEOUT); asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMEOUT(lseq), ASD_RCV_FIS_TIMEOUT); asd_write_reg_dword(asd_ha, LmSEQ_ONE_MILLISEC_TIMEOUT(lseq), ASD_ONE_MILLISEC_TIMEOUT); /* COM_INIT timer */ asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(lseq), ASD_TEN_MILLISEC_TIMEOUT); asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMEOUT(lseq), ASD_SMP_RCV_TIMEOUT); } /** * asd_init_lseq_mdp -- initialize LSEQ mode dependent pages. * @asd_ha: pointer to host adapter structure */ static void asd_init_lseq_mdp(struct asd_ha_struct *asd_ha, int lseq) { int i; u32 moffs; u16 ret_addr[] = { 0xFFFF, /* mode 0 */ 0xFFFF, /* mode 1 */ mode2_task, /* mode 2 */ 0, 0xFFFF, /* mode 4/5 */ 0xFFFF, /* mode 4/5 */ }; /* * Mode 0,1,2 and 4/5 have common field on page 0 for the first * 14 bytes. */ for (i = 0; i < 3; i++) { moffs = i * LSEQ_MODE_SCRATCH_SIZE; asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq)+moffs, ret_addr[i]); asd_write_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq)+moffs, 0); asd_write_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq)+moffs, 0); asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq)+moffs,0xFFFF); asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq)+moffs,0xFFFF); asd_write_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq)+moffs,0); asd_write_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq)+moffs,0); } /* * Mode 5 page 0 overlaps the same scratch page with Mode 0 page 3. 
*/ asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq)+LSEQ_MODE5_PAGE0_OFFSET, ret_addr[5]); asd_write_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0); asd_write_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0); asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF); asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF); asd_write_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0); asd_write_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0); /* LSEQ Mode dependent 0, page 0 setup. */ asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_DDB_SITE(lseq), (u16)asd_ha->hw_prof.max_ddbs); asd_write_reg_word(asd_ha, LmSEQ_EMPTY_TRANS_CTX(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_RESP_LEN(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_SCB_SITE(lseq), (u16)last_scb_site_no+1); asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq), (u16) ((LmM0INTEN_MASK & 0xFFFF0000) >> 16)); asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq) + 2, (u16) LmM0INTEN_MASK & 0xFFFF); asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_FRM_LEN(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_PROTOCOL(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_RESP_STATUS(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_LAST_LOADED_SGE(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_SAVE_SCBPTR(lseq), 0); /* LSEQ mode dependent, mode 1, page 0 setup. 
*/ asd_write_reg_word(asd_ha, LmSEQ_Q_XMIT_HEAD(lseq), 0xFFFF); asd_write_reg_word(asd_ha, LmSEQ_M1_EMPTY_TRANS_CTX(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_INI_CONN_TAG(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_FAILED_OPEN_STATUS(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_XMIT_REQUEST_TYPE(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_M1_RESP_STATUS(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_M1_LAST_LOADED_SGE(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_M1_SAVE_SCBPTR(lseq), 0); /* LSEQ Mode dependent mode 2, page 0 setup */ asd_write_reg_word(asd_ha, LmSEQ_PORT_COUNTER(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_PM_TABLE_PTR(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_SATA_INTERLOCK_TMR_SAVE(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_IP_BITL(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_COPY_SMP_CONN_TAG(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_P0M2_OFFS1AH(lseq), 0); /* LSEQ Mode dependent, mode 4/5, page 0 setup. */ asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_STATUS(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_MODE(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_Q_LINK_HEAD(lseq), 0xFFFF); asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_ERR(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_SIGNALS(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_SAS_RESET_MODE(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_LINK_RESET_RETRY_COUNT(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_NUM_LINK_RESET_RETRIES(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_OOB_INT_ENABLES(lseq), 0); /* * Set the desired interval between transmissions of the NOTIFY * (ENABLE SPINUP) primitive. Must be initialized to val - 1. */ asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_TIMEOUT(lseq), ASD_NOTIFY_TIMEOUT - 1); /* No delay for the first NOTIFY to be sent to the attached target. 
*/ asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_DOWN_COUNT(lseq), ASD_NOTIFY_DOWN_COUNT); asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_INITIAL_COUNT(lseq), ASD_NOTIFY_DOWN_COUNT); /* LSEQ Mode dependent, mode 0 and 1, page 1 setup. */ for (i = 0; i < 2; i++) { int j; /* Start from Page 1 of Mode 0 and 1. */ moffs = LSEQ_PAGE_SIZE + i*LSEQ_MODE_SCRATCH_SIZE; /* All the fields of page 1 can be initialized to 0. */ for (j = 0; j < LSEQ_PAGE_SIZE; j += 4) asd_write_reg_dword(asd_ha, LmSCRATCH(lseq)+moffs+j,0); } /* LSEQ Mode dependent, mode 2, page 1 setup. */ asd_write_reg_dword(asd_ha, LmSEQ_INVALID_DWORD_COUNT(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_DISPARITY_ERROR_COUNT(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_LOSS_OF_SYNC_COUNT(lseq), 0); /* LSEQ Mode dependent, mode 4/5, page 1. */ for (i = 0; i < LSEQ_PAGE_SIZE; i+=4) asd_write_reg_dword(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq)+i, 0); asd_write_reg_byte(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq), 0xFF); asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq), 0xFF); asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+1,0xFF); asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+2,0xFF); asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq), 0xFF); asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+1, 0xFF); asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+2, 0xFF); asd_write_reg_dword(asd_ha, LmSEQ_DATA_OFFSET(lseq), 0xFFFFFFFF); /* LSEQ Mode dependent, mode 0, page 2 setup. */ asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMER_TERM_TS(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_DEVICE_BITS(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_SDB_DDB(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_SDB_NUM_TAGS(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_SDB_CURR_TAG(lseq), 0); /* LSEQ Mode Dependent 1, page 2 setup. 
*/ asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq)+4, 0); asd_write_reg_dword(asd_ha, LmSEQ_OPEN_TIMER_TERM_TS(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_SRST_AS_TIMER_TERM_TS(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_LAST_LOADED_SG_EL(lseq), 0); /* LSEQ Mode Dependent 2, page 2 setup. */ /* The LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS is IGNORED by the sequencer, * i.e. always 0. */ asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS(lseq),0); asd_write_reg_dword(asd_ha, LmSEQ_CLOSE_TIMER_TERM_TS(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_BREAK_TIMER_TERM_TS(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_DWS_RESET_TIMER_TERM_TS(lseq), 0); asd_write_reg_dword(asd_ha,LmSEQ_SATA_INTERLOCK_TIMER_TERM_TS(lseq),0); asd_write_reg_dword(asd_ha, LmSEQ_MCTL_TIMER_TERM_TS(lseq), 0); /* LSEQ Mode Dependent 4/5, page 2 setup. */ asd_write_reg_dword(asd_ha, LmSEQ_COMINIT_TIMER_TERM_TS(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_RCV_ID_TIMER_TERM_TS(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMER_TERM_TS(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TIMER_TERM_TS(lseq), 0); } /** * asd_init_lseq_scratch -- setup and init link sequencers * @asd_ha: pointer to host adapter struct */ static void asd_init_lseq_scratch(struct asd_ha_struct *asd_ha) { u8 lseq; u8 lseq_mask; lseq_mask = asd_ha->hw_prof.enabled_phys; for_each_sequencer(lseq_mask, lseq_mask, lseq) { asd_init_lseq_mip(asd_ha, lseq); asd_init_lseq_mdp(asd_ha, lseq); } } /** * asd_init_scb_sites -- initialize sequencer SCB sites (memory). * @asd_ha: pointer to host adapter structure * * This should be done before initializing common CSEQ and LSEQ * scratch since those areas depend on some computed values here, * last_scb_site_no, etc. 
*/ static void asd_init_scb_sites(struct asd_ha_struct *asd_ha) { u16 site_no; u16 max_scbs = 0; for (site_no = asd_ha->hw_prof.max_scbs-1; site_no != (u16) -1; site_no--) { u16 i; /* Initialize all fields in the SCB site to 0. */ for (i = 0; i < ASD_SCB_SIZE; i += 4) asd_scbsite_write_dword(asd_ha, site_no, i, 0); /* Initialize SCB Site Opcode field to invalid. */ asd_scbsite_write_byte(asd_ha, site_no, offsetof(struct scb_header, opcode), 0xFF); /* Initialize SCB Site Flags field to mean a response * frame has been received. This means inadvertent * frames received to be dropped. */ asd_scbsite_write_byte(asd_ha, site_no, 0x49, 0x01); /* Workaround needed by SEQ to fix a SATA issue is to exclude * certain SCB sites from the free list. */ if (!SCB_SITE_VALID(site_no)) continue; if (last_scb_site_no == 0) last_scb_site_no = site_no; /* For every SCB site, we need to initialize the * following fields: Q_NEXT, SCB_OPCODE, SCB_FLAGS, * and SG Element Flag. */ /* Q_NEXT field of the last SCB is invalidated. */ asd_scbsite_write_word(asd_ha, site_no, 0, first_scb_site_no); first_scb_site_no = site_no; max_scbs++; } asd_ha->hw_prof.max_scbs = max_scbs; ASD_DPRINTK("max_scbs:%d\n", asd_ha->hw_prof.max_scbs); ASD_DPRINTK("first_scb_site_no:0x%x\n", first_scb_site_no); ASD_DPRINTK("last_scb_site_no:0x%x\n", last_scb_site_no); } /** * asd_init_cseq_cio - initialize CSEQ CIO registers * @asd_ha: pointer to host adapter structure */ static void asd_init_cseq_cio(struct asd_ha_struct *asd_ha) { int i; asd_write_reg_byte(asd_ha, CSEQCOMINTEN, 0); asd_write_reg_byte(asd_ha, CSEQDLCTL, ASD_DL_SIZE_BITS); asd_write_reg_byte(asd_ha, CSEQDLOFFS, 0); asd_write_reg_byte(asd_ha, CSEQDLOFFS+1, 0); asd_ha->seq.scbpro = 0; asd_write_reg_dword(asd_ha, SCBPRO, 0); asd_write_reg_dword(asd_ha, CSEQCON, 0); /* Initialize CSEQ Mode 11 Interrupt Vectors. * The addresses are 16 bit wide and in dword units. * The values of their macros are in byte units. * Thus we have to divide by 4. 
*/ asd_write_reg_word(asd_ha, CM11INTVEC0, cseq_vecs[0]); asd_write_reg_word(asd_ha, CM11INTVEC1, cseq_vecs[1]); asd_write_reg_word(asd_ha, CM11INTVEC2, cseq_vecs[2]); /* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */ asd_write_reg_byte(asd_ha, CARP2INTEN, EN_ARP2HALTC); /* Initialize CSEQ Scratch Page to 0x04. */ asd_write_reg_byte(asd_ha, CSCRATCHPAGE, 0x04); /* Initialize CSEQ Mode[0-8] Dependent registers. */ /* Initialize Scratch Page to 0. */ for (i = 0; i < 9; i++) asd_write_reg_byte(asd_ha, CMnSCRATCHPAGE(i), 0); /* Reset the ARP2 Program Count. */ asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop); for (i = 0; i < 8; i++) { /* Initialize Mode n Link m Interrupt Enable. */ asd_write_reg_dword(asd_ha, CMnINTEN(i), EN_CMnRSPMBXF); /* Initialize Mode n Request Mailbox. */ asd_write_reg_dword(asd_ha, CMnREQMBX(i), 0); } } /** * asd_init_lseq_cio -- initialize LmSEQ CIO registers * @asd_ha: pointer to host adapter structure */ static void asd_init_lseq_cio(struct asd_ha_struct *asd_ha, int lseq) { u8 *sas_addr; int i; /* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */ asd_write_reg_dword(asd_ha, LmARP2INTEN(lseq), EN_ARP2HALTC); asd_write_reg_byte(asd_ha, LmSCRATCHPAGE(lseq), 0); /* Initialize Mode 0,1, and 2 SCRATCHPAGE to 0. */ for (i = 0; i < 3; i++) asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, i), 0); /* Initialize Mode 5 SCRATCHPAGE to 0. */ asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, 5), 0); asd_write_reg_dword(asd_ha, LmRSPMBX(lseq), 0); /* Initialize Mode 0,1,2 and 5 Interrupt Enable and * Interrupt registers. 
*/ asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 0), LmM0INTEN_MASK); asd_write_reg_dword(asd_ha, LmMnINT(lseq, 0), 0xFFFFFFFF); /* Mode 1 */ asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 1), LmM1INTEN_MASK); asd_write_reg_dword(asd_ha, LmMnINT(lseq, 1), 0xFFFFFFFF); /* Mode 2 */ asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 2), LmM2INTEN_MASK); asd_write_reg_dword(asd_ha, LmMnINT(lseq, 2), 0xFFFFFFFF); /* Mode 5 */ asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 5), LmM5INTEN_MASK); asd_write_reg_dword(asd_ha, LmMnINT(lseq, 5), 0xFFFFFFFF); /* Enable HW Timer status. */ asd_write_reg_byte(asd_ha, LmHWTSTATEN(lseq), LmHWTSTATEN_MASK); /* Enable Primitive Status 0 and 1. */ asd_write_reg_dword(asd_ha, LmPRIMSTAT0EN(lseq), LmPRIMSTAT0EN_MASK); asd_write_reg_dword(asd_ha, LmPRIMSTAT1EN(lseq), LmPRIMSTAT1EN_MASK); /* Enable Frame Error. */ asd_write_reg_dword(asd_ha, LmFRMERREN(lseq), LmFRMERREN_MASK); asd_write_reg_byte(asd_ha, LmMnHOLDLVL(lseq, 0), 0x50); /* Initialize Mode 0 Transfer Level to 512. */ asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 0), LmMnXFRLVL_512); /* Initialize Mode 1 Transfer Level to 256. */ asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 1), LmMnXFRLVL_256); /* Initialize Program Count. */ asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop); /* Enable Blind SG Move. */ asd_write_reg_dword(asd_ha, LmMODECTL(lseq), LmBLIND48); asd_write_reg_word(asd_ha, LmM3SATATIMER(lseq), ASD_SATA_INTERLOCK_TIMEOUT); (void) asd_read_reg_dword(asd_ha, LmREQMBX(lseq)); /* Clear Primitive Status 0 and 1. */ asd_write_reg_dword(asd_ha, LmPRMSTAT0(lseq), 0xFFFFFFFF); asd_write_reg_dword(asd_ha, LmPRMSTAT1(lseq), 0xFFFFFFFF); /* Clear HW Timer status. */ asd_write_reg_byte(asd_ha, LmHWTSTAT(lseq), 0xFF); /* Clear DMA Errors for Mode 0 and 1. */ asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 0), 0xFF); asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 1), 0xFF); /* Clear SG DMA Errors for Mode 0 and 1. 
*/ asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 0), 0xFF); asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 1), 0xFF); /* Clear Mode 0 Buffer Parity Error. */ asd_write_reg_byte(asd_ha, LmMnBUFSTAT(lseq, 0), LmMnBUFPERR); /* Clear Mode 0 Frame Error register. */ asd_write_reg_dword(asd_ha, LmMnFRMERR(lseq, 0), 0xFFFFFFFF); /* Reset LSEQ external interrupt arbiter. */ asd_write_reg_byte(asd_ha, LmARP2INTCTL(lseq), RSTINTCTL); /* Set the Phy SAS for the LmSEQ WWN. */ sas_addr = asd_ha->phys[lseq].phy_desc->sas_addr; for (i = 0; i < SAS_ADDR_SIZE; i++) asd_write_reg_byte(asd_ha, LmWWN(lseq) + i, sas_addr[i]); /* Set the Transmit Size to 1024 bytes, 0 = 256 Dwords. */ asd_write_reg_byte(asd_ha, LmMnXMTSIZE(lseq, 1), 0); /* Set the Bus Inactivity Time Limit Timer. */ asd_write_reg_word(asd_ha, LmBITL_TIMER(lseq), 9); /* Enable SATA Port Multiplier. */ asd_write_reg_byte(asd_ha, LmMnSATAFS(lseq, 1), 0x80); /* Initialize Interrupt Vector[0-10] address in Mode 3. * See the comment on CSEQ_INT_* */ asd_write_reg_word(asd_ha, LmM3INTVEC0(lseq), lseq_vecs[0]); asd_write_reg_word(asd_ha, LmM3INTVEC1(lseq), lseq_vecs[1]); asd_write_reg_word(asd_ha, LmM3INTVEC2(lseq), lseq_vecs[2]); asd_write_reg_word(asd_ha, LmM3INTVEC3(lseq), lseq_vecs[3]); asd_write_reg_word(asd_ha, LmM3INTVEC4(lseq), lseq_vecs[4]); asd_write_reg_word(asd_ha, LmM3INTVEC5(lseq), lseq_vecs[5]); asd_write_reg_word(asd_ha, LmM3INTVEC6(lseq), lseq_vecs[6]); asd_write_reg_word(asd_ha, LmM3INTVEC7(lseq), lseq_vecs[7]); asd_write_reg_word(asd_ha, LmM3INTVEC8(lseq), lseq_vecs[8]); asd_write_reg_word(asd_ha, LmM3INTVEC9(lseq), lseq_vecs[9]); asd_write_reg_word(asd_ha, LmM3INTVEC10(lseq), lseq_vecs[10]); /* * Program the Link LED control, applicable only for * Chip Rev. B or later. */ asd_write_reg_dword(asd_ha, LmCONTROL(lseq), (LEDTIMER | LEDMODE_TXRX | LEDTIMERS_100ms)); /* Set the Align Rate for SAS and STP mode. 
*/ asd_write_reg_byte(asd_ha, LmM1SASALIGN(lseq), SAS_ALIGN_DEFAULT); asd_write_reg_byte(asd_ha, LmM1STPALIGN(lseq), STP_ALIGN_DEFAULT); } /** * asd_post_init_cseq -- clear CSEQ Mode n Int. status and Response mailbox * @asd_ha: pointer to host adapter struct */ static void asd_post_init_cseq(struct asd_ha_struct *asd_ha) { int i; for (i = 0; i < 8; i++) asd_write_reg_dword(asd_ha, CMnINT(i), 0xFFFFFFFF); for (i = 0; i < 8; i++) asd_read_reg_dword(asd_ha, CMnRSPMBX(i)); /* Reset the external interrupt arbiter. */ asd_write_reg_byte(asd_ha, CARP2INTCTL, RSTINTCTL); } /** * asd_init_ddb_0 -- initialize DDB 0 * @asd_ha: pointer to host adapter structure * * Initialize DDB site 0 which is used internally by the sequencer. */ static void asd_init_ddb_0(struct asd_ha_struct *asd_ha) { int i; /* Zero out the DDB explicitly */ for (i = 0; i < sizeof(struct asd_ddb_seq_shared); i+=4) asd_ddbsite_write_dword(asd_ha, 0, i, 0); asd_ddbsite_write_word(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, q_free_ddb_head), 0); asd_ddbsite_write_word(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, q_free_ddb_tail), asd_ha->hw_prof.max_ddbs-1); asd_ddbsite_write_word(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, q_free_ddb_cnt), 0); asd_ddbsite_write_word(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, q_used_ddb_head), 0xFFFF); asd_ddbsite_write_word(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, q_used_ddb_tail), 0xFFFF); asd_ddbsite_write_word(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, shared_mem_lock), 0); asd_ddbsite_write_word(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, smp_conn_tag), 0); asd_ddbsite_write_word(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, est_nexus_buf_cnt), 0); asd_ddbsite_write_word(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, est_nexus_buf_thresh), asd_ha->hw_prof.num_phys * 2); asd_ddbsite_write_byte(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, settable_max_contexts),0); asd_ddbsite_write_byte(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, 
conn_not_active), 0xFF); asd_ddbsite_write_byte(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, phy_is_up), 0x00); /* DDB 0 is reserved */ set_bit(0, asd_ha->hw_prof.ddb_bitmap); } static void asd_seq_init_ddb_sites(struct asd_ha_struct *asd_ha) { unsigned int i; unsigned int ddb_site; for (ddb_site = 0 ; ddb_site < ASD_MAX_DDBS; ddb_site++) for (i = 0; i < sizeof(struct asd_ddb_ssp_smp_target_port); i+= 4) asd_ddbsite_write_dword(asd_ha, ddb_site, i, 0); } /** * asd_seq_setup_seqs -- setup and initialize central and link sequencers * @asd_ha: pointer to host adapter structure */ static void asd_seq_setup_seqs(struct asd_ha_struct *asd_ha) { int lseq; u8 lseq_mask; /* Initialize DDB sites */ asd_seq_init_ddb_sites(asd_ha); /* Initialize SCB sites. Done first to compute some values which * the rest of the init code depends on. */ asd_init_scb_sites(asd_ha); /* Initialize CSEQ Scratch RAM registers. */ asd_init_cseq_scratch(asd_ha); /* Initialize LmSEQ Scratch RAM registers. */ asd_init_lseq_scratch(asd_ha); /* Initialize CSEQ CIO registers. */ asd_init_cseq_cio(asd_ha); asd_init_ddb_0(asd_ha); /* Initialize LmSEQ CIO registers. */ lseq_mask = asd_ha->hw_prof.enabled_phys; for_each_sequencer(lseq_mask, lseq_mask, lseq) asd_init_lseq_cio(asd_ha, lseq); asd_post_init_cseq(asd_ha); } /** * asd_seq_start_cseq -- start the central sequencer, CSEQ * @asd_ha: pointer to host adapter structure */ static int asd_seq_start_cseq(struct asd_ha_struct *asd_ha) { /* Reset the ARP2 instruction to location zero. */ asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop); /* Unpause the CSEQ */ return asd_unpause_cseq(asd_ha); } /** * asd_seq_start_lseq -- start a link sequencer * @asd_ha: pointer to host adapter structure * @lseq: the link sequencer of interest */ static int asd_seq_start_lseq(struct asd_ha_struct *asd_ha, int lseq) { /* Reset the ARP2 instruction to location zero. 
*/ asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop); /* Unpause the LmSEQ */ return asd_seq_unpause_lseq(asd_ha, lseq); } int asd_release_firmware(void) { if (sequencer_fw) release_firmware(sequencer_fw); return 0; } static int asd_request_firmware(struct asd_ha_struct *asd_ha) { int err, i; struct sequencer_file_header header; const struct sequencer_file_header *hdr_ptr; u32 csum = 0; u16 *ptr_cseq_vecs, *ptr_lseq_vecs; if (sequencer_fw) /* already loaded */ return 0; err = request_firmware(&sequencer_fw, SAS_RAZOR_SEQUENCER_FW_FILE, &asd_ha->pcidev->dev); if (err) return err; hdr_ptr = (const struct sequencer_file_header *)sequencer_fw->data; header.csum = le32_to_cpu(hdr_ptr->csum); header.major = le32_to_cpu(hdr_ptr->major); header.minor = le32_to_cpu(hdr_ptr->minor); header.cseq_table_offset = le32_to_cpu(hdr_ptr->cseq_table_offset); header.cseq_table_size = le32_to_cpu(hdr_ptr->cseq_table_size); header.lseq_table_offset = le32_to_cpu(hdr_ptr->lseq_table_offset); header.lseq_table_size = le32_to_cpu(hdr_ptr->lseq_table_size); header.cseq_code_offset = le32_to_cpu(hdr_ptr->cseq_code_offset); header.cseq_code_size = le32_to_cpu(hdr_ptr->cseq_code_size); header.lseq_code_offset = le32_to_cpu(hdr_ptr->lseq_code_offset); header.lseq_code_size = le32_to_cpu(hdr_ptr->lseq_code_size); header.mode2_task = le16_to_cpu(hdr_ptr->mode2_task); header.cseq_idle_loop = le16_to_cpu(hdr_ptr->cseq_idle_loop); header.lseq_idle_loop = le16_to_cpu(hdr_ptr->lseq_idle_loop); for (i = sizeof(header.csum); i < sequencer_fw->size; i++) csum += sequencer_fw->data[i]; if (csum != header.csum) { asd_printk("Firmware file checksum mismatch\n"); return -EINVAL; } if (header.cseq_table_size != CSEQ_NUM_VECS || header.lseq_table_size != LSEQ_NUM_VECS) { asd_printk("Firmware file table size mismatch\n"); return -EINVAL; } asd_printk("Found sequencer Firmware version %d.%d (%s)\n", header.major, header.minor, hdr_ptr->version); if (header.major != SAS_RAZOR_SEQUENCER_FW_MAJOR) { 
asd_printk("Firmware Major Version Mismatch;" "driver requires version %d.X", SAS_RAZOR_SEQUENCER_FW_MAJOR); return -EINVAL; } ptr_cseq_vecs = (u16 *)&sequencer_fw->data[header.cseq_table_offset]; ptr_lseq_vecs = (u16 *)&sequencer_fw->data[header.lseq_table_offset]; mode2_task = header.mode2_task; cseq_idle_loop = header.cseq_idle_loop; lseq_idle_loop = header.lseq_idle_loop; for (i = 0; i < CSEQ_NUM_VECS; i++) cseq_vecs[i] = le16_to_cpu(ptr_cseq_vecs[i]); for (i = 0; i < LSEQ_NUM_VECS; i++) lseq_vecs[i] = le16_to_cpu(ptr_lseq_vecs[i]); cseq_code = &sequencer_fw->data[header.cseq_code_offset]; cseq_code_size = header.cseq_code_size; lseq_code = &sequencer_fw->data[header.lseq_code_offset]; lseq_code_size = header.lseq_code_size; return 0; } int asd_init_seqs(struct asd_ha_struct *asd_ha) { int err; err = asd_request_firmware(asd_ha); if (err) { asd_printk("Failed to load sequencer firmware file %s, error %d\n", SAS_RAZOR_SEQUENCER_FW_FILE, err); return err; } err = asd_seq_download_seqs(asd_ha); if (err) { asd_printk("couldn't download sequencers for %s\n", pci_name(asd_ha->pcidev)); return err; } asd_seq_setup_seqs(asd_ha); return 0; } int asd_start_seqs(struct asd_ha_struct *asd_ha) { int err; u8 lseq_mask; int lseq; err = asd_seq_start_cseq(asd_ha); if (err) { asd_printk("couldn't start CSEQ for %s\n", pci_name(asd_ha->pcidev)); return err; } lseq_mask = asd_ha->hw_prof.enabled_phys; for_each_sequencer(lseq_mask, lseq_mask, lseq) { err = asd_seq_start_lseq(asd_ha, lseq); if (err) { asd_printk("coudln't start LSEQ %d for %s\n", lseq, pci_name(asd_ha->pcidev)); return err; } } return 0; } /** * asd_update_port_links -- update port_map_by_links and phy_is_up * @sas_phy: pointer to the phy which has been added to a port * * 1) When a link reset has completed and we got BYTES DMAED with a * valid frame we call this function for that phy, to indicate that * the phy is up, i.e. we update the phy_is_up in DDB 0. 
The * sequencer checks phy_is_up when pending SCBs are to be sent, and * when an open address frame has been received. * * 2) When we know of ports, we call this function to update the map * of phys participaing in that port, i.e. we update the * port_map_by_links in DDB 0. When a HARD_RESET primitive has been * received, the sequencer disables all phys in that port. * port_map_by_links is also used as the conn_mask byte in the * initiator/target port DDB. */ void asd_update_port_links(struct asd_ha_struct *asd_ha, struct asd_phy *phy) { const u8 phy_mask = (u8) phy->asd_port->phy_mask; u8 phy_is_up; u8 mask; int i, err; unsigned long flags; spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); for_each_phy(phy_mask, mask, i) asd_ddbsite_write_byte(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, port_map_by_links)+i,phy_mask); for (i = 0; i < 12; i++) { phy_is_up = asd_ddbsite_read_byte(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, phy_is_up)); err = asd_ddbsite_update_byte(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, phy_is_up), phy_is_up, phy_is_up | phy_mask); if (!err) break; else if (err == -EFAULT) { asd_printk("phy_is_up: parity error in DDB 0\n"); break; } } spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags); if (err) asd_printk("couldn't update DDB 0:error:%d\n", err); } MODULE_FIRMWARE(SAS_RAZOR_SEQUENCER_FW_FILE);
gpl-2.0
BlownFuze/i717_TW_JBkernel
sound/oss/dmabuf.c
9378
35709
/* * sound/oss/dmabuf.c * * The DMA buffer manager for digitized voice applications */ /* * Copyright (C) by Hannu Savolainen 1993-1997 * * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL) * Version 2 (June 1991). See the "COPYING" file distributed with this software * for more info. * * Thomas Sailer : moved several static variables into struct audio_operations * (which is grossly misnamed btw.) because they have the same * lifetime as the rest in there and dynamic allocation saves * 12k or so * Thomas Sailer : remove {in,out}_sleep_flag. It was used for the sleeper to * determine if it was woken up by the expiring timeout or by * an explicit wake_up. The return value from schedule_timeout * can be used instead; if 0, the wakeup was due to the timeout. * * Rob Riggs Added persistent DMA buffers (1998/10/17) */ #define BE_CONSERVATIVE #define SAMPLE_ROUNDUP 0 #include <linux/mm.h> #include <linux/gfp.h> #include "sound_config.h" #define DMAP_FREE_ON_CLOSE 0 #define DMAP_KEEP_ON_CLOSE 1 extern int sound_dmap_flag; static void dma_reset_output(int dev); static void dma_reset_input(int dev); static int local_start_dma(struct audio_operations *adev, unsigned long physaddr, int count, int dma_mode); static int debugmem; /* switched off by default */ static int dma_buffsize = DSP_BUFFSIZE; static long dmabuf_timeout(struct dma_buffparms *dmap) { long tmout; tmout = (dmap->fragment_size * HZ) / dmap->data_rate; tmout += HZ / 5; /* Some safety distance */ if (tmout < (HZ / 2)) tmout = HZ / 2; if (tmout > 20 * HZ) tmout = 20 * HZ; return tmout; } static int sound_alloc_dmap(struct dma_buffparms *dmap) { char *start_addr, *end_addr; int dma_pagesize; int sz, size; struct page *page; dmap->mapping_flags &= ~DMA_MAP_MAPPED; if (dmap->raw_buf != NULL) return 0; /* Already done */ if (dma_buffsize < 4096) dma_buffsize = 4096; dma_pagesize = (dmap->dma < 4) ? (64 * 1024) : (128 * 1024); /* * Now check for the Cyrix problem. 
*/ if(isa_dma_bridge_buggy==2) dma_pagesize=32768; dmap->raw_buf = NULL; dmap->buffsize = dma_buffsize; if (dmap->buffsize > dma_pagesize) dmap->buffsize = dma_pagesize; start_addr = NULL; /* * Now loop until we get a free buffer. Try to get smaller buffer if * it fails. Don't accept smaller than 8k buffer for performance * reasons. */ while (start_addr == NULL && dmap->buffsize > PAGE_SIZE) { for (sz = 0, size = PAGE_SIZE; size < dmap->buffsize; sz++, size <<= 1); dmap->buffsize = PAGE_SIZE * (1 << sz); start_addr = (char *) __get_free_pages(GFP_ATOMIC|GFP_DMA|__GFP_NOWARN, sz); if (start_addr == NULL) dmap->buffsize /= 2; } if (start_addr == NULL) { printk(KERN_WARNING "Sound error: Couldn't allocate DMA buffer\n"); return -ENOMEM; } else { /* make some checks */ end_addr = start_addr + dmap->buffsize - 1; if (debugmem) printk(KERN_DEBUG "sound: start 0x%lx, end 0x%lx\n", (long) start_addr, (long) end_addr); /* now check if it fits into the same dma-pagesize */ if (((long) start_addr & ~(dma_pagesize - 1)) != ((long) end_addr & ~(dma_pagesize - 1)) || end_addr >= (char *) (MAX_DMA_ADDRESS)) { printk(KERN_ERR "sound: Got invalid address 0x%lx for %db DMA-buffer\n", (long) start_addr, dmap->buffsize); return -EFAULT; } } dmap->raw_buf = start_addr; dmap->raw_buf_phys = virt_to_bus(start_addr); for (page = virt_to_page(start_addr); page <= virt_to_page(end_addr); page++) SetPageReserved(page); return 0; } static void sound_free_dmap(struct dma_buffparms *dmap) { int sz, size; struct page *page; unsigned long start_addr, end_addr; if (dmap->raw_buf == NULL) return; if (dmap->mapping_flags & DMA_MAP_MAPPED) return; /* Don't free mmapped buffer. 
Will use it next time */ for (sz = 0, size = PAGE_SIZE; size < dmap->buffsize; sz++, size <<= 1); start_addr = (unsigned long) dmap->raw_buf; end_addr = start_addr + dmap->buffsize; for (page = virt_to_page(start_addr); page <= virt_to_page(end_addr); page++) ClearPageReserved(page); free_pages((unsigned long) dmap->raw_buf, sz); dmap->raw_buf = NULL; } /* Intel version !!!!!!!!! */ static int sound_start_dma(struct dma_buffparms *dmap, unsigned long physaddr, int count, int dma_mode) { unsigned long flags; int chan = dmap->dma; /* printk( "Start DMA%d %d, %d\n", chan, (int)(physaddr-dmap->raw_buf_phys), count); */ flags = claim_dma_lock(); disable_dma(chan); clear_dma_ff(chan); set_dma_mode(chan, dma_mode); set_dma_addr(chan, physaddr); set_dma_count(chan, count); enable_dma(chan); release_dma_lock(flags); return 0; } static void dma_init_buffers(struct dma_buffparms *dmap) { dmap->qlen = dmap->qhead = dmap->qtail = dmap->user_counter = 0; dmap->byte_counter = 0; dmap->max_byte_counter = 8000 * 60 * 60; dmap->bytes_in_use = dmap->buffsize; dmap->dma_mode = DMODE_NONE; dmap->mapping_flags = 0; dmap->neutral_byte = 0x80; dmap->data_rate = 8000; dmap->cfrag = -1; dmap->closing = 0; dmap->nbufs = 1; dmap->flags = DMA_BUSY; /* Other flags off */ } static int open_dmap(struct audio_operations *adev, int mode, struct dma_buffparms *dmap) { int err; if (dmap->flags & DMA_BUSY) return -EBUSY; if ((err = sound_alloc_dmap(dmap)) < 0) return err; if (dmap->raw_buf == NULL) { printk(KERN_WARNING "Sound: DMA buffers not available\n"); return -ENOSPC; /* Memory allocation failed during boot */ } if (dmap->dma >= 0 && sound_open_dma(dmap->dma, adev->name)) { printk(KERN_WARNING "Unable to grab(2) DMA%d for the audio driver\n", dmap->dma); return -EBUSY; } dma_init_buffers(dmap); spin_lock_init(&dmap->lock); dmap->open_mode = mode; dmap->subdivision = dmap->underrun_count = 0; dmap->fragment_size = 0; dmap->max_fragments = 65536; /* Just a large value */ dmap->byte_counter = 0; 
dmap->max_byte_counter = 8000 * 60 * 60; dmap->applic_profile = APF_NORMAL; dmap->needs_reorg = 1; dmap->audio_callback = NULL; dmap->callback_parm = 0; return 0; } static void close_dmap(struct audio_operations *adev, struct dma_buffparms *dmap) { unsigned long flags; if (dmap->dma >= 0) { sound_close_dma(dmap->dma); flags=claim_dma_lock(); disable_dma(dmap->dma); release_dma_lock(flags); } if (dmap->flags & DMA_BUSY) dmap->dma_mode = DMODE_NONE; dmap->flags &= ~DMA_BUSY; if (sound_dmap_flag == DMAP_FREE_ON_CLOSE) sound_free_dmap(dmap); } static unsigned int default_set_bits(int dev, unsigned int bits) { mm_segment_t fs = get_fs(); set_fs(get_ds()); audio_devs[dev]->d->ioctl(dev, SNDCTL_DSP_SETFMT, (void __user *)&bits); set_fs(fs); return bits; } static int default_set_speed(int dev, int speed) { mm_segment_t fs = get_fs(); set_fs(get_ds()); audio_devs[dev]->d->ioctl(dev, SNDCTL_DSP_SPEED, (void __user *)&speed); set_fs(fs); return speed; } static short default_set_channels(int dev, short channels) { int c = channels; mm_segment_t fs = get_fs(); set_fs(get_ds()); audio_devs[dev]->d->ioctl(dev, SNDCTL_DSP_CHANNELS, (void __user *)&c); set_fs(fs); return c; } static void check_driver(struct audio_driver *d) { if (d->set_speed == NULL) d->set_speed = default_set_speed; if (d->set_bits == NULL) d->set_bits = default_set_bits; if (d->set_channels == NULL) d->set_channels = default_set_channels; } int DMAbuf_open(int dev, int mode) { struct audio_operations *adev = audio_devs[dev]; int retval; struct dma_buffparms *dmap_in = NULL; struct dma_buffparms *dmap_out = NULL; if (!adev) return -ENXIO; if (!(adev->flags & DMA_DUPLEX)) adev->dmap_in = adev->dmap_out; check_driver(adev->d); if ((retval = adev->d->open(dev, mode)) < 0) return retval; dmap_out = adev->dmap_out; dmap_in = adev->dmap_in; if (dmap_in == dmap_out) adev->flags &= ~DMA_DUPLEX; if (mode & OPEN_WRITE) { if ((retval = open_dmap(adev, mode, dmap_out)) < 0) { adev->d->close(dev); return retval; } } 
adev->enable_bits = mode; if (mode == OPEN_READ || (mode != OPEN_WRITE && (adev->flags & DMA_DUPLEX))) { if ((retval = open_dmap(adev, mode, dmap_in)) < 0) { adev->d->close(dev); if (mode & OPEN_WRITE) close_dmap(adev, dmap_out); return retval; } } adev->open_mode = mode; adev->go = 1; adev->d->set_bits(dev, 8); adev->d->set_channels(dev, 1); adev->d->set_speed(dev, DSP_DEFAULT_SPEED); if (adev->dmap_out->dma_mode == DMODE_OUTPUT) memset(adev->dmap_out->raw_buf, adev->dmap_out->neutral_byte, adev->dmap_out->bytes_in_use); return 0; } /* MUST not hold the spinlock */ void DMAbuf_reset(int dev) { if (audio_devs[dev]->open_mode & OPEN_WRITE) dma_reset_output(dev); if (audio_devs[dev]->open_mode & OPEN_READ) dma_reset_input(dev); } static void dma_reset_output(int dev) { struct audio_operations *adev = audio_devs[dev]; unsigned long flags,f ; struct dma_buffparms *dmap = adev->dmap_out; if (!(dmap->flags & DMA_STARTED)) /* DMA is not active */ return; /* * First wait until the current fragment has been played completely */ spin_lock_irqsave(&dmap->lock,flags); adev->dmap_out->flags |= DMA_SYNCING; adev->dmap_out->underrun_count = 0; if (!signal_pending(current) && adev->dmap_out->qlen && adev->dmap_out->underrun_count == 0){ spin_unlock_irqrestore(&dmap->lock,flags); interruptible_sleep_on_timeout(&adev->out_sleeper, dmabuf_timeout(dmap)); spin_lock_irqsave(&dmap->lock,flags); } adev->dmap_out->flags &= ~(DMA_SYNCING | DMA_ACTIVE); /* * Finally shut the device off */ if (!(adev->flags & DMA_DUPLEX) || !adev->d->halt_output) adev->d->halt_io(dev); else adev->d->halt_output(dev); adev->dmap_out->flags &= ~DMA_STARTED; f=claim_dma_lock(); clear_dma_ff(dmap->dma); disable_dma(dmap->dma); release_dma_lock(f); dmap->byte_counter = 0; reorganize_buffers(dev, adev->dmap_out, 0); dmap->qlen = dmap->qhead = dmap->qtail = dmap->user_counter = 0; spin_unlock_irqrestore(&dmap->lock,flags); } static void dma_reset_input(int dev) { struct audio_operations *adev = audio_devs[dev]; 
unsigned long flags; struct dma_buffparms *dmap = adev->dmap_in; spin_lock_irqsave(&dmap->lock,flags); if (!(adev->flags & DMA_DUPLEX) || !adev->d->halt_input) adev->d->halt_io(dev); else adev->d->halt_input(dev); adev->dmap_in->flags &= ~DMA_STARTED; dmap->qlen = dmap->qhead = dmap->qtail = dmap->user_counter = 0; dmap->byte_counter = 0; reorganize_buffers(dev, adev->dmap_in, 1); spin_unlock_irqrestore(&dmap->lock,flags); } /* MUST be called with holding the dmap->lock */ void DMAbuf_launch_output(int dev, struct dma_buffparms *dmap) { struct audio_operations *adev = audio_devs[dev]; if (!((adev->enable_bits * adev->go) & PCM_ENABLE_OUTPUT)) return; /* Don't start DMA yet */ dmap->dma_mode = DMODE_OUTPUT; if (!(dmap->flags & DMA_ACTIVE) || !(adev->flags & DMA_AUTOMODE) || (dmap->flags & DMA_NODMA)) { if (!(dmap->flags & DMA_STARTED)) { reorganize_buffers(dev, dmap, 0); if (adev->d->prepare_for_output(dev, dmap->fragment_size, dmap->nbufs)) return; if (!(dmap->flags & DMA_NODMA)) local_start_dma(adev, dmap->raw_buf_phys, dmap->bytes_in_use,DMA_MODE_WRITE); dmap->flags |= DMA_STARTED; } if (dmap->counts[dmap->qhead] == 0) dmap->counts[dmap->qhead] = dmap->fragment_size; dmap->dma_mode = DMODE_OUTPUT; adev->d->output_block(dev, dmap->raw_buf_phys + dmap->qhead * dmap->fragment_size, dmap->counts[dmap->qhead], 1); if (adev->d->trigger) adev->d->trigger(dev,adev->enable_bits * adev->go); } dmap->flags |= DMA_ACTIVE; } int DMAbuf_sync(int dev) { struct audio_operations *adev = audio_devs[dev]; unsigned long flags; int n = 0; struct dma_buffparms *dmap; if (!adev->go && !(adev->enable_bits & PCM_ENABLE_OUTPUT)) return 0; if (adev->dmap_out->dma_mode == DMODE_OUTPUT) { dmap = adev->dmap_out; spin_lock_irqsave(&dmap->lock,flags); if (dmap->qlen > 0 && !(dmap->flags & DMA_ACTIVE)) DMAbuf_launch_output(dev, dmap); adev->dmap_out->flags |= DMA_SYNCING; adev->dmap_out->underrun_count = 0; while (!signal_pending(current) && n++ < adev->dmap_out->nbufs && adev->dmap_out->qlen && 
adev->dmap_out->underrun_count == 0) { long t = dmabuf_timeout(dmap); spin_unlock_irqrestore(&dmap->lock,flags); /* FIXME: not safe may miss events */ t = interruptible_sleep_on_timeout(&adev->out_sleeper, t); spin_lock_irqsave(&dmap->lock,flags); if (!t) { adev->dmap_out->flags &= ~DMA_SYNCING; spin_unlock_irqrestore(&dmap->lock,flags); return adev->dmap_out->qlen; } } adev->dmap_out->flags &= ~(DMA_SYNCING | DMA_ACTIVE); /* * Some devices such as GUS have huge amount of on board RAM for the * audio data. We have to wait until the device has finished playing. */ /* still holding the lock */ if (adev->d->local_qlen) { /* Device has hidden buffers */ while (!signal_pending(current) && adev->d->local_qlen(dev)){ spin_unlock_irqrestore(&dmap->lock,flags); interruptible_sleep_on_timeout(&adev->out_sleeper, dmabuf_timeout(dmap)); spin_lock_irqsave(&dmap->lock,flags); } } spin_unlock_irqrestore(&dmap->lock,flags); } adev->dmap_out->dma_mode = DMODE_NONE; return adev->dmap_out->qlen; } int DMAbuf_release(int dev, int mode) { struct audio_operations *adev = audio_devs[dev]; struct dma_buffparms *dmap; unsigned long flags; dmap = adev->dmap_out; if (adev->open_mode & OPEN_WRITE) adev->dmap_out->closing = 1; if (adev->open_mode & OPEN_READ){ adev->dmap_in->closing = 1; dmap = adev->dmap_in; } if (adev->open_mode & OPEN_WRITE) if (!(adev->dmap_out->mapping_flags & DMA_MAP_MAPPED)) if (!signal_pending(current) && (adev->dmap_out->dma_mode == DMODE_OUTPUT)) DMAbuf_sync(dev); if (adev->dmap_out->dma_mode == DMODE_OUTPUT) memset(adev->dmap_out->raw_buf, adev->dmap_out->neutral_byte, adev->dmap_out->bytes_in_use); DMAbuf_reset(dev); spin_lock_irqsave(&dmap->lock,flags); adev->d->close(dev); if (adev->open_mode & OPEN_WRITE) close_dmap(adev, adev->dmap_out); if (adev->open_mode == OPEN_READ || (adev->open_mode != OPEN_WRITE && (adev->flags & DMA_DUPLEX))) close_dmap(adev, adev->dmap_in); adev->open_mode = 0; spin_unlock_irqrestore(&dmap->lock,flags); return 0; } /* called with 
dmap->lock dold */ int DMAbuf_activate_recording(int dev, struct dma_buffparms *dmap) { struct audio_operations *adev = audio_devs[dev]; int err; if (!(adev->open_mode & OPEN_READ)) return 0; if (!(adev->enable_bits & PCM_ENABLE_INPUT)) return 0; if (dmap->dma_mode == DMODE_OUTPUT) { /* Direction change */ /* release lock - it's not recursive */ spin_unlock_irq(&dmap->lock); DMAbuf_sync(dev); DMAbuf_reset(dev); spin_lock_irq(&dmap->lock); dmap->dma_mode = DMODE_NONE; } if (!dmap->dma_mode) { reorganize_buffers(dev, dmap, 1); if ((err = adev->d->prepare_for_input(dev, dmap->fragment_size, dmap->nbufs)) < 0) return err; dmap->dma_mode = DMODE_INPUT; } if (!(dmap->flags & DMA_ACTIVE)) { if (dmap->needs_reorg) reorganize_buffers(dev, dmap, 0); local_start_dma(adev, dmap->raw_buf_phys, dmap->bytes_in_use, DMA_MODE_READ); adev->d->start_input(dev, dmap->raw_buf_phys + dmap->qtail * dmap->fragment_size, dmap->fragment_size, 0); dmap->flags |= DMA_ACTIVE; if (adev->d->trigger) adev->d->trigger(dev, adev->enable_bits * adev->go); } return 0; } /* acquires lock */ int DMAbuf_getrdbuffer(int dev, char **buf, int *len, int dontblock) { struct audio_operations *adev = audio_devs[dev]; unsigned long flags; int err = 0, n = 0; struct dma_buffparms *dmap = adev->dmap_in; int go; if (!(adev->open_mode & OPEN_READ)) return -EIO; spin_lock_irqsave(&dmap->lock,flags); if (dmap->needs_reorg) reorganize_buffers(dev, dmap, 0); if (adev->dmap_in->mapping_flags & DMA_MAP_MAPPED) { /* printk(KERN_WARNING "Sound: Can't read from mmapped device (1)\n");*/ spin_unlock_irqrestore(&dmap->lock,flags); return -EINVAL; } else while (dmap->qlen <= 0 && n++ < 10) { long timeout = MAX_SCHEDULE_TIMEOUT; if (!(adev->enable_bits & PCM_ENABLE_INPUT) || !adev->go) { spin_unlock_irqrestore(&dmap->lock,flags); return -EAGAIN; } if ((err = DMAbuf_activate_recording(dev, dmap)) < 0) { spin_unlock_irqrestore(&dmap->lock,flags); return err; } /* Wait for the next block */ if (dontblock) { 
spin_unlock_irqrestore(&dmap->lock,flags); return -EAGAIN; } if ((go = adev->go)) timeout = dmabuf_timeout(dmap); spin_unlock_irqrestore(&dmap->lock,flags); timeout = interruptible_sleep_on_timeout(&adev->in_sleeper, timeout); if (!timeout) { /* FIXME: include device name */ err = -EIO; printk(KERN_WARNING "Sound: DMA (input) timed out - IRQ/DRQ config error?\n"); dma_reset_input(dev); } else err = -EINTR; spin_lock_irqsave(&dmap->lock,flags); } spin_unlock_irqrestore(&dmap->lock,flags); if (dmap->qlen <= 0) return err ? err : -EINTR; *buf = &dmap->raw_buf[dmap->qhead * dmap->fragment_size + dmap->counts[dmap->qhead]]; *len = dmap->fragment_size - dmap->counts[dmap->qhead]; return dmap->qhead; } int DMAbuf_rmchars(int dev, int buff_no, int c) { struct audio_operations *adev = audio_devs[dev]; struct dma_buffparms *dmap = adev->dmap_in; int p = dmap->counts[dmap->qhead] + c; if (dmap->mapping_flags & DMA_MAP_MAPPED) { /* printk("Sound: Can't read from mmapped device (2)\n");*/ return -EINVAL; } else if (dmap->qlen <= 0) return -EIO; else if (p >= dmap->fragment_size) { /* This buffer is completely empty */ dmap->counts[dmap->qhead] = 0; dmap->qlen--; dmap->qhead = (dmap->qhead + 1) % dmap->nbufs; } else dmap->counts[dmap->qhead] = p; return 0; } /* MUST be called with dmap->lock hold */ int DMAbuf_get_buffer_pointer(int dev, struct dma_buffparms *dmap, int direction) { /* * Try to approximate the active byte position of the DMA pointer within the * buffer area as well as possible. 
*/ int pos; unsigned long f; if (!(dmap->flags & DMA_ACTIVE)) pos = 0; else { int chan = dmap->dma; f=claim_dma_lock(); clear_dma_ff(chan); if(!isa_dma_bridge_buggy) disable_dma(dmap->dma); pos = get_dma_residue(chan); pos = dmap->bytes_in_use - pos; if (!(dmap->mapping_flags & DMA_MAP_MAPPED)) { if (direction == DMODE_OUTPUT) { if (dmap->qhead == 0) if (pos > dmap->fragment_size) pos = 0; } else { if (dmap->qtail == 0) if (pos > dmap->fragment_size) pos = 0; } } if (pos < 0) pos = 0; if (pos >= dmap->bytes_in_use) pos = 0; if(!isa_dma_bridge_buggy) enable_dma(dmap->dma); release_dma_lock(f); } /* printk( "%04x ", pos); */ return pos; } /* * DMAbuf_start_devices() is called by the /dev/music driver to start * one or more audio devices at desired moment. */ void DMAbuf_start_devices(unsigned int devmask) { struct audio_operations *adev; int dev; for (dev = 0; dev < num_audiodevs; dev++) { if (!(devmask & (1 << dev))) continue; if (!(adev = audio_devs[dev])) continue; if (adev->open_mode == 0) continue; if (adev->go) continue; /* OK to start the device */ adev->go = 1; if (adev->d->trigger) adev->d->trigger(dev,adev->enable_bits * adev->go); } } /* via poll called without a lock ?*/ int DMAbuf_space_in_queue(int dev) { struct audio_operations *adev = audio_devs[dev]; int len, max, tmp; struct dma_buffparms *dmap = adev->dmap_out; int lim = dmap->nbufs; if (lim < 2) lim = 2; if (dmap->qlen >= lim) /* No space at all */ return 0; /* * Verify that there are no more pending buffers than the limit * defined by the process. 
*/
	/*
	 * NOTE(review): the lines above/below up to the closing brace are the
	 * tail of DMAbuf_space_in_queue(); its head is outside this chunk.
	 */
	max = dmap->max_fragments;
	if (max > lim)
		max = lim;
	len = dmap->qlen;

	if (adev->d->local_qlen) {
		tmp = adev->d->local_qlen(dev);
		if (tmp && len)
			tmp--;	/* This buffer has been counted twice */
		len += tmp;
	}
	if (dmap->byte_counter % dmap->fragment_size)	/* There is a partial fragment */
		len = len + 1;

	if (len >= max)
		return 0;
	return max - len;
}

/*
 * Block the caller until there is room for more output data (or until a
 * signal/timeout).  Returns 0 on success, -EAGAIN for non-blocking mode or
 * disabled output, -EINTR on signal.
 *
 * MUST not hold the spinlock - this function may sleep
 */
static int output_sleep(int dev, int dontblock)
{
	struct audio_operations *adev = audio_devs[dev];
	int err = 0;
	struct dma_buffparms *dmap = adev->dmap_out;
	long timeout;
	long timeout_value;

	if (dontblock)
		return -EAGAIN;
	if (!(adev->enable_bits & PCM_ENABLE_OUTPUT))
		return -EAGAIN;

	/*
	 * Wait for free space
	 */
	if (signal_pending(current))
		return -EINTR;
	/* Only use a finite timeout when the device is running (go) and has
	 * not opted out via DMA_NOTIMEOUT. */
	timeout = (adev->go && !(dmap->flags & DMA_NOTIMEOUT));
	if (timeout)
		timeout_value = dmabuf_timeout(dmap);
	else
		timeout_value = MAX_SCHEDULE_TIMEOUT;
	timeout_value = interruptible_sleep_on_timeout(&adev->out_sleeper,
						       timeout_value);
	/*
	 * NOTE(review): 'timeout' here is a 0/1 flag, so comparing it against
	 * MAX_SCHEDULE_TIMEOUT is always true; the author presumably meant
	 * 'timeout_value != MAX_SCHEDULE_TIMEOUT' (or simply 'timeout').
	 * The net effect is that any zero timeout_value triggers the reset
	 * path below — verify against upstream history before changing.
	 */
	if (timeout != MAX_SCHEDULE_TIMEOUT && !timeout_value) {
		printk(KERN_WARNING "Sound: DMA (output) timed out - IRQ/DRQ config error?\n");
		dma_reset_output(dev);
	} else {
		if (signal_pending(current))
			err = -EINTR;
	}
	return err;
}

/*
 * Locate the next writable region of the output DMA buffer.  On success
 * *buf/*size describe a contiguous, sample-aligned span the caller may fill
 * and the function returns non-zero; returns 0 when no space is available.
 *
 * called with the lock held
 */
static int find_output_space(int dev, char **buf, int *size)
{
	struct audio_operations *adev = audio_devs[dev];
	struct dma_buffparms *dmap = adev->dmap_out;
	unsigned long active_offs;
	long len, offs;
	int maxfrags;
	/* Bytes already written into the current (partial) fragment. */
	int occupied_bytes = (dmap->user_counter % dmap->fragment_size);

	*buf = dmap->raw_buf;
	if (!(maxfrags = DMAbuf_space_in_queue(dev)) && !occupied_bytes)
		return 0;

#ifdef BE_CONSERVATIVE
	active_offs = dmap->byte_counter + dmap->qhead * dmap->fragment_size;
#else
	active_offs = max(DMAbuf_get_buffer_pointer(dev, dmap, DMODE_OUTPUT), 0);
	/* Check for pointer wrapping situation */
	if (active_offs >= dmap->bytes_in_use)
		active_offs = 0;
	active_offs += dmap->byte_counter;
#endif

	offs = (dmap->user_counter % dmap->bytes_in_use) & ~SAMPLE_ROUNDUP;
	if (offs < 0 || offs >= dmap->bytes_in_use) {
		printk(KERN_ERR "Sound: Got unexpected offs %ld. Giving up.\n", offs);
		printk("Counter = %ld, bytes=%d\n", dmap->user_counter, dmap->bytes_in_use);
		return 0;
	}
	*buf = dmap->raw_buf + offs;

	len = active_offs + dmap->bytes_in_use - dmap->user_counter;	/* Number of unused bytes in buffer */

	/* Clamp to the end of the ring buffer (no wrap within one span). */
	if ((offs + len) > dmap->bytes_in_use)
		len = dmap->bytes_in_use - offs;

	if (len < 0) {
		return 0;
	}
	/* Also clamp to the fragment-queue space reported above. */
	if (len > ((maxfrags * dmap->fragment_size) - occupied_bytes))
		len = (maxfrags * dmap->fragment_size) - occupied_bytes;
	*size = len & ~SAMPLE_ROUNDUP;

	return (*size > 0);
}

/*
 * Obtain a writable buffer span for userspace output, sleeping until space
 * is available (unless dontblock).  Rejects mmapped devices.
 *
 * acquires lock
 */
int DMAbuf_getwrbuffer(int dev, char **buf, int *size, int dontblock)
{
	struct audio_operations *adev = audio_devs[dev];
	unsigned long flags;
	int err = -EIO;	/* NOTE(review): initial value is never used */
	struct dma_buffparms *dmap = adev->dmap_out;

	if (dmap->mapping_flags & DMA_MAP_MAPPED) {
/*		printk(KERN_DEBUG "Sound: Can't write to mmapped device (3)\n");*/
		return -EINVAL;
	}
	spin_lock_irqsave(&dmap->lock,flags);
	if (dmap->needs_reorg)
		reorganize_buffers(dev, dmap, 0);

	if (dmap->dma_mode == DMODE_INPUT) {	/* Direction change */
		/* Lock must be dropped: DMAbuf_reset() may sleep. */
		spin_unlock_irqrestore(&dmap->lock,flags);
		DMAbuf_reset(dev);
		spin_lock_irqsave(&dmap->lock,flags);
	}
	dmap->dma_mode = DMODE_OUTPUT;

	while (find_output_space(dev, buf, size) <= 0) {
		spin_unlock_irqrestore(&dmap->lock,flags);
		if ((err = output_sleep(dev, dontblock)) < 0) {
			return err;
		}
		spin_lock_irqsave(&dmap->lock,flags);
	}
	spin_unlock_irqrestore(&dmap->lock,flags);

	return 0;
}

/*
 * Advance the userspace write pointer by l bytes after data was copied in,
 * update per-fragment bookkeeping, and kick output if enough is queued.
 *
 * has to acquire dmap->lock
 */
int DMAbuf_move_wrpointer(int dev, int l)
{
	struct audio_operations *adev = audio_devs[dev];
	struct dma_buffparms *dmap = adev->dmap_out;
	unsigned long ptr;
	unsigned long end_ptr, p;
	int post;
	unsigned long flags;

	spin_lock_irqsave(&dmap->lock,flags);
	post= (dmap->flags & DMA_POST);
	/* Fragment-aligned position before the move. */
	ptr = (dmap->user_counter / dmap->fragment_size) * dmap->fragment_size;

	dmap->flags &= ~DMA_POST;
	dmap->cfrag = -1;
	dmap->user_counter += l;
	dmap->flags |= DMA_DIRTY;

	if (dmap->byte_counter >= dmap->max_byte_counter) {
		/* Wrap the byte counters */
		long decr = dmap->byte_counter;
		dmap->byte_counter = (dmap->byte_counter % dmap->bytes_in_use);
		decr -= dmap->byte_counter;
		dmap->user_counter -= decr;
	}
	end_ptr = (dmap->user_counter / dmap->fragment_size) * dmap->fragment_size;

	/* Remember the last byte written; used to fill underruns silently. */
	p = (dmap->user_counter - 1) % dmap->bytes_in_use;
	dmap->neutral_byte = dmap->raw_buf[p];

	/* Update the fragment based bookkeeping too */
	while (ptr < end_ptr) {
		dmap->counts[dmap->qtail] = dmap->fragment_size;
		dmap->qtail = (dmap->qtail + 1) % dmap->nbufs;
		dmap->qlen++;
		ptr += dmap->fragment_size;
	}

	dmap->counts[dmap->qtail] = dmap->user_counter - ptr;

	/*
	 * Let the low level driver perform some postprocessing to
	 * the written data.
	 */
	if (adev->d->postprocess_write)
		adev->d->postprocess_write(dev);

	if (!(dmap->flags & DMA_ACTIVE))
		if (dmap->qlen > 1 || (dmap->qlen > 0 && (post || dmap->qlen >= dmap->nbufs - 1)))
			DMAbuf_launch_output(dev, dmap);

	spin_unlock_irqrestore(&dmap->lock,flags);
	return 0;
}

/*
 * Start a raw DMA transfer on the device's channel.  Returns count on
 * success, 0 when there is no buffer or no DMA channel assigned.
 */
int DMAbuf_start_dma(int dev, unsigned long physaddr, int count, int dma_mode)
{
	struct audio_operations *adev = audio_devs[dev];
	struct dma_buffparms *dmap = (dma_mode == DMA_MODE_WRITE) ? adev->dmap_out : adev->dmap_in;

	if (dmap->raw_buf == NULL) {
		printk(KERN_ERR "sound: DMA buffer(1) == NULL\n");
		printk("Device %d, chn=%s\n", dev, (dmap == adev->dmap_out) ? "out" : "in");
		return 0;
	}
	if (dmap->dma < 0)
		return 0;
	sound_start_dma(dmap, physaddr, count, dma_mode);
	return count;
}
EXPORT_SYMBOL(DMAbuf_start_dma);

/*
 * Start auto-init DMA over the whole buffer, once.  Returns 1 for
 * DMA-less devices, 0 on missing buffer/channel, count when started.
 * Note: physaddr/count parameters are not used for the actual transfer;
 * the full raw buffer is programmed with DMA_AUTOINIT.
 */
static int local_start_dma(struct audio_operations *adev, unsigned long physaddr, int count, int dma_mode)
{
	struct dma_buffparms *dmap = (dma_mode == DMA_MODE_WRITE) ? adev->dmap_out : adev->dmap_in;

	if (dmap->raw_buf == NULL) {
		printk(KERN_ERR "sound: DMA buffer(2) == NULL\n");
		printk(KERN_ERR "Device %s, chn=%s\n", adev->name, (dmap == adev->dmap_out) ? "out" : "in");
		return 0;
	}
	if (dmap->flags & DMA_NODMA)
		return 1;
	if (dmap->dma < 0)
		return 0;

	sound_start_dma(dmap, dmap->raw_buf_phys, dmap->bytes_in_use, dma_mode | DMA_AUTOINIT);
	dmap->flags |= DMA_STARTED;
	return count;
}

/* Notify waiters (and an optional callback) that output progressed. */
static void finish_output_interrupt(int dev, struct dma_buffparms *dmap)
{
	struct audio_operations *adev = audio_devs[dev];

	if (dmap->audio_callback != NULL)
		dmap->audio_callback(dev, dmap->callback_parm);
	wake_up(&adev->out_sleeper);
	wake_up(&adev->poll_sleeper);
}

/*
 * Handle one completed output fragment: advance qhead, wrap the byte
 * counters, detect/patch underruns, and relaunch output if more is queued.
 *
 * called with dmap->lock held in irq context
 */
static void do_outputintr(int dev, int dummy)
{
	struct audio_operations *adev = audio_devs[dev];
	struct dma_buffparms *dmap = adev->dmap_out;
	int this_fragment;

	if (dmap->raw_buf == NULL) {
		printk(KERN_ERR "Sound: Error. Audio interrupt (%d) after freeing buffers.\n", dev);
		return;
	}
	if (dmap->mapping_flags & DMA_MAP_MAPPED) {	/* Virtual memory mapped access */
		/* mmapped access */
		dmap->qhead = (dmap->qhead + 1) % dmap->nbufs;
		if (dmap->qhead == 0) {	/* Wrapped */
			dmap->byte_counter += dmap->bytes_in_use;
			if (dmap->byte_counter >= dmap->max_byte_counter) {	/* Overflow */
				long decr = dmap->byte_counter;
				dmap->byte_counter = (dmap->byte_counter % dmap->bytes_in_use);
				decr -= dmap->byte_counter;
				dmap->user_counter -= decr;
			}
		}
		dmap->qlen++;	/* Yes increment it (don't decrement) */
		if (!(adev->flags & DMA_AUTOMODE))
			dmap->flags &= ~DMA_ACTIVE;
		dmap->counts[dmap->qhead] = dmap->fragment_size;
		DMAbuf_launch_output(dev, dmap);
		finish_output_interrupt(dev, dmap);
		return;
	}

	dmap->qlen--;
	this_fragment = dmap->qhead;
	dmap->qhead = (dmap->qhead + 1) % dmap->nbufs;

	if (dmap->qhead == 0) {	/* Wrapped */
		dmap->byte_counter += dmap->bytes_in_use;
		if (dmap->byte_counter >= dmap->max_byte_counter) {	/* Overflow */
			long decr = dmap->byte_counter;
			dmap->byte_counter = (dmap->byte_counter % dmap->bytes_in_use);
			decr -= dmap->byte_counter;
			dmap->user_counter -= decr;
		}
	}
	if (!(adev->flags & DMA_AUTOMODE))
		dmap->flags &= ~DMA_ACTIVE;

	/*
	 * This is dmap->qlen <= 0 except when closing when
	 * dmap->qlen < 0
	 */
	while (dmap->qlen <= -dmap->closing) {
		/* Underrun: consume a phantom fragment and, unless the app is
		 * doing CPU-intensive refills, blank the buffer with the last
		 * written byte so stale audio is not replayed. */
		dmap->underrun_count++;
		dmap->qlen++;
		if ((dmap->flags & DMA_DIRTY) && dmap->applic_profile != APF_CPUINTENS) {
			dmap->flags &= ~DMA_DIRTY;
			memset(adev->dmap_out->raw_buf, adev->dmap_out->neutral_byte, adev->dmap_out->buffsize);
		}
		dmap->user_counter += dmap->fragment_size;
		dmap->qtail = (dmap->qtail + 1) % dmap->nbufs;
	}
	if (dmap->qlen > 0)
		DMAbuf_launch_output(dev, dmap);
	finish_output_interrupt(dev, dmap);
}

/*
 * Output IRQ entry point.  For real DMA, read the channel's residue to find
 * the hardware position and catch up fragment-by-fragment; DMA-less devices
 * get a single do_outputintr() call.
 *
 * called in irq context
 */
void DMAbuf_outputintr(int dev, int notify_only)
{
	struct audio_operations *adev = audio_devs[dev];
	unsigned long flags;
	struct dma_buffparms *dmap = adev->dmap_out;

	spin_lock_irqsave(&dmap->lock,flags);
	if (!(dmap->flags & DMA_NODMA)) {
		int chan = dmap->dma, pos, n;
		unsigned long f;

		f=claim_dma_lock();

		/* Some ISA bridges hang if the channel is disabled mid-flight. */
		if(!isa_dma_bridge_buggy)
			disable_dma(dmap->dma);
		clear_dma_ff(chan);
		pos = dmap->bytes_in_use - get_dma_residue(chan);
		if(!isa_dma_bridge_buggy)
			enable_dma(dmap->dma);
		release_dma_lock(f);

		pos = pos / dmap->fragment_size;	/* Actual qhead */
		if (pos < 0 || pos >= dmap->nbufs)
			pos = 0;

		/* Process at most nbufs fragments to reach the hw position. */
		n = 0;
		while (dmap->qhead != pos && n++ < dmap->nbufs)
			do_outputintr(dev, notify_only);
	}
	else
		do_outputintr(dev, notify_only);
	spin_unlock_irqrestore(&dmap->lock,flags);
}
EXPORT_SYMBOL(DMAbuf_outputintr);

/*
 * Handle one completed input fragment: advance qtail, wrap counters,
 * detect recording overruns, and restart the engine when needed.
 *
 * called with dmap->lock held in irq context
 */
static void do_inputintr(int dev)
{
	struct audio_operations *adev = audio_devs[dev];
	struct dma_buffparms *dmap = adev->dmap_in;

	if (dmap->raw_buf == NULL) {
		printk(KERN_ERR "Sound: Fatal error. Audio interrupt after freeing buffers.\n");
		return;
	}
	if (dmap->mapping_flags & DMA_MAP_MAPPED) {
		dmap->qtail = (dmap->qtail + 1) % dmap->nbufs;
		if (dmap->qtail == 0) {	/* Wrapped */
			dmap->byte_counter += dmap->bytes_in_use;
			if (dmap->byte_counter >= dmap->max_byte_counter) {	/* Overflow */
				long decr = dmap->byte_counter;
				/*
				 * NOTE(review): unlike the output path, the
				 * input wrap keeps an extra '+ bytes_in_use'
				 * term — confirm against upstream whether
				 * this asymmetry is intentional.
				 */
				dmap->byte_counter = (dmap->byte_counter % dmap->bytes_in_use) + dmap->bytes_in_use;
				decr -= dmap->byte_counter;
				dmap->user_counter -= decr;
			}
		}
		dmap->qlen++;

		if (!(adev->flags & DMA_AUTOMODE)) {
			if (dmap->needs_reorg)
				reorganize_buffers(dev, dmap, 0);
			local_start_dma(adev, dmap->raw_buf_phys, dmap->bytes_in_use,DMA_MODE_READ);
			adev->d->start_input(dev, dmap->raw_buf_phys + dmap->qtail * dmap->fragment_size,
					dmap->fragment_size, 1);
			if (adev->d->trigger)
				adev->d->trigger(dev, adev->enable_bits * adev->go);
		}
		dmap->flags |= DMA_ACTIVE;
	} else if (dmap->qlen >= (dmap->nbufs - 1)) {
		printk(KERN_WARNING "Sound: Recording overrun\n");
		dmap->underrun_count++;

		/* Just throw away the oldest fragment but keep the engine running */
		dmap->qhead = (dmap->qhead + 1) % dmap->nbufs;
		dmap->qtail = (dmap->qtail + 1) % dmap->nbufs;
	} else if (dmap->qlen >= 0 && dmap->qlen < dmap->nbufs) {
		dmap->qlen++;
		dmap->qtail = (dmap->qtail + 1) % dmap->nbufs;
		if (dmap->qtail == 0) {	/* Wrapped */
			dmap->byte_counter += dmap->bytes_in_use;
			if (dmap->byte_counter >= dmap->max_byte_counter) {	/* Overflow */
				long decr = dmap->byte_counter;
				dmap->byte_counter = (dmap->byte_counter % dmap->bytes_in_use) + dmap->bytes_in_use;
				decr -= dmap->byte_counter;
				dmap->user_counter -= decr;
			}
		}
	}
	if (!(adev->flags & DMA_AUTOMODE) || (dmap->flags & DMA_NODMA)) {
		local_start_dma(adev, dmap->raw_buf_phys, dmap->bytes_in_use, DMA_MODE_READ);
		adev->d->start_input(dev, dmap->raw_buf_phys + dmap->qtail * dmap->fragment_size, dmap->fragment_size, 1);
		if (adev->d->trigger)
			adev->d->trigger(dev,adev->enable_bits * adev->go);
	}
	dmap->flags |= DMA_ACTIVE;
	if (dmap->qlen > 0) {
		wake_up(&adev->in_sleeper);
		wake_up(&adev->poll_sleeper);
	}
}

/*
 * Input IRQ entry point.  Mirrors DMAbuf_outputintr(): compute the hardware
 * position from the DMA residue and process fragments up to it.
 *
 * called in irq context
 */
void DMAbuf_inputintr(int dev)
{
	struct audio_operations *adev = audio_devs[dev];
	struct dma_buffparms *dmap = adev->dmap_in;
	unsigned long flags;

	spin_lock_irqsave(&dmap->lock,flags);
	if (!(dmap->flags & DMA_NODMA)) {
		int chan = dmap->dma, pos, n;
		unsigned long f;

		f=claim_dma_lock();
		if(!isa_dma_bridge_buggy)
			disable_dma(dmap->dma);
		clear_dma_ff(chan);
		pos = dmap->bytes_in_use - get_dma_residue(chan);
		if(!isa_dma_bridge_buggy)
			enable_dma(dmap->dma);
		release_dma_lock(f);

		pos = pos / dmap->fragment_size;	/* Actual qhead */
		if (pos < 0 || pos >= dmap->nbufs)
			pos = 0;

		/*
		 * NOTE(review): uses pre-increment (++n) where the output
		 * path uses post-increment (n++), i.e. one fewer iteration
		 * here — confirm whether intentional.
		 */
		n = 0;
		while (dmap->qtail != pos && ++n < dmap->nbufs)
			do_inputintr(dev);
	} else
		do_inputintr(dev);
	spin_unlock_irqrestore(&dmap->lock,flags);
}
EXPORT_SYMBOL(DMAbuf_inputintr);

/*
 * Bind DMA channels / maps to an audio device; shares the parent device's
 * maps for shadow devices, and pre-allocates persistent DMA buffers when
 * sound_dmap_flag requests keep-on-close semantics.  Idempotent.
 */
void DMAbuf_init(int dev, int dma1, int dma2)
{
	struct audio_operations *adev = audio_devs[dev];
	/*
	 * NOTE! This routine could be called several times.
	 */

	if (adev && adev->dmap_out == NULL) {
		if (adev->d == NULL)
			panic("OSS: audio_devs[%d]->d == NULL\n", dev);

		if (adev->parent_dev) {	/* Use DMA map of the parent dev */
			int parent = adev->parent_dev - 1;
			adev->dmap_out = audio_devs[parent]->dmap_out;
			adev->dmap_in = audio_devs[parent]->dmap_in;
		} else {
			adev->dmap_out = adev->dmap_in = &adev->dmaps[0];
			adev->dmap_out->dma = dma1;
			if (adev->flags & DMA_DUPLEX) {
				adev->dmap_in = &adev->dmaps[1];
				adev->dmap_in->dma = dma2;
			}
		}
		/* Persistent DMA buffers allocated here */
		if (sound_dmap_flag == DMAP_KEEP_ON_CLOSE) {
			if (adev->dmap_in->raw_buf == NULL)
				sound_alloc_dmap(adev->dmap_in);
			if (adev->dmap_out->raw_buf == NULL)
				sound_alloc_dmap(adev->dmap_out);
		}
	}
}

/*
 * poll() helper for the capture side; may also kick-start recording when
 * the device is idle but enabled.
 *
 * No kernel lock - DMAbuf_activate_recording protected by global cli/sti
 */
static unsigned int poll_input(struct file * file, int dev, poll_table *wait)
{
	struct audio_operations *adev = audio_devs[dev];
	struct dma_buffparms *dmap = adev->dmap_in;

	if (!(adev->open_mode & OPEN_READ))
		return 0;
	if (dmap->mapping_flags & DMA_MAP_MAPPED) {
		if (dmap->qlen)
			return POLLIN | POLLRDNORM;
		return 0;
	}
	if (dmap->dma_mode != DMODE_INPUT) {
		/* Device idle: start recording so a later poll can succeed. */
		if (dmap->dma_mode == DMODE_NONE &&
		    adev->enable_bits & PCM_ENABLE_INPUT &&
		    !dmap->qlen && adev->go) {
			unsigned long flags;

			spin_lock_irqsave(&dmap->lock,flags);
			DMAbuf_activate_recording(dev, dmap);
			spin_unlock_irqrestore(&dmap->lock,flags);
		}
		return 0;
	}
	if (!dmap->qlen)
		return 0;
	return POLLIN | POLLRDNORM;
}

/* poll() helper for the playback side. */
static unsigned int poll_output(struct file * file, int dev, poll_table *wait)
{
	struct audio_operations *adev = audio_devs[dev];
	struct dma_buffparms *dmap = adev->dmap_out;

	if (!(adev->open_mode & OPEN_WRITE))
		return 0;
	if (dmap->mapping_flags & DMA_MAP_MAPPED) {
		if (dmap->qlen)
			return POLLOUT | POLLWRNORM;
		return 0;
	}
	if (dmap->dma_mode == DMODE_INPUT)
		return 0;
	if (dmap->dma_mode == DMODE_NONE)
		return POLLOUT | POLLWRNORM;
	if (!DMAbuf_space_in_queue(dev))
		return 0;
	return POLLOUT | POLLWRNORM;
}

/* Combined poll entry point: waits on the shared poll queue and merges
 * input/output readiness bits. */
unsigned int DMAbuf_poll(struct file * file, int dev, poll_table *wait)
{
	struct audio_operations *adev = audio_devs[dev];

	poll_wait(file, &adev->poll_sleeper, wait);
	return poll_input(file, dev, wait) | poll_output(file, dev, wait);
}

/* Release persistent DMA buffers on driver unload. */
void DMAbuf_deinit(int dev)
{
	struct audio_operations *adev = audio_devs[dev];
	/* This routine is called when driver is being unloaded */
	if (!adev)
		return;

	/* Persistent DMA buffers deallocated here */
	if (sound_dmap_flag == DMAP_KEEP_ON_CLOSE) {
		sound_free_dmap(adev->dmap_out);
		if (adev->flags & DMA_DUPLEX)
			sound_free_dmap(adev->dmap_in);
	}
}
gpl-2.0
F35X70/Z7Mini_NX507J_H128_kernel
arch/arm/kernel/insn.c
9634
1347
#include <linux/bug.h> #include <linux/kernel.h> #include <asm/opcodes.h> static unsigned long __arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link) { unsigned long s, j1, j2, i1, i2, imm10, imm11; unsigned long first, second; long offset; offset = (long)addr - (long)(pc + 4); if (offset < -16777216 || offset > 16777214) { WARN_ON_ONCE(1); return 0; } s = (offset >> 24) & 0x1; i1 = (offset >> 23) & 0x1; i2 = (offset >> 22) & 0x1; imm10 = (offset >> 12) & 0x3ff; imm11 = (offset >> 1) & 0x7ff; j1 = (!i1) ^ s; j2 = (!i2) ^ s; first = 0xf000 | (s << 10) | imm10; second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11; if (link) second |= 1 << 14; return __opcode_thumb32_compose(first, second); } static unsigned long __arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link) { unsigned long opcode = 0xea000000; long offset; if (link) opcode |= 1 << 24; offset = (long)addr - (long)(pc + 8); if (unlikely(offset < -33554432 || offset > 33554428)) { WARN_ON_ONCE(1); return 0; } offset = (offset >> 2) & 0x00ffffff; return opcode | offset; } unsigned long __arm_gen_branch(unsigned long pc, unsigned long addr, bool link) { if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) return __arm_gen_branch_thumb2(pc, addr, link); else return __arm_gen_branch_arm(pc, addr, link); }
gpl-2.0
SuperPichu/shield-tablet-kernel
arch/sh/mm/uncached.c
12194
1176
#include <linux/init.h> #include <linux/module.h> #include <asm/sizes.h> #include <asm/page.h> #include <asm/addrspace.h> /* * This is the offset of the uncached section from its cached alias. * * Legacy platforms handle trivial transitions between cached and * uncached segments by making use of the 1:1 mapping relationship in * 512MB lowmem, others via a special uncached mapping. * * Default value only valid in 29 bit mode, in 32bit mode this will be * updated by the early PMB initialization code. */ unsigned long cached_to_uncached = SZ_512M; unsigned long uncached_size = SZ_512M; unsigned long uncached_start, uncached_end; EXPORT_SYMBOL(uncached_start); EXPORT_SYMBOL(uncached_end); int virt_addr_uncached(unsigned long kaddr) { return (kaddr >= uncached_start) && (kaddr < uncached_end); } EXPORT_SYMBOL(virt_addr_uncached); void __init uncached_init(void) { #if defined(CONFIG_29BIT) || !defined(CONFIG_MMU) uncached_start = P2SEG; #else uncached_start = memory_end; #endif uncached_end = uncached_start + uncached_size; } void __init uncached_resize(unsigned long size) { uncached_size = size; uncached_end = uncached_start + uncached_size; }
gpl-2.0
ResurrectionRemix-Devices/android_kernel_samsung_smdk4412
drivers/eisa/virtual_root.c
14498
1736
/* * Virtual EISA root driver. * Acts as a placeholder if we don't have a proper EISA bridge. * * (C) 2003 Marc Zyngier <maz@wild-wind.fr.eu.org> * * This code is released under the GPL version 2. */ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/eisa.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #if defined(CONFIG_ALPHA_JENSEN) || defined(CONFIG_EISA_VLB_PRIMING) #define EISA_FORCE_PROBE_DEFAULT 1 #else #define EISA_FORCE_PROBE_DEFAULT 0 #endif static int force_probe = EISA_FORCE_PROBE_DEFAULT; static void virtual_eisa_release (struct device *); /* The default EISA device parent (virtual root device). * Now use a platform device, since that's the obvious choice. */ static struct platform_device eisa_root_dev = { .name = "eisa", .id = 0, .dev = { .release = virtual_eisa_release, }, }; static struct eisa_root_device eisa_bus_root = { .dev = &eisa_root_dev.dev, .bus_base_addr = 0, .res = &ioport_resource, .slots = EISA_MAX_SLOTS, .dma_mask = 0xffffffff, }; static void virtual_eisa_release (struct device *dev) { /* nothing really to do here */ } static int __init virtual_eisa_root_init (void) { int r; if ((r = platform_device_register (&eisa_root_dev))) { return r; } eisa_bus_root.force_probe = force_probe; dev_set_drvdata(&eisa_root_dev.dev, &eisa_bus_root); if (eisa_root_register (&eisa_bus_root)) { /* A real bridge may have been registered before * us. So quietly unregister. */ platform_device_unregister (&eisa_root_dev); return -1; } return 0; } module_param (force_probe, int, 0444); device_initcall (virtual_eisa_root_init);
gpl-2.0
stefanbucur/linux-s2e
arch/arm/mach-msm/platsmp.c
163
4359
/*
 * Copyright (C) 2002 ARM Ltd.
 * All Rights Reserved
 * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>

#include <asm/hardware/gic.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/mach-types.h>

#include <mach/msm_iomap.h>

#include "scm-boot.h"

/* Register offsets inside the SCSS block mapped in prepare_cold_cpu(). */
#define VDD_SC1_ARRAY_CLAMP_GFS_CTL 0x15A0
#define SCSS_CPU1CORE_RESET 0xD80
#define SCSS_DBG_STATUS_CORE_PWRDUP 0xE64

/* Mask for edge trigger PPIs except AVS_SVICINT and AVS_SVICINTSWDONE */
#define GIC_PPI_EDGE_MASK 0xFFFFD7FF

extern void msm_secondary_startup(void);
/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen".
 */
volatile int pen_release = -1;

static DEFINE_SPINLOCK(boot_lock);

static inline int get_core_count(void)
{
	/* 1 + the PART[1:0] field of MIDR */
	return ((read_cpuid_id() >> 4) & 3) + 1;
}

/* Runs on the secondary CPU right after it leaves the holding pen. */
void __cpuinit platform_secondary_init(unsigned int cpu)
{
	/* Configure edge-triggered PPIs */
	writel(GIC_PPI_EDGE_MASK, MSM_QGIC_DIST_BASE + GIC_DIST_CONFIG + 4);

	/*
	 * if any interrupts are already enabled for the primary
	 * core (e.g. timer irq), then they will not have been enabled
	 * for us: do so
	 */
	gic_secondary_init(0);

	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	pen_release = -1;
	smp_wmb();	/* publish pen_release before the boot CPU re-reads it */

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}

/*
 * One-time cold-boot sequence for CPU1: hand the secondary entry point to
 * the secure monitor (SCM), then release power clamps and reset via the
 * SCSS registers.
 */
static __cpuinit void prepare_cold_cpu(unsigned int cpu)
{
	int ret;
	ret = scm_set_boot_addr(virt_to_phys(msm_secondary_startup),
				SCM_FLAG_COLDBOOT_CPU1);
	if (ret == 0) {
		void *sc1_base_ptr;
		sc1_base_ptr = ioremap_nocache(0x00902000, SZ_4K*2);
		if (sc1_base_ptr) {
			/* Deassert clamps/reset and power up the debug domain. */
			writel(0, sc1_base_ptr + VDD_SC1_ARRAY_CLAMP_GFS_CTL);
			writel(0, sc1_base_ptr + SCSS_CPU1CORE_RESET);
			writel(3, sc1_base_ptr + SCSS_DBG_STATUS_CORE_PWRDUP);
			iounmap(sc1_base_ptr);
		}
	} else
		printk(KERN_DEBUG "Failed to set secondary core boot "
				  "address\n");
}

/*
 * Bring a secondary CPU online: cold-boot it once, write its hardware ID
 * into pen_release (flushed so the non-coherent secondary sees it), poke it
 * with SGI 1, then wait up to 1s for it to clear pen_release.
 * Returns 0 on success, -ENOSYS if the CPU never left the pen.
 */
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;
	static int cold_boot_done;

	/* Only need to bring cpu out of reset this way once */
	if (cold_boot_done == false) {
		prepare_cold_cpu(cpu);
		cold_boot_done = true;
	}

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	pen_release = cpu_logical_map(cpu);
	/* Push the new value past L1/L2 so the not-yet-coherent CPU sees it. */
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	gic_raise_softirq(cpumask_of(cpu), 1);

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system. The msm8x60
 * does not support the ARM SCU, so just set the possible cpu mask to
 * NR_CPUS.
 */
void __init smp_init_cpus(void)
{
	unsigned int i, ncores = get_core_count();

	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}

/* Nothing platform-specific to do before bringing up secondaries. */
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
}
gpl-2.0