repo_name
string
path
string
copies
string
size
string
content
string
license
string
djmatt604/kernel_samsung_msm8660-common
arch/frv/kernel/ptrace.c
3143
9127
/* ptrace.c: FRV specific parts of process tracing * * Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * - Derived from arch/m68k/kernel/ptrace.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/security.h> #include <linux/signal.h> #include <linux/regset.h> #include <linux/elf.h> #include <linux/tracehook.h> #include <asm/uaccess.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/system.h> #include <asm/processor.h> #include <asm/unistd.h> /* * does not yet catch signals sent when the child dies. * in exit.c or in signal.c. */ /* * retrieve the contents of FRV userspace general registers */ static int genregs_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { const struct user_int_regs *iregs = &target->thread.user->i; int ret; ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, iregs, 0, sizeof(*iregs)); if (ret < 0) return ret; return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, sizeof(*iregs), -1); } /* * update the contents of the FRV userspace general registers */ static int genregs_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { struct user_int_regs *iregs = &target->thread.user->i; unsigned int offs_gr0, offs_gr1; int ret; /* not allowed to set PSR or __status */ if (pos < offsetof(struct user_int_regs, psr) + sizeof(long) && pos + count > offsetof(struct user_int_regs, psr)) return -EIO; if (pos < 
offsetof(struct user_int_regs, __status) + sizeof(long) && pos + count > offsetof(struct user_int_regs, __status)) return -EIO; /* set the control regs */ offs_gr0 = offsetof(struct user_int_regs, gr[0]); offs_gr1 = offsetof(struct user_int_regs, gr[1]); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, iregs, 0, offs_gr0); if (ret < 0) return ret; /* skip GR0/TBR */ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, offs_gr0, offs_gr1); if (ret < 0) return ret; /* set the general regs */ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &iregs->gr[1], offs_gr1, sizeof(*iregs)); if (ret < 0) return ret; return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, sizeof(*iregs), -1); } /* * retrieve the contents of FRV userspace FP/Media registers */ static int fpmregs_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { const struct user_fpmedia_regs *fpregs = &target->thread.user->f; int ret; ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, fpregs, 0, sizeof(*fpregs)); if (ret < 0) return ret; return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, sizeof(*fpregs), -1); } /* * update the contents of the FRV userspace FP/Media registers */ static int fpmregs_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { struct user_fpmedia_regs *fpregs = &target->thread.user->f; int ret; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, fpregs, 0, sizeof(*fpregs)); if (ret < 0) return ret; return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, sizeof(*fpregs), -1); } /* * determine if the FP/Media registers have actually been used */ static int fpmregs_active(struct task_struct *target, const struct user_regset *regset) { return tsk_used_math(target) ? 
regset->n : 0; } /* * Define the register sets available on the FRV under Linux */ enum frv_regset { REGSET_GENERAL, REGSET_FPMEDIA, }; static const struct user_regset frv_regsets[] = { /* * General register format is: * PSR, ISR, CCR, CCCR, LR, LCR, PC, (STATUS), SYSCALLNO, ORIG_G8 * GNER0-1, IACC0, TBR, GR1-63 */ [REGSET_GENERAL] = { .core_note_type = NT_PRSTATUS, .n = ELF_NGREG, .size = sizeof(long), .align = sizeof(long), .get = genregs_get, .set = genregs_set, }, /* * FPU/Media register format is: * FR0-63, FNER0-1, MSR0-1, ACC0-7, ACCG0-8, FSR */ [REGSET_FPMEDIA] = { .core_note_type = NT_PRFPREG, .n = sizeof(struct user_fpmedia_regs) / sizeof(long), .size = sizeof(long), .align = sizeof(long), .get = fpmregs_get, .set = fpmregs_set, .active = fpmregs_active, }, }; static const struct user_regset_view user_frv_native_view = { .name = "frv", .e_machine = EM_FRV, .regsets = frv_regsets, .n = ARRAY_SIZE(frv_regsets), }; const struct user_regset_view *task_user_regset_view(struct task_struct *task) { return &user_frv_native_view; } /* * Get contents of register REGNO in task TASK. */ static inline long get_reg(struct task_struct *task, int regno) { struct user_context *user = task->thread.user; if (regno < 0 || regno >= PT__END) return 0; return ((unsigned long *) user)[regno]; } /* * Write contents of register REGNO in task TASK. */ static inline int put_reg(struct task_struct *task, int regno, unsigned long data) { struct user_context *user = task->thread.user; if (regno < 0 || regno >= PT__END) return -EIO; switch (regno) { case PT_GR(0): return 0; case PT_PSR: case PT__STATUS: return -EIO; default: ((unsigned long *) user)[regno] = data; return 0; } } /* * Called by kernel/ptrace.c when detaching.. 
* * Control h/w single stepping */ void user_enable_single_step(struct task_struct *child) { child->thread.frame0->__status |= REG__STATUS_STEP; } void user_disable_single_step(struct task_struct *child) { child->thread.frame0->__status &= ~REG__STATUS_STEP; } void ptrace_disable(struct task_struct *child) { user_disable_single_step(child); } long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { unsigned long tmp; int ret; int regno = addr >> 2; unsigned long __user *datap = (unsigned long __user *) data; switch (request) { /* read the word at location addr in the USER area. */ case PTRACE_PEEKUSR: { tmp = 0; ret = -EIO; if (addr & 3) break; ret = 0; switch (regno) { case 0 ... PT__END - 1: tmp = get_reg(child, regno); break; case PT__END + 0: tmp = child->mm->end_code - child->mm->start_code; break; case PT__END + 1: tmp = child->mm->end_data - child->mm->start_data; break; case PT__END + 2: tmp = child->mm->start_stack - child->mm->start_brk; break; case PT__END + 3: tmp = child->mm->start_code; break; case PT__END + 4: tmp = child->mm->start_stack; break; default: ret = -EIO; break; } if (ret == 0) ret = put_user(tmp, datap); break; } case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ ret = -EIO; if (addr & 3) break; switch (regno) { case 0 ... PT__END - 1: ret = put_reg(child, regno, data); break; } break; case PTRACE_GETREGS: /* Get all integer regs from the child. */ return copy_regset_to_user(child, &user_frv_native_view, REGSET_GENERAL, 0, sizeof(child->thread.user->i), datap); case PTRACE_SETREGS: /* Set all integer regs in the child. */ return copy_regset_from_user(child, &user_frv_native_view, REGSET_GENERAL, 0, sizeof(child->thread.user->i), datap); case PTRACE_GETFPREGS: /* Get the child FP/Media state. */ return copy_regset_to_user(child, &user_frv_native_view, REGSET_FPMEDIA, 0, sizeof(child->thread.user->f), datap); case PTRACE_SETFPREGS: /* Set the child FP/Media state. 
*/ return copy_regset_from_user(child, &user_frv_native_view, REGSET_FPMEDIA, 0, sizeof(child->thread.user->f), datap); default: ret = ptrace_request(child, request, addr, data); break; } return ret; } /* * handle tracing of system call entry * - return the revised system call number or ULONG_MAX to cause ENOSYS */ asmlinkage unsigned long syscall_trace_entry(void) { __frame->__status |= REG__STATUS_SYSC_ENTRY; if (tracehook_report_syscall_entry(__frame)) { /* tracing decided this syscall should not happen, so * We'll return a bogus call number to get an ENOSYS * error, but leave the original number in * __frame->syscallno */ return ULONG_MAX; } return __frame->syscallno; } /* * handle tracing of system call exit */ asmlinkage void syscall_trace_exit(void) { __frame->__status |= REG__STATUS_SYSC_EXIT; tracehook_report_syscall_exit(__frame, 0); }
gpl-2.0
YUPlayGodDev/android_kernel_cyanogen_msm8916
drivers/platform/x86/msi-wmi.c
3143
9490
/* * MSI WMI hotkeys * * Copyright (C) 2009 Novell <trenn@suse.de> * * Most stuff taken over from hp-wmi * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> #include <linux/acpi.h> #include <linux/backlight.h> #include <linux/slab.h> #include <linux/module.h> MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>"); MODULE_DESCRIPTION("MSI laptop WMI hotkeys driver"); MODULE_LICENSE("GPL"); #define DRV_NAME "msi-wmi" #define MSIWMI_BIOS_GUID "551A1F84-FBDD-4125-91DB-3EA8F44F1D45" #define MSIWMI_MSI_EVENT_GUID "B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2" #define MSIWMI_WIND_EVENT_GUID "5B3CC38A-40D9-7245-8AE6-1145B751BE3F" MODULE_ALIAS("wmi:" MSIWMI_BIOS_GUID); MODULE_ALIAS("wmi:" MSIWMI_MSI_EVENT_GUID); MODULE_ALIAS("wmi:" MSIWMI_WIND_EVENT_GUID); enum msi_scancodes { /* Generic MSI keys (not present on MSI Wind) */ MSI_KEY_BRIGHTNESSUP = 0xD0, MSI_KEY_BRIGHTNESSDOWN, MSI_KEY_VOLUMEUP, MSI_KEY_VOLUMEDOWN, MSI_KEY_MUTE, /* MSI Wind keys */ WIND_KEY_TOUCHPAD = 0x08, /* Fn+F3 touchpad toggle */ WIND_KEY_BLUETOOTH = 0x56, /* Fn+F11 Bluetooth toggle */ WIND_KEY_CAMERA, /* Fn+F6 webcam toggle */ WIND_KEY_WLAN = 0x5f, /* Fn+F11 Wi-Fi toggle */ WIND_KEY_TURBO, /* Fn+F10 turbo mode toggle */ WIND_KEY_ECO = 0x69, /* 
Fn+F10 ECO mode toggle */ }; static struct key_entry msi_wmi_keymap[] = { { KE_KEY, MSI_KEY_BRIGHTNESSUP, {KEY_BRIGHTNESSUP} }, { KE_KEY, MSI_KEY_BRIGHTNESSDOWN, {KEY_BRIGHTNESSDOWN} }, { KE_KEY, MSI_KEY_VOLUMEUP, {KEY_VOLUMEUP} }, { KE_KEY, MSI_KEY_VOLUMEDOWN, {KEY_VOLUMEDOWN} }, { KE_KEY, MSI_KEY_MUTE, {KEY_MUTE} }, /* These keys work without WMI. Ignore them to avoid double keycodes */ { KE_IGNORE, WIND_KEY_TOUCHPAD, {KEY_TOUCHPAD_TOGGLE} }, { KE_IGNORE, WIND_KEY_BLUETOOTH, {KEY_BLUETOOTH} }, { KE_IGNORE, WIND_KEY_CAMERA, {KEY_CAMERA} }, { KE_IGNORE, WIND_KEY_WLAN, {KEY_WLAN} }, /* These are unknown WMI events found on MSI Wind */ { KE_IGNORE, 0x00 }, { KE_IGNORE, 0x62 }, { KE_IGNORE, 0x63 }, /* These are MSI Wind keys that should be handled via WMI */ { KE_KEY, WIND_KEY_TURBO, {KEY_PROG1} }, { KE_KEY, WIND_KEY_ECO, {KEY_PROG2} }, { KE_END, 0 } }; static ktime_t last_pressed; static const struct { const char *guid; bool quirk_last_pressed; } *event_wmi, event_wmis[] = { { MSIWMI_MSI_EVENT_GUID, true }, { MSIWMI_WIND_EVENT_GUID, false }, }; static struct backlight_device *backlight; static int backlight_map[] = { 0x00, 0x33, 0x66, 0x99, 0xCC, 0xFF }; static struct input_dev *msi_wmi_input_dev; static int msi_wmi_query_block(int instance, int *ret) { acpi_status status; union acpi_object *obj; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; status = wmi_query_block(MSIWMI_BIOS_GUID, instance, &output); obj = output.pointer; if (!obj || obj->type != ACPI_TYPE_INTEGER) { if (obj) { pr_err("query block returned object " "type: %d - buffer length:%d\n", obj->type, obj->type == ACPI_TYPE_BUFFER ? 
obj->buffer.length : 0); } kfree(obj); return -EINVAL; } *ret = obj->integer.value; kfree(obj); return 0; } static int msi_wmi_set_block(int instance, int value) { acpi_status status; struct acpi_buffer input = { sizeof(int), &value }; pr_debug("Going to set block of instance: %d - value: %d\n", instance, value); status = wmi_set_block(MSIWMI_BIOS_GUID, instance, &input); return ACPI_SUCCESS(status) ? 0 : 1; } static int bl_get(struct backlight_device *bd) { int level, err, ret; /* Instance 1 is "get backlight", cmp with DSDT */ err = msi_wmi_query_block(1, &ret); if (err) { pr_err("Could not query backlight: %d\n", err); return -EINVAL; } pr_debug("Get: Query block returned: %d\n", ret); for (level = 0; level < ARRAY_SIZE(backlight_map); level++) { if (backlight_map[level] == ret) { pr_debug("Current backlight level: 0x%X - index: %d\n", backlight_map[level], level); break; } } if (level == ARRAY_SIZE(backlight_map)) { pr_err("get: Invalid brightness value: 0x%X\n", ret); return -EINVAL; } return level; } static int bl_set_status(struct backlight_device *bd) { int bright = bd->props.brightness; if (bright >= ARRAY_SIZE(backlight_map) || bright < 0) return -EINVAL; /* Instance 0 is "set backlight" */ return msi_wmi_set_block(0, backlight_map[bright]); } static const struct backlight_ops msi_backlight_ops = { .get_brightness = bl_get, .update_status = bl_set_status, }; static void msi_wmi_notify(u32 value, void *context) { struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; static struct key_entry *key; union acpi_object *obj; acpi_status status; status = wmi_get_event_data(value, &response); if (status != AE_OK) { pr_info("bad event status 0x%x\n", status); return; } obj = (union acpi_object *)response.pointer; if (obj && obj->type == ACPI_TYPE_INTEGER) { int eventcode = obj->integer.value; pr_debug("Eventcode: 0x%x\n", eventcode); key = sparse_keymap_entry_from_scancode(msi_wmi_input_dev, eventcode); if (!key) { pr_info("Unknown key pressed - %x\n", 
eventcode); goto msi_wmi_notify_exit; } if (event_wmi->quirk_last_pressed) { ktime_t cur = ktime_get_real(); ktime_t diff = ktime_sub(cur, last_pressed); /* Ignore event if any event happened in a 50 ms timeframe -> Key press may result in 10-20 GPEs */ if (ktime_to_us(diff) < 1000 * 50) { pr_debug("Suppressed key event 0x%X - " "Last press was %lld us ago\n", key->code, ktime_to_us(diff)); goto msi_wmi_notify_exit; } last_pressed = cur; } if (key->type == KE_KEY && /* Brightness is served via acpi video driver */ (backlight || (key->code != MSI_KEY_BRIGHTNESSUP && key->code != MSI_KEY_BRIGHTNESSDOWN))) { pr_debug("Send key: 0x%X - Input layer keycode: %d\n", key->code, key->keycode); sparse_keymap_report_entry(msi_wmi_input_dev, key, 1, true); } } else pr_info("Unknown event received\n"); msi_wmi_notify_exit: kfree(response.pointer); } static int __init msi_wmi_backlight_setup(void) { int err; struct backlight_properties props; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_PLATFORM; props.max_brightness = ARRAY_SIZE(backlight_map) - 1; backlight = backlight_device_register(DRV_NAME, NULL, NULL, &msi_backlight_ops, &props); if (IS_ERR(backlight)) return PTR_ERR(backlight); err = bl_get(NULL); if (err < 0) { backlight_device_unregister(backlight); return err; } backlight->props.brightness = err; return 0; } static int __init msi_wmi_input_setup(void) { int err; msi_wmi_input_dev = input_allocate_device(); if (!msi_wmi_input_dev) return -ENOMEM; msi_wmi_input_dev->name = "MSI WMI hotkeys"; msi_wmi_input_dev->phys = "wmi/input0"; msi_wmi_input_dev->id.bustype = BUS_HOST; err = sparse_keymap_setup(msi_wmi_input_dev, msi_wmi_keymap, NULL); if (err) goto err_free_dev; err = input_register_device(msi_wmi_input_dev); if (err) goto err_free_keymap; last_pressed = ktime_set(0, 0); return 0; err_free_keymap: sparse_keymap_free(msi_wmi_input_dev); err_free_dev: input_free_device(msi_wmi_input_dev); return err; } static int __init 
msi_wmi_init(void) { int err; int i; for (i = 0; i < ARRAY_SIZE(event_wmis); i++) { if (!wmi_has_guid(event_wmis[i].guid)) continue; err = msi_wmi_input_setup(); if (err) { pr_err("Unable to setup input device\n"); return err; } err = wmi_install_notify_handler(event_wmis[i].guid, msi_wmi_notify, NULL); if (ACPI_FAILURE(err)) { pr_err("Unable to setup WMI notify handler\n"); goto err_free_input; } pr_debug("Event handler installed\n"); event_wmi = &event_wmis[i]; break; } if (wmi_has_guid(MSIWMI_BIOS_GUID) && !acpi_video_backlight_support()) { err = msi_wmi_backlight_setup(); if (err) { pr_err("Unable to setup backlight device\n"); goto err_uninstall_handler; } pr_debug("Backlight device created\n"); } if (!event_wmi && !backlight) { pr_err("This machine doesn't have neither MSI-hotkeys nor backlight through WMI\n"); return -ENODEV; } return 0; err_uninstall_handler: if (event_wmi) wmi_remove_notify_handler(event_wmi->guid); err_free_input: if (event_wmi) { sparse_keymap_free(msi_wmi_input_dev); input_unregister_device(msi_wmi_input_dev); } return err; } static void __exit msi_wmi_exit(void) { if (event_wmi) { wmi_remove_notify_handler(event_wmi->guid); sparse_keymap_free(msi_wmi_input_dev); input_unregister_device(msi_wmi_input_dev); } if (backlight) backlight_device_unregister(backlight); } module_init(msi_wmi_init); module_exit(msi_wmi_exit);
gpl-2.0
mdo-rom/platform_kernel_samsung_crespo
kernel/seccomp.c
4935
1675
/* * linux/kernel/seccomp.c * * Copyright 2004-2005 Andrea Arcangeli <andrea@cpushare.com> * * This defines a simple but solid secure-computing mode. */ #include <linux/seccomp.h> #include <linux/sched.h> #include <linux/compat.h> /* #define SECCOMP_DEBUG 1 */ #define NR_SECCOMP_MODES 1 /* * Secure computing mode 1 allows only read/write/exit/sigreturn. * To be fully secure this must be combined with rlimit * to limit the stack allocations too. */ static int mode1_syscalls[] = { __NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn, 0, /* null terminated */ }; #ifdef CONFIG_COMPAT static int mode1_syscalls_32[] = { __NR_seccomp_read_32, __NR_seccomp_write_32, __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32, 0, /* null terminated */ }; #endif void __secure_computing(int this_syscall) { int mode = current->seccomp.mode; int * syscall; switch (mode) { case 1: syscall = mode1_syscalls; #ifdef CONFIG_COMPAT if (is_compat_task()) syscall = mode1_syscalls_32; #endif do { if (*syscall == this_syscall) return; } while (*++syscall); break; default: BUG(); } #ifdef SECCOMP_DEBUG dump_stack(); #endif do_exit(SIGKILL); } long prctl_get_seccomp(void) { return current->seccomp.mode; } long prctl_set_seccomp(unsigned long seccomp_mode) { long ret; /* can set it only once to be even more secure */ ret = -EPERM; if (unlikely(current->seccomp.mode)) goto out; ret = -EINVAL; if (seccomp_mode && seccomp_mode <= NR_SECCOMP_MODES) { current->seccomp.mode = seccomp_mode; set_thread_flag(TIF_SECCOMP); #ifdef TIF_NOTSC disable_TSC(); #endif ret = 0; } out: return ret; }
gpl-2.0
kh007im/Ngawi-XL
drivers/char/ipmi/ipmi_msghandler.c
4935
118558
/* * ipmi_msghandler.c * * Incoming and outgoing message routing for an IPMI interface. * * Author: MontaVista Software, Inc. * Corey Minyard <minyard@mvista.com> * source@mvista.com * * Copyright 2002 MontaVista Software Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/errno.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/ipmi.h> #include <linux/ipmi_smi.h> #include <linux/notifier.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/rcupdate.h> #include <linux/interrupt.h> #define PFX "IPMI message handler: " #define IPMI_DRIVER_VERSION "39.2" static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); static int ipmi_init_msghandler(void); static void smi_recv_tasklet(unsigned long); static void handle_new_recv_msgs(ipmi_smi_t intf); static int initialized; #ifdef CONFIG_PROC_FS static struct proc_dir_entry *proc_ipmi_root; #endif /* CONFIG_PROC_FS */ /* Remain in auto-maintenance mode for this amount of time (in ms). */ #define IPMI_MAINTENANCE_MODE_TIMEOUT 30000 #define MAX_EVENTS_IN_QUEUE 25 /* * Don't let a message sit in a queue forever, always time it with at lest * the max message timer. This is in milliseconds. */ #define MAX_MSG_TIMEOUT 60000 /* * The main "user" data structure. */ struct ipmi_user { struct list_head link; /* Set to "0" when the user is destroyed. */ int valid; struct kref refcount; /* The upper layer that handles receive messages. */ struct ipmi_user_hndl *handler; void *handler_data; /* The interface this user is bound to. */ ipmi_smi_t intf; /* Does this interface receive IPMI events? */ int gets_events; }; struct cmd_rcvr { struct list_head link; ipmi_user_t user; unsigned char netfn; unsigned char cmd; unsigned int chans; /* * This is used to form a linked lised during mass deletion. * Since this is in an RCU list, we cannot use the link above * or change any data until the RCU period completes. So we * use this next variable during mass deletion so we can have * a list and don't have to wait and restart the search on * every individual deletion of a command. 
*/ struct cmd_rcvr *next; }; struct seq_table { unsigned int inuse : 1; unsigned int broadcast : 1; unsigned long timeout; unsigned long orig_timeout; unsigned int retries_left; /* * To verify on an incoming send message response that this is * the message that the response is for, we keep a sequence id * and increment it every time we send a message. */ long seqid; /* * This is held so we can properly respond to the message on a * timeout, and it is used to hold the temporary data for * retransmission, too. */ struct ipmi_recv_msg *recv_msg; }; /* * Store the information in a msgid (long) to allow us to find a * sequence table entry from the msgid. */ #define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff)) #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \ do { \ seq = ((msgid >> 26) & 0x3f); \ seqid = (msgid & 0x3fffff); \ } while (0) #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff) struct ipmi_channel { unsigned char medium; unsigned char protocol; /* * My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR, * but may be changed by the user. */ unsigned char address; /* * My LUN. This should generally stay the SMS LUN, but just in * case... */ unsigned char lun; }; #ifdef CONFIG_PROC_FS struct ipmi_proc_entry { char *name; struct ipmi_proc_entry *next; }; #endif struct bmc_device { struct platform_device *dev; struct ipmi_device_id id; unsigned char guid[16]; int guid_set; struct kref refcount; /* bmc device attributes */ struct device_attribute device_id_attr; struct device_attribute provides_dev_sdrs_attr; struct device_attribute revision_attr; struct device_attribute firmware_rev_attr; struct device_attribute version_attr; struct device_attribute add_dev_support_attr; struct device_attribute manufacturer_id_attr; struct device_attribute product_id_attr; struct device_attribute guid_attr; struct device_attribute aux_firmware_rev_attr; }; /* * Various statistics for IPMI, these index stats[] in the ipmi_smi * structure. 
*/ enum ipmi_stat_indexes { /* Commands we got from the user that were invalid. */ IPMI_STAT_sent_invalid_commands = 0, /* Commands we sent to the MC. */ IPMI_STAT_sent_local_commands, /* Responses from the MC that were delivered to a user. */ IPMI_STAT_handled_local_responses, /* Responses from the MC that were not delivered to a user. */ IPMI_STAT_unhandled_local_responses, /* Commands we sent out to the IPMB bus. */ IPMI_STAT_sent_ipmb_commands, /* Commands sent on the IPMB that had errors on the SEND CMD */ IPMI_STAT_sent_ipmb_command_errs, /* Each retransmit increments this count. */ IPMI_STAT_retransmitted_ipmb_commands, /* * When a message times out (runs out of retransmits) this is * incremented. */ IPMI_STAT_timed_out_ipmb_commands, /* * This is like above, but for broadcasts. Broadcasts are * *not* included in the above count (they are expected to * time out). */ IPMI_STAT_timed_out_ipmb_broadcasts, /* Responses I have sent to the IPMB bus. */ IPMI_STAT_sent_ipmb_responses, /* The response was delivered to the user. */ IPMI_STAT_handled_ipmb_responses, /* The response had invalid data in it. */ IPMI_STAT_invalid_ipmb_responses, /* The response didn't have anyone waiting for it. */ IPMI_STAT_unhandled_ipmb_responses, /* Commands we sent out to the IPMB bus. */ IPMI_STAT_sent_lan_commands, /* Commands sent on the IPMB that had errors on the SEND CMD */ IPMI_STAT_sent_lan_command_errs, /* Each retransmit increments this count. */ IPMI_STAT_retransmitted_lan_commands, /* * When a message times out (runs out of retransmits) this is * incremented. */ IPMI_STAT_timed_out_lan_commands, /* Responses I have sent to the IPMB bus. */ IPMI_STAT_sent_lan_responses, /* The response was delivered to the user. */ IPMI_STAT_handled_lan_responses, /* The response had invalid data in it. */ IPMI_STAT_invalid_lan_responses, /* The response didn't have anyone waiting for it. */ IPMI_STAT_unhandled_lan_responses, /* The command was delivered to the user. 
*/ IPMI_STAT_handled_commands, /* The command had invalid data in it. */ IPMI_STAT_invalid_commands, /* The command didn't have anyone waiting for it. */ IPMI_STAT_unhandled_commands, /* Invalid data in an event. */ IPMI_STAT_invalid_events, /* Events that were received with the proper format. */ IPMI_STAT_events, /* Retransmissions on IPMB that failed. */ IPMI_STAT_dropped_rexmit_ipmb_commands, /* Retransmissions on LAN that failed. */ IPMI_STAT_dropped_rexmit_lan_commands, /* This *must* remain last, add new values above this. */ IPMI_NUM_STATS }; #define IPMI_IPMB_NUM_SEQ 64 #define IPMI_MAX_CHANNELS 16 struct ipmi_smi { /* What interface number are we? */ int intf_num; struct kref refcount; /* Used for a list of interfaces. */ struct list_head link; /* * The list of upper layers that are using me. seq_lock * protects this. */ struct list_head users; /* Information to supply to users. */ unsigned char ipmi_version_major; unsigned char ipmi_version_minor; /* Used for wake ups at startup. */ wait_queue_head_t waitq; struct bmc_device *bmc; char *my_dev_name; char *sysfs_name; /* * This is the lower-layer's sender routine. Note that you * must either be holding the ipmi_interfaces_mutex or be in * an umpreemptible region to use this. You must fetch the * value into a local variable and make sure it is not NULL. */ struct ipmi_smi_handlers *handlers; void *send_info; #ifdef CONFIG_PROC_FS /* A list of proc entries for this interface. */ struct mutex proc_entry_lock; struct ipmi_proc_entry *proc_entries; #endif /* Driver-model device for the system interface. */ struct device *si_dev; /* * A table of sequence numbers for this interface. We use the * sequence numbers for IPMB messages that go out of the * interface to match them up with their responses. A routine * is called periodically to time the items in this list. */ spinlock_t seq_lock; struct seq_table seq_table[IPMI_IPMB_NUM_SEQ]; int curr_seq; /* * Messages queued for delivery. 
If delivery fails (out of memory * for instance), They will stay in here to be processed later in a * periodic timer interrupt. The tasklet is for handling received * messages directly from the handler. */ spinlock_t waiting_msgs_lock; struct list_head waiting_msgs; atomic_t watchdog_pretimeouts_to_deliver; struct tasklet_struct recv_tasklet; /* * The list of command receivers that are registered for commands * on this interface. */ struct mutex cmd_rcvrs_mutex; struct list_head cmd_rcvrs; /* * Events that were queues because no one was there to receive * them. */ spinlock_t events_lock; /* For dealing with event stuff. */ struct list_head waiting_events; unsigned int waiting_events_count; /* How many events in queue? */ char delivering_events; char event_msg_printed; /* * The event receiver for my BMC, only really used at panic * shutdown as a place to store this. */ unsigned char event_receiver; unsigned char event_receiver_lun; unsigned char local_sel_device; unsigned char local_event_generator; /* For handling of maintenance mode. */ int maintenance_mode; int maintenance_mode_enable; int auto_maintenance_timeout; spinlock_t maintenance_mode_lock; /* Used in a timer... */ /* * A cheap hack, if this is non-null and a message to an * interface comes in with a NULL user, call this routine with * it. Note that the message will still be freed by the * caller. This only works on the system interface. */ void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg); /* * When we are scanning the channels for an SMI, this will * tell which channel we are scanning. */ int curr_channel; /* Channel information */ struct ipmi_channel channels[IPMI_MAX_CHANNELS]; /* Proc FS stuff. */ struct proc_dir_entry *proc_dir; char proc_dir_name[10]; atomic_t stats[IPMI_NUM_STATS]; /* * run_to_completion duplicate of smb_info, smi_info * and ipmi_serial_info structures. Used to decrease numbers of * parameters passed by "low" level IPMI code. 
*/ int run_to_completion; }; #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev) /** * The driver model view of the IPMI messaging driver. */ static struct platform_driver ipmidriver = { .driver = { .name = "ipmi", .bus = &platform_bus_type } }; static DEFINE_MUTEX(ipmidriver_mutex); static LIST_HEAD(ipmi_interfaces); static DEFINE_MUTEX(ipmi_interfaces_mutex); /* * List of watchers that want to know when smi's are added and deleted. */ static LIST_HEAD(smi_watchers); static DEFINE_MUTEX(smi_watchers_mutex); #define ipmi_inc_stat(intf, stat) \ atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat]) #define ipmi_get_stat(intf, stat) \ ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat])) static int is_lan_addr(struct ipmi_addr *addr) { return addr->addr_type == IPMI_LAN_ADDR_TYPE; } static int is_ipmb_addr(struct ipmi_addr *addr) { return addr->addr_type == IPMI_IPMB_ADDR_TYPE; } static int is_ipmb_bcast_addr(struct ipmi_addr *addr) { return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE; } static void free_recv_msg_list(struct list_head *q) { struct ipmi_recv_msg *msg, *msg2; list_for_each_entry_safe(msg, msg2, q, link) { list_del(&msg->link); ipmi_free_recv_msg(msg); } } static void free_smi_msg_list(struct list_head *q) { struct ipmi_smi_msg *msg, *msg2; list_for_each_entry_safe(msg, msg2, q, link) { list_del(&msg->link); ipmi_free_smi_msg(msg); } } static void clean_up_interface_data(ipmi_smi_t intf) { int i; struct cmd_rcvr *rcvr, *rcvr2; struct list_head list; tasklet_kill(&intf->recv_tasklet); free_smi_msg_list(&intf->waiting_msgs); free_recv_msg_list(&intf->waiting_events); /* * Wholesale remove all the entries from the list in the * interface and wait for RCU to know that none are in use. 
*/ mutex_lock(&intf->cmd_rcvrs_mutex); INIT_LIST_HEAD(&list); list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu); mutex_unlock(&intf->cmd_rcvrs_mutex); list_for_each_entry_safe(rcvr, rcvr2, &list, link) kfree(rcvr); for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { if ((intf->seq_table[i].inuse) && (intf->seq_table[i].recv_msg)) ipmi_free_recv_msg(intf->seq_table[i].recv_msg); } } static void intf_free(struct kref *ref) { ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount); clean_up_interface_data(intf); kfree(intf); } struct watcher_entry { int intf_num; ipmi_smi_t intf; struct list_head link; }; int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) { ipmi_smi_t intf; LIST_HEAD(to_deliver); struct watcher_entry *e, *e2; mutex_lock(&smi_watchers_mutex); mutex_lock(&ipmi_interfaces_mutex); /* Build a list of things to deliver. */ list_for_each_entry(intf, &ipmi_interfaces, link) { if (intf->intf_num == -1) continue; e = kmalloc(sizeof(*e), GFP_KERNEL); if (!e) goto out_err; kref_get(&intf->refcount); e->intf = intf; e->intf_num = intf->intf_num; list_add_tail(&e->link, &to_deliver); } /* We will succeed, so add it to the list. 
*/ list_add(&watcher->link, &smi_watchers); mutex_unlock(&ipmi_interfaces_mutex); list_for_each_entry_safe(e, e2, &to_deliver, link) { list_del(&e->link); watcher->new_smi(e->intf_num, e->intf->si_dev); kref_put(&e->intf->refcount, intf_free); kfree(e); } mutex_unlock(&smi_watchers_mutex); return 0; out_err: mutex_unlock(&ipmi_interfaces_mutex); mutex_unlock(&smi_watchers_mutex); list_for_each_entry_safe(e, e2, &to_deliver, link) { list_del(&e->link); kref_put(&e->intf->refcount, intf_free); kfree(e); } return -ENOMEM; } EXPORT_SYMBOL(ipmi_smi_watcher_register); int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) { mutex_lock(&smi_watchers_mutex); list_del(&(watcher->link)); mutex_unlock(&smi_watchers_mutex); return 0; } EXPORT_SYMBOL(ipmi_smi_watcher_unregister); /* * Must be called with smi_watchers_mutex held. */ static void call_smi_watchers(int i, struct device *dev) { struct ipmi_smi_watcher *w; list_for_each_entry(w, &smi_watchers, link) { if (try_module_get(w->owner)) { w->new_smi(i, dev); module_put(w->owner); } } } static int ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2) { if (addr1->addr_type != addr2->addr_type) return 0; if (addr1->channel != addr2->channel) return 0; if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { struct ipmi_system_interface_addr *smi_addr1 = (struct ipmi_system_interface_addr *) addr1; struct ipmi_system_interface_addr *smi_addr2 = (struct ipmi_system_interface_addr *) addr2; return (smi_addr1->lun == smi_addr2->lun); } if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) { struct ipmi_ipmb_addr *ipmb_addr1 = (struct ipmi_ipmb_addr *) addr1; struct ipmi_ipmb_addr *ipmb_addr2 = (struct ipmi_ipmb_addr *) addr2; return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr) && (ipmb_addr1->lun == ipmb_addr2->lun)); } if (is_lan_addr(addr1)) { struct ipmi_lan_addr *lan_addr1 = (struct ipmi_lan_addr *) addr1; struct ipmi_lan_addr *lan_addr2 = (struct ipmi_lan_addr *) addr2; return 
((lan_addr1->remote_SWID == lan_addr2->remote_SWID) && (lan_addr1->local_SWID == lan_addr2->local_SWID) && (lan_addr1->session_handle == lan_addr2->session_handle) && (lan_addr1->lun == lan_addr2->lun)); } return 1; } int ipmi_validate_addr(struct ipmi_addr *addr, int len) { if (len < sizeof(struct ipmi_system_interface_addr)) return -EINVAL; if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { if (addr->channel != IPMI_BMC_CHANNEL) return -EINVAL; return 0; } if ((addr->channel == IPMI_BMC_CHANNEL) || (addr->channel >= IPMI_MAX_CHANNELS) || (addr->channel < 0)) return -EINVAL; if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { if (len < sizeof(struct ipmi_ipmb_addr)) return -EINVAL; return 0; } if (is_lan_addr(addr)) { if (len < sizeof(struct ipmi_lan_addr)) return -EINVAL; return 0; } return -EINVAL; } EXPORT_SYMBOL(ipmi_validate_addr); unsigned int ipmi_addr_length(int addr_type) { if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) return sizeof(struct ipmi_system_interface_addr); if ((addr_type == IPMI_IPMB_ADDR_TYPE) || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) return sizeof(struct ipmi_ipmb_addr); if (addr_type == IPMI_LAN_ADDR_TYPE) return sizeof(struct ipmi_lan_addr); return 0; } EXPORT_SYMBOL(ipmi_addr_length); static void deliver_response(struct ipmi_recv_msg *msg) { if (!msg->user) { ipmi_smi_t intf = msg->user_msg_data; /* Special handling for NULL users. */ if (intf->null_user_handler) { intf->null_user_handler(intf, msg); ipmi_inc_stat(intf, handled_local_responses); } else { /* No handler, so give up. */ ipmi_inc_stat(intf, unhandled_local_responses); } ipmi_free_recv_msg(msg); } else { ipmi_user_t user = msg->user; user->handler->ipmi_recv_hndl(msg, user->handler_data); } } static void deliver_err_response(struct ipmi_recv_msg *msg, int err) { msg->recv_type = IPMI_RESPONSE_RECV_TYPE; msg->msg_data[0] = err; msg->msg.netfn |= 1; /* Convert to a response. 
*/ msg->msg.data_len = 1; msg->msg.data = msg->msg_data; deliver_response(msg); } /* * Find the next sequence number not being used and add the given * message with the given timeout to the sequence table. This must be * called with the interface's seq_lock held. */ static int intf_next_seq(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, unsigned long timeout, int retries, int broadcast, unsigned char *seq, long *seqid) { int rv = 0; unsigned int i; for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq; i = (i+1)%IPMI_IPMB_NUM_SEQ) { if (!intf->seq_table[i].inuse) break; } if (!intf->seq_table[i].inuse) { intf->seq_table[i].recv_msg = recv_msg; /* * Start with the maximum timeout, when the send response * comes in we will start the real timer. */ intf->seq_table[i].timeout = MAX_MSG_TIMEOUT; intf->seq_table[i].orig_timeout = timeout; intf->seq_table[i].retries_left = retries; intf->seq_table[i].broadcast = broadcast; intf->seq_table[i].inuse = 1; intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid); *seq = i; *seqid = intf->seq_table[i].seqid; intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ; } else { rv = -EAGAIN; } return rv; } /* * Return the receive message for the given sequence number and * release the sequence number so it can be reused. Some other data * is passed in to be sure the message matches up correctly (to help * guard against message coming in after their timeout and the * sequence number being reused). 
/*
 * Return (in *recv_msg) the receive message stored for sequence @seq
 * and free the slot, but only if channel/cmd/netfn/addr all match —
 * this guards against a reply arriving after the sequence number was
 * timed out and reused.
 */
static int intf_find_seq(ipmi_smi_t           intf,
			 unsigned char        seq,
			 short                channel,
			 unsigned char        cmd,
			 unsigned char        netfn,
			 struct ipmi_addr     *addr,
			 struct ipmi_recv_msg **recv_msg)
{
	int rv = -ENODEV;
	unsigned long flags;

	if (seq >= IPMI_IPMB_NUM_SEQ)
		return -EINVAL;

	spin_lock_irqsave(&intf->seq_lock, flags);
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

		if ((msg->addr.channel == channel)
		    && (msg->msg.cmd == cmd)
		    && (msg->msg.netfn == netfn)
		    && ipmi_addr_equal(addr, &msg->addr)) {
			*recv_msg = msg;
			intf->seq_table[seq].inuse = 0;
			rv = 0;
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}

/* Start the timer for a specific sequence table entry. */
static int intf_start_seq_timer(ipmi_smi_t intf, long msgid)
{
	int rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;

	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * Check the seqid too: the user can be deleted while a
	 * message is outstanding, so the slot may have been reused.
	 */
	if (intf->seq_table[seq].inuse
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];

		ent->timeout = ent->orig_timeout;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}
*/ if ((intf->seq_table[seq].inuse) && (intf->seq_table[seq].seqid == seqid)) { struct seq_table *ent = &(intf->seq_table[seq]); ent->inuse = 0; msg = ent->recv_msg; rv = 0; } spin_unlock_irqrestore(&(intf->seq_lock), flags); if (msg) deliver_err_response(msg, err); return rv; } int ipmi_create_user(unsigned int if_num, struct ipmi_user_hndl *handler, void *handler_data, ipmi_user_t *user) { unsigned long flags; ipmi_user_t new_user; int rv = 0; ipmi_smi_t intf; /* * There is no module usecount here, because it's not * required. Since this can only be used by and called from * other modules, they will implicitly use this module, and * thus this can't be removed unless the other modules are * removed. */ if (handler == NULL) return -EINVAL; /* * Make sure the driver is actually initialized, this handles * problems with initialization order. */ if (!initialized) { rv = ipmi_init_msghandler(); if (rv) return rv; /* * The init code doesn't return an error if it was turned * off, but it won't initialize. Check that. */ if (!initialized) return -ENODEV; } new_user = kmalloc(sizeof(*new_user), GFP_KERNEL); if (!new_user) return -ENOMEM; mutex_lock(&ipmi_interfaces_mutex); list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { if (intf->intf_num == if_num) goto found; } /* Not found, return an error */ rv = -EINVAL; goto out_kfree; found: /* Note that each existing user holds a refcount to the interface. 
*/ kref_get(&intf->refcount); kref_init(&new_user->refcount); new_user->handler = handler; new_user->handler_data = handler_data; new_user->intf = intf; new_user->gets_events = 0; if (!try_module_get(intf->handlers->owner)) { rv = -ENODEV; goto out_kref; } if (intf->handlers->inc_usecount) { rv = intf->handlers->inc_usecount(intf->send_info); if (rv) { module_put(intf->handlers->owner); goto out_kref; } } /* * Hold the lock so intf->handlers is guaranteed to be good * until now */ mutex_unlock(&ipmi_interfaces_mutex); new_user->valid = 1; spin_lock_irqsave(&intf->seq_lock, flags); list_add_rcu(&new_user->link, &intf->users); spin_unlock_irqrestore(&intf->seq_lock, flags); *user = new_user; return 0; out_kref: kref_put(&intf->refcount, intf_free); out_kfree: mutex_unlock(&ipmi_interfaces_mutex); kfree(new_user); return rv; } EXPORT_SYMBOL(ipmi_create_user); int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data) { int rv = 0; ipmi_smi_t intf; struct ipmi_smi_handlers *handlers; mutex_lock(&ipmi_interfaces_mutex); list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { if (intf->intf_num == if_num) goto found; } /* Not found, return an error */ rv = -EINVAL; mutex_unlock(&ipmi_interfaces_mutex); return rv; found: handlers = intf->handlers; rv = -ENOSYS; if (handlers->get_smi_info) rv = handlers->get_smi_info(intf->send_info, data); mutex_unlock(&ipmi_interfaces_mutex); return rv; } EXPORT_SYMBOL(ipmi_get_smi_info); static void free_user(struct kref *ref) { ipmi_user_t user = container_of(ref, struct ipmi_user, refcount); kfree(user); } int ipmi_destroy_user(ipmi_user_t user) { ipmi_smi_t intf = user->intf; int i; unsigned long flags; struct cmd_rcvr *rcvr; struct cmd_rcvr *rcvrs = NULL; user->valid = 0; /* Remove the user from the interface's sequence table. 
*/ spin_lock_irqsave(&intf->seq_lock, flags); list_del_rcu(&user->link); for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { if (intf->seq_table[i].inuse && (intf->seq_table[i].recv_msg->user == user)) { intf->seq_table[i].inuse = 0; ipmi_free_recv_msg(intf->seq_table[i].recv_msg); } } spin_unlock_irqrestore(&intf->seq_lock, flags); /* * Remove the user from the command receiver's table. First * we build a list of everything (not using the standard link, * since other things may be using it till we do * synchronize_rcu()) then free everything in that list. */ mutex_lock(&intf->cmd_rcvrs_mutex); list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) { if (rcvr->user == user) { list_del_rcu(&rcvr->link); rcvr->next = rcvrs; rcvrs = rcvr; } } mutex_unlock(&intf->cmd_rcvrs_mutex); synchronize_rcu(); while (rcvrs) { rcvr = rcvrs; rcvrs = rcvr->next; kfree(rcvr); } mutex_lock(&ipmi_interfaces_mutex); if (intf->handlers) { module_put(intf->handlers->owner); if (intf->handlers->dec_usecount) intf->handlers->dec_usecount(intf->send_info); } mutex_unlock(&ipmi_interfaces_mutex); kref_put(&intf->refcount, intf_free); kref_put(&user->refcount, free_user); return 0; } EXPORT_SYMBOL(ipmi_destroy_user); void ipmi_get_version(ipmi_user_t user, unsigned char *major, unsigned char *minor) { *major = user->intf->ipmi_version_major; *minor = user->intf->ipmi_version_minor; } EXPORT_SYMBOL(ipmi_get_version); int ipmi_set_my_address(ipmi_user_t user, unsigned int channel, unsigned char address) { if (channel >= IPMI_MAX_CHANNELS) return -EINVAL; user->intf->channels[channel].address = address; return 0; } EXPORT_SYMBOL(ipmi_set_my_address); int ipmi_get_my_address(ipmi_user_t user, unsigned int channel, unsigned char *address) { if (channel >= IPMI_MAX_CHANNELS) return -EINVAL; *address = user->intf->channels[channel].address; return 0; } EXPORT_SYMBOL(ipmi_get_my_address); int ipmi_set_my_LUN(ipmi_user_t user, unsigned int channel, unsigned char LUN) { if (channel >= IPMI_MAX_CHANNELS) return 
-EINVAL; user->intf->channels[channel].lun = LUN & 0x3; return 0; } EXPORT_SYMBOL(ipmi_set_my_LUN); int ipmi_get_my_LUN(ipmi_user_t user, unsigned int channel, unsigned char *address) { if (channel >= IPMI_MAX_CHANNELS) return -EINVAL; *address = user->intf->channels[channel].lun; return 0; } EXPORT_SYMBOL(ipmi_get_my_LUN); int ipmi_get_maintenance_mode(ipmi_user_t user) { int mode; unsigned long flags; spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags); mode = user->intf->maintenance_mode; spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags); return mode; } EXPORT_SYMBOL(ipmi_get_maintenance_mode); static void maintenance_mode_update(ipmi_smi_t intf) { if (intf->handlers->set_maintenance_mode) intf->handlers->set_maintenance_mode( intf->send_info, intf->maintenance_mode_enable); } int ipmi_set_maintenance_mode(ipmi_user_t user, int mode) { int rv = 0; unsigned long flags; ipmi_smi_t intf = user->intf; spin_lock_irqsave(&intf->maintenance_mode_lock, flags); if (intf->maintenance_mode != mode) { switch (mode) { case IPMI_MAINTENANCE_MODE_AUTO: intf->maintenance_mode = mode; intf->maintenance_mode_enable = (intf->auto_maintenance_timeout > 0); break; case IPMI_MAINTENANCE_MODE_OFF: intf->maintenance_mode = mode; intf->maintenance_mode_enable = 0; break; case IPMI_MAINTENANCE_MODE_ON: intf->maintenance_mode = mode; intf->maintenance_mode_enable = 1; break; default: rv = -EINVAL; goto out_unlock; } maintenance_mode_update(intf); } out_unlock: spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags); return rv; } EXPORT_SYMBOL(ipmi_set_maintenance_mode); int ipmi_set_gets_events(ipmi_user_t user, int val) { unsigned long flags; ipmi_smi_t intf = user->intf; struct ipmi_recv_msg *msg, *msg2; struct list_head msgs; INIT_LIST_HEAD(&msgs); spin_lock_irqsave(&intf->events_lock, flags); user->gets_events = val; if (intf->delivering_events) /* * Another thread is delivering events for this, so * let it handle any new events. 
*/ goto out; /* Deliver any queued events. */ while (user->gets_events && !list_empty(&intf->waiting_events)) { list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) list_move_tail(&msg->link, &msgs); intf->waiting_events_count = 0; if (intf->event_msg_printed) { printk(KERN_WARNING PFX "Event queue no longer" " full\n"); intf->event_msg_printed = 0; } intf->delivering_events = 1; spin_unlock_irqrestore(&intf->events_lock, flags); list_for_each_entry_safe(msg, msg2, &msgs, link) { msg->user = user; kref_get(&user->refcount); deliver_response(msg); } spin_lock_irqsave(&intf->events_lock, flags); intf->delivering_events = 0; } out: spin_unlock_irqrestore(&intf->events_lock, flags); return 0; } EXPORT_SYMBOL(ipmi_set_gets_events); static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf, unsigned char netfn, unsigned char cmd, unsigned char chan) { struct cmd_rcvr *rcvr; list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) { if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd) && (rcvr->chans & (1 << chan))) return rcvr; } return NULL; } static int is_cmd_rcvr_exclusive(ipmi_smi_t intf, unsigned char netfn, unsigned char cmd, unsigned int chans) { struct cmd_rcvr *rcvr; list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) { if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd) && (rcvr->chans & chans)) return 0; } return 1; } int ipmi_register_for_cmd(ipmi_user_t user, unsigned char netfn, unsigned char cmd, unsigned int chans) { ipmi_smi_t intf = user->intf; struct cmd_rcvr *rcvr; int rv = 0; rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL); if (!rcvr) return -ENOMEM; rcvr->cmd = cmd; rcvr->netfn = netfn; rcvr->chans = chans; rcvr->user = user; mutex_lock(&intf->cmd_rcvrs_mutex); /* Make sure the command/netfn is not already registered. 
/*
 * Compute the IPMB checksum of @size bytes at @data: the two's
 * complement of the modulo-256 sum, so that adding the data bytes
 * and the checksum together yields zero.
 */
static unsigned char ipmb_checksum(unsigned char *data, int size)
{
	unsigned char sum = 0;
	int i;

	for (i = 0; i < size; i++)
		sum += data[i];

	return -sum;
}
*/ if (msg->data_len > 0) memcpy(&(smi_msg->data[i+9]), msg->data, msg->data_len); smi_msg->data_size = msg->data_len + 9; /* Now calculate the checksum and tack it on. */ smi_msg->data[i+smi_msg->data_size] = ipmb_checksum(&(smi_msg->data[i+6]), smi_msg->data_size-6); /* * Add on the checksum size and the offset from the * broadcast. */ smi_msg->data_size += 1 + i; smi_msg->msgid = msgid; } static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg, struct kernel_ipmi_msg *msg, struct ipmi_lan_addr *lan_addr, long msgid, unsigned char ipmb_seq, unsigned char source_lun) { /* Format the IPMB header data. */ smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); smi_msg->data[1] = IPMI_SEND_MSG_CMD; smi_msg->data[2] = lan_addr->channel; smi_msg->data[3] = lan_addr->session_handle; smi_msg->data[4] = lan_addr->remote_SWID; smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3); smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2); smi_msg->data[7] = lan_addr->local_SWID; smi_msg->data[8] = (ipmb_seq << 2) | source_lun; smi_msg->data[9] = msg->cmd; /* Now tack on the data to the message. */ if (msg->data_len > 0) memcpy(&(smi_msg->data[10]), msg->data, msg->data_len); smi_msg->data_size = msg->data_len + 10; /* Now calculate the checksum and tack it on. */ smi_msg->data[smi_msg->data_size] = ipmb_checksum(&(smi_msg->data[7]), smi_msg->data_size-7); /* * Add on the checksum size and the offset from the * broadcast. */ smi_msg->data_size += 1; smi_msg->msgid = msgid; } /* * Separate from ipmi_request so that the user does not have to be * supplied in certain circumstances (mainly at panic time). If * messages are supplied, they will be freed, even if an error * occurs. 
*/ static int i_ipmi_request(ipmi_user_t user, ipmi_smi_t intf, struct ipmi_addr *addr, long msgid, struct kernel_ipmi_msg *msg, void *user_msg_data, void *supplied_smi, struct ipmi_recv_msg *supplied_recv, int priority, unsigned char source_address, unsigned char source_lun, int retries, unsigned int retry_time_ms) { int rv = 0; struct ipmi_smi_msg *smi_msg; struct ipmi_recv_msg *recv_msg; unsigned long flags; struct ipmi_smi_handlers *handlers; if (supplied_recv) recv_msg = supplied_recv; else { recv_msg = ipmi_alloc_recv_msg(); if (recv_msg == NULL) return -ENOMEM; } recv_msg->user_msg_data = user_msg_data; if (supplied_smi) smi_msg = (struct ipmi_smi_msg *) supplied_smi; else { smi_msg = ipmi_alloc_smi_msg(); if (smi_msg == NULL) { ipmi_free_recv_msg(recv_msg); return -ENOMEM; } } rcu_read_lock(); handlers = intf->handlers; if (!handlers) { rv = -ENODEV; goto out_err; } recv_msg->user = user; if (user) kref_get(&user->refcount); recv_msg->msgid = msgid; /* * Store the message to send in the receive message so timeout * responses can get the proper response data. */ recv_msg->msg = *msg; if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { struct ipmi_system_interface_addr *smi_addr; if (msg->netfn & 1) { /* Responses are not allowed to the SMI. */ rv = -EINVAL; goto out_err; } smi_addr = (struct ipmi_system_interface_addr *) addr; if (smi_addr->lun > 3) { ipmi_inc_stat(intf, sent_invalid_commands); rv = -EINVAL; goto out_err; } memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr)); if ((msg->netfn == IPMI_NETFN_APP_REQUEST) && ((msg->cmd == IPMI_SEND_MSG_CMD) || (msg->cmd == IPMI_GET_MSG_CMD) || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) { /* * We don't let the user do these, since we manage * the sequence numbers. 
*/ ipmi_inc_stat(intf, sent_invalid_commands); rv = -EINVAL; goto out_err; } if (((msg->netfn == IPMI_NETFN_APP_REQUEST) && ((msg->cmd == IPMI_COLD_RESET_CMD) || (msg->cmd == IPMI_WARM_RESET_CMD))) || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)) { spin_lock_irqsave(&intf->maintenance_mode_lock, flags); intf->auto_maintenance_timeout = IPMI_MAINTENANCE_MODE_TIMEOUT; if (!intf->maintenance_mode && !intf->maintenance_mode_enable) { intf->maintenance_mode_enable = 1; maintenance_mode_update(intf); } spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags); } if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) { ipmi_inc_stat(intf, sent_invalid_commands); rv = -EMSGSIZE; goto out_err; } smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3); smi_msg->data[1] = msg->cmd; smi_msg->msgid = msgid; smi_msg->user_data = recv_msg; if (msg->data_len > 0) memcpy(&(smi_msg->data[2]), msg->data, msg->data_len); smi_msg->data_size = msg->data_len + 2; ipmi_inc_stat(intf, sent_local_commands); } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { struct ipmi_ipmb_addr *ipmb_addr; unsigned char ipmb_seq; long seqid; int broadcast = 0; if (addr->channel >= IPMI_MAX_CHANNELS) { ipmi_inc_stat(intf, sent_invalid_commands); rv = -EINVAL; goto out_err; } if (intf->channels[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) { ipmi_inc_stat(intf, sent_invalid_commands); rv = -EINVAL; goto out_err; } if (retries < 0) { if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) retries = 0; /* Don't retry broadcasts. */ else retries = 4; } if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) { /* * Broadcasts add a zero at the beginning of the * message, but otherwise is the same as an IPMB * address. */ addr->addr_type = IPMI_IPMB_ADDR_TYPE; broadcast = 1; } /* Default to 1 second retries. */ if (retry_time_ms == 0) retry_time_ms = 1000; /* * 9 for the header and 1 for the checksum, plus * possibly one for the broadcast. 
*/ if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) { ipmi_inc_stat(intf, sent_invalid_commands); rv = -EMSGSIZE; goto out_err; } ipmb_addr = (struct ipmi_ipmb_addr *) addr; if (ipmb_addr->lun > 3) { ipmi_inc_stat(intf, sent_invalid_commands); rv = -EINVAL; goto out_err; } memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr)); if (recv_msg->msg.netfn & 0x1) { /* * It's a response, so use the user's sequence * from msgid. */ ipmi_inc_stat(intf, sent_ipmb_responses); format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid, msgid, broadcast, source_address, source_lun); /* * Save the receive message so we can use it * to deliver the response. */ smi_msg->user_data = recv_msg; } else { /* It's a command, so get a sequence for it. */ spin_lock_irqsave(&(intf->seq_lock), flags); /* * Create a sequence number with a 1 second * timeout and 4 retries. */ rv = intf_next_seq(intf, recv_msg, retry_time_ms, retries, broadcast, &ipmb_seq, &seqid); if (rv) { /* * We have used up all the sequence numbers, * probably, so abort. */ spin_unlock_irqrestore(&(intf->seq_lock), flags); goto out_err; } ipmi_inc_stat(intf, sent_ipmb_commands); /* * Store the sequence number in the message, * so that when the send message response * comes back we can start the timer. */ format_ipmb_msg(smi_msg, msg, ipmb_addr, STORE_SEQ_IN_MSGID(ipmb_seq, seqid), ipmb_seq, broadcast, source_address, source_lun); /* * Copy the message into the recv message data, so we * can retransmit it later if necessary. */ memcpy(recv_msg->msg_data, smi_msg->data, smi_msg->data_size); recv_msg->msg.data = recv_msg->msg_data; recv_msg->msg.data_len = smi_msg->data_size; /* * We don't unlock until here, because we need * to copy the completed message into the * recv_msg before we release the lock. * Otherwise, race conditions may bite us. I * know that's pretty paranoid, but I prefer * to be correct. 
*/ spin_unlock_irqrestore(&(intf->seq_lock), flags); } } else if (is_lan_addr(addr)) { struct ipmi_lan_addr *lan_addr; unsigned char ipmb_seq; long seqid; if (addr->channel >= IPMI_MAX_CHANNELS) { ipmi_inc_stat(intf, sent_invalid_commands); rv = -EINVAL; goto out_err; } if ((intf->channels[addr->channel].medium != IPMI_CHANNEL_MEDIUM_8023LAN) && (intf->channels[addr->channel].medium != IPMI_CHANNEL_MEDIUM_ASYNC)) { ipmi_inc_stat(intf, sent_invalid_commands); rv = -EINVAL; goto out_err; } retries = 4; /* Default to 1 second retries. */ if (retry_time_ms == 0) retry_time_ms = 1000; /* 11 for the header and 1 for the checksum. */ if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) { ipmi_inc_stat(intf, sent_invalid_commands); rv = -EMSGSIZE; goto out_err; } lan_addr = (struct ipmi_lan_addr *) addr; if (lan_addr->lun > 3) { ipmi_inc_stat(intf, sent_invalid_commands); rv = -EINVAL; goto out_err; } memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr)); if (recv_msg->msg.netfn & 0x1) { /* * It's a response, so use the user's sequence * from msgid. */ ipmi_inc_stat(intf, sent_lan_responses); format_lan_msg(smi_msg, msg, lan_addr, msgid, msgid, source_lun); /* * Save the receive message so we can use it * to deliver the response. */ smi_msg->user_data = recv_msg; } else { /* It's a command, so get a sequence for it. */ spin_lock_irqsave(&(intf->seq_lock), flags); /* * Create a sequence number with a 1 second * timeout and 4 retries. */ rv = intf_next_seq(intf, recv_msg, retry_time_ms, retries, 0, &ipmb_seq, &seqid); if (rv) { /* * We have used up all the sequence numbers, * probably, so abort. */ spin_unlock_irqrestore(&(intf->seq_lock), flags); goto out_err; } ipmi_inc_stat(intf, sent_lan_commands); /* * Store the sequence number in the message, * so that when the send message response * comes back we can start the timer. 
*/ format_lan_msg(smi_msg, msg, lan_addr, STORE_SEQ_IN_MSGID(ipmb_seq, seqid), ipmb_seq, source_lun); /* * Copy the message into the recv message data, so we * can retransmit it later if necessary. */ memcpy(recv_msg->msg_data, smi_msg->data, smi_msg->data_size); recv_msg->msg.data = recv_msg->msg_data; recv_msg->msg.data_len = smi_msg->data_size; /* * We don't unlock until here, because we need * to copy the completed message into the * recv_msg before we release the lock. * Otherwise, race conditions may bite us. I * know that's pretty paranoid, but I prefer * to be correct. */ spin_unlock_irqrestore(&(intf->seq_lock), flags); } } else { /* Unknown address type. */ ipmi_inc_stat(intf, sent_invalid_commands); rv = -EINVAL; goto out_err; } #ifdef DEBUG_MSGING { int m; for (m = 0; m < smi_msg->data_size; m++) printk(" %2.2x", smi_msg->data[m]); printk("\n"); } #endif handlers->sender(intf->send_info, smi_msg, priority); rcu_read_unlock(); return 0; out_err: rcu_read_unlock(); ipmi_free_smi_msg(smi_msg); ipmi_free_recv_msg(recv_msg); return rv; } static int check_addr(ipmi_smi_t intf, struct ipmi_addr *addr, unsigned char *saddr, unsigned char *lun) { if (addr->channel >= IPMI_MAX_CHANNELS) return -EINVAL; *lun = intf->channels[addr->channel].lun; *saddr = intf->channels[addr->channel].address; return 0; } int ipmi_request_settime(ipmi_user_t user, struct ipmi_addr *addr, long msgid, struct kernel_ipmi_msg *msg, void *user_msg_data, int priority, int retries, unsigned int retry_time_ms) { unsigned char saddr, lun; int rv; if (!user) return -EINVAL; rv = check_addr(user->intf, addr, &saddr, &lun); if (rv) return rv; return i_ipmi_request(user, user->intf, addr, msgid, msg, user_msg_data, NULL, NULL, priority, saddr, lun, retries, retry_time_ms); } EXPORT_SYMBOL(ipmi_request_settime); int ipmi_request_supply_msgs(ipmi_user_t user, struct ipmi_addr *addr, long msgid, struct kernel_ipmi_msg *msg, void *user_msg_data, void *supplied_smi, struct ipmi_recv_msg 
*supplied_recv, int priority) { unsigned char saddr, lun; int rv; if (!user) return -EINVAL; rv = check_addr(user->intf, addr, &saddr, &lun); if (rv) return rv; return i_ipmi_request(user, user->intf, addr, msgid, msg, user_msg_data, supplied_smi, supplied_recv, priority, saddr, lun, -1, 0); } EXPORT_SYMBOL(ipmi_request_supply_msgs); #ifdef CONFIG_PROC_FS static int smi_ipmb_proc_show(struct seq_file *m, void *v) { ipmi_smi_t intf = m->private; int i; seq_printf(m, "%x", intf->channels[0].address); for (i = 1; i < IPMI_MAX_CHANNELS; i++) seq_printf(m, " %x", intf->channels[i].address); return seq_putc(m, '\n'); } static int smi_ipmb_proc_open(struct inode *inode, struct file *file) { return single_open(file, smi_ipmb_proc_show, PDE(inode)->data); } static const struct file_operations smi_ipmb_proc_ops = { .open = smi_ipmb_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int smi_version_proc_show(struct seq_file *m, void *v) { ipmi_smi_t intf = m->private; return seq_printf(m, "%u.%u\n", ipmi_version_major(&intf->bmc->id), ipmi_version_minor(&intf->bmc->id)); } static int smi_version_proc_open(struct inode *inode, struct file *file) { return single_open(file, smi_version_proc_show, PDE(inode)->data); } static const struct file_operations smi_version_proc_ops = { .open = smi_version_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int smi_stats_proc_show(struct seq_file *m, void *v) { ipmi_smi_t intf = m->private; seq_printf(m, "sent_invalid_commands: %u\n", ipmi_get_stat(intf, sent_invalid_commands)); seq_printf(m, "sent_local_commands: %u\n", ipmi_get_stat(intf, sent_local_commands)); seq_printf(m, "handled_local_responses: %u\n", ipmi_get_stat(intf, handled_local_responses)); seq_printf(m, "unhandled_local_responses: %u\n", ipmi_get_stat(intf, unhandled_local_responses)); seq_printf(m, "sent_ipmb_commands: %u\n", ipmi_get_stat(intf, sent_ipmb_commands)); seq_printf(m, 
"sent_ipmb_command_errs: %u\n",
		   ipmi_get_stat(intf, sent_ipmb_command_errs));
	seq_printf(m, "retransmitted_ipmb_commands: %u\n",
		   ipmi_get_stat(intf, retransmitted_ipmb_commands));
	seq_printf(m, "timed_out_ipmb_commands: %u\n",
		   ipmi_get_stat(intf, timed_out_ipmb_commands));
	seq_printf(m, "timed_out_ipmb_broadcasts: %u\n",
		   ipmi_get_stat(intf, timed_out_ipmb_broadcasts));
	seq_printf(m, "sent_ipmb_responses: %u\n",
		   ipmi_get_stat(intf, sent_ipmb_responses));
	seq_printf(m, "handled_ipmb_responses: %u\n",
		   ipmi_get_stat(intf, handled_ipmb_responses));
	seq_printf(m, "invalid_ipmb_responses: %u\n",
		   ipmi_get_stat(intf, invalid_ipmb_responses));
	seq_printf(m, "unhandled_ipmb_responses: %u\n",
		   ipmi_get_stat(intf, unhandled_ipmb_responses));
	/* LAN-side counters. */
	seq_printf(m, "sent_lan_commands: %u\n",
		   ipmi_get_stat(intf, sent_lan_commands));
	seq_printf(m, "sent_lan_command_errs: %u\n",
		   ipmi_get_stat(intf, sent_lan_command_errs));
	seq_printf(m, "retransmitted_lan_commands: %u\n",
		   ipmi_get_stat(intf, retransmitted_lan_commands));
	seq_printf(m, "timed_out_lan_commands: %u\n",
		   ipmi_get_stat(intf, timed_out_lan_commands));
	seq_printf(m, "sent_lan_responses: %u\n",
		   ipmi_get_stat(intf, sent_lan_responses));
	seq_printf(m, "handled_lan_responses: %u\n",
		   ipmi_get_stat(intf, handled_lan_responses));
	seq_printf(m, "invalid_lan_responses: %u\n",
		   ipmi_get_stat(intf, invalid_lan_responses));
	seq_printf(m, "unhandled_lan_responses: %u\n",
		   ipmi_get_stat(intf, unhandled_lan_responses));
	/* Incoming command / event counters. */
	seq_printf(m, "handled_commands: %u\n",
		   ipmi_get_stat(intf, handled_commands));
	seq_printf(m, "invalid_commands: %u\n",
		   ipmi_get_stat(intf, invalid_commands));
	seq_printf(m, "unhandled_commands: %u\n",
		   ipmi_get_stat(intf, unhandled_commands));
	seq_printf(m, "invalid_events: %u\n",
		   ipmi_get_stat(intf, invalid_events));
	seq_printf(m, "events: %u\n",
		   ipmi_get_stat(intf, events));
	seq_printf(m, "failed rexmit LAN msgs: %u\n",
		   ipmi_get_stat(intf, dropped_rexmit_lan_commands));
	seq_printf(m, "failed rexmit IPMB msgs: %u\n",
ipmi_get_stat(intf, dropped_rexmit_ipmb_commands));
	return 0;
}

static int smi_stats_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, smi_stats_proc_show, PDE(inode)->data);
}

static const struct file_operations smi_stats_proc_ops = {
	.open		= smi_stats_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_PROC_FS */

/*
 * Register a file under this interface's proc directory and record it
 * on the interface's proc-entry list so remove_proc_entries() can tear
 * it down later.  @name is copied; the caller keeps ownership of its
 * string.  Compiles to a no-op (returning 0) without CONFIG_PROC_FS.
 */
int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
			    const struct file_operations *proc_ops,
			    void *data)
{
	int rv = 0;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry  *file;
	struct ipmi_proc_entry *entry;

	/* Create a list element. */
	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;
	entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
	if (!entry->name) {
		kfree(entry);
		return -ENOMEM;
	}
	strcpy(entry->name, name);

	file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data);
	if (!file) {
		kfree(entry->name);
		kfree(entry);
		rv = -ENOMEM;
	} else {
		mutex_lock(&smi->proc_entry_lock);
		/* Stick it on the list. */
		entry->next = smi->proc_entries;
		smi->proc_entries = entry;
		mutex_unlock(&smi->proc_entry_lock);
	}
#endif /* CONFIG_PROC_FS */

	return rv;
}
EXPORT_SYMBOL(ipmi_smi_add_proc_entry);

/* Create /proc/ipmi/<num> and the standard stats/ipmb/version files. */
static int add_proc_entries(ipmi_smi_t smi, int num)
{
	int rv = 0;
#ifdef CONFIG_PROC_FS
	sprintf(smi->proc_dir_name, "%d", num);
	smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
	if (!smi->proc_dir)
		rv = -ENOMEM;

	if (rv == 0)
		rv = ipmi_smi_add_proc_entry(smi, "stats",
					     &smi_stats_proc_ops,
					     smi);
	if (rv == 0)
		rv = ipmi_smi_add_proc_entry(smi, "ipmb",
					     &smi_ipmb_proc_ops,
					     smi);
	if (rv == 0)
		rv = ipmi_smi_add_proc_entry(smi, "version",
					     &smi_version_proc_ops,
					     smi);
#endif /* CONFIG_PROC_FS */

	return rv;
}

/* Tear down everything add_proc_entries()/ipmi_smi_add_proc_entry() made. */
static void remove_proc_entries(ipmi_smi_t smi)
{
#ifdef CONFIG_PROC_FS
	struct ipmi_proc_entry *entry;

	mutex_lock(&smi->proc_entry_lock);
	while (smi->proc_entries) {
		entry = smi->proc_entries;
		smi->proc_entries = entry->next;

		remove_proc_entry(entry->name, smi->proc_dir);
		kfree(entry->name);
		kfree(entry);
	}
	mutex_unlock(&smi->proc_entry_lock);
	remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
#endif /* CONFIG_PROC_FS */
}

/* driver_find_device() match callback: BMC with the given 16-byte GUID. */
static int __find_bmc_guid(struct device *dev, void *data)
{
	unsigned char     *id = data;
	struct bmc_device *bmc = dev_get_drvdata(dev);

	return memcmp(bmc->guid, id, 16) == 0;
}

static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
					     unsigned char *guid)
{
	struct device *dev;

	dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
	if (dev)
		return dev_get_drvdata(dev);
	else
		return NULL;
}

/* Key for looking a BMC up by its product id / device id pair. */
struct prod_dev_id {
	unsigned int  product_id;
	unsigned char device_id;
};

/* driver_find_device() match callback: BMC with matching product/device id. */
static int __find_bmc_prod_dev_id(struct device *dev, void *data)
{
	struct prod_dev_id *id = data;
	struct bmc_device *bmc = dev_get_drvdata(dev);

	return (bmc->id.product_id == id->product_id
		&& bmc->id.device_id == id->device_id);
}

static struct bmc_device *ipmi_find_bmc_prod_dev_id(
	struct device_driver *drv,
	unsigned int product_id, unsigned char device_id)
{
	struct prod_dev_id
id = {
		.product_id = product_id,
		.device_id  = device_id,
	};
	struct device *dev;

	dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
	if (dev)
		return dev_get_drvdata(dev);
	else
		return NULL;
}

/*
 * sysfs "show" callbacks for the BMC platform device.  Each one
 * formats a single field from the cached Get Device ID response.
 */
static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct bmc_device *bmc = dev_get_drvdata(dev);

	return snprintf(buf, 10, "%u\n", bmc->id.device_id);
}

static ssize_t provides_dev_sdrs_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct bmc_device *bmc = dev_get_drvdata(dev);

	/* Bit 7 of the device revision is the "provides SDRs" flag. */
	return snprintf(buf, 10, "%u\n",
			(bmc->id.device_revision & 0x80) >> 7);
}

static ssize_t revision_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct bmc_device *bmc = dev_get_drvdata(dev);

	return snprintf(buf, 20, "%u\n",
			bmc->id.device_revision & 0x0F);
}

static ssize_t firmware_rev_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct bmc_device *bmc = dev_get_drvdata(dev);

	return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
			bmc->id.firmware_revision_2);
}

static ssize_t ipmi_version_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct bmc_device *bmc = dev_get_drvdata(dev);

	return snprintf(buf, 20, "%u.%u\n",
			ipmi_version_major(&bmc->id),
			ipmi_version_minor(&bmc->id));
}

static ssize_t add_dev_support_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct bmc_device *bmc = dev_get_drvdata(dev);

	return snprintf(buf, 10, "0x%02x\n",
			bmc->id.additional_device_support);
}

static ssize_t manufacturer_id_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct bmc_device *bmc = dev_get_drvdata(dev);

	return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
}

static ssize_t product_id_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct bmc_device *bmc = dev_get_drvdata(dev);

	return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
}

static ssize_t aux_firmware_rev_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct bmc_device *bmc = dev_get_drvdata(dev);

	return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
			bmc->id.aux_firmware_revision[3],
			bmc->id.aux_firmware_revision[2],
			bmc->id.aux_firmware_revision[1],
			bmc->id.aux_firmware_revision[0]);
}

static ssize_t guid_show(struct device *dev,
			 struct device_attribute *attr,
			 char *buf)
{
	struct bmc_device *bmc = dev_get_drvdata(dev);

	/*
	 * NOTE(review): this prints only guid[0] and guid[8], not the
	 * full 16-byte GUID.  Looks like a long-standing quirk; fixing
	 * it would change the sysfs ABI, so left as-is — confirm against
	 * upstream before changing.
	 */
	return snprintf(buf, 100, "%Lx%Lx\n",
			(long long) bmc->guid[0],
			(long long) bmc->guid[8]);
}

/* Remove every sysfs attribute created by create_files(). */
static void remove_files(struct bmc_device *bmc)
{
	if (!bmc->dev)
		return;

	device_remove_file(&bmc->dev->dev,
			   &bmc->device_id_attr);
	device_remove_file(&bmc->dev->dev,
			   &bmc->provides_dev_sdrs_attr);
	device_remove_file(&bmc->dev->dev,
			   &bmc->revision_attr);
	device_remove_file(&bmc->dev->dev,
			   &bmc->firmware_rev_attr);
	device_remove_file(&bmc->dev->dev,
			   &bmc->version_attr);
	device_remove_file(&bmc->dev->dev,
			   &bmc->add_dev_support_attr);
	device_remove_file(&bmc->dev->dev,
			   &bmc->manufacturer_id_attr);
	device_remove_file(&bmc->dev->dev,
			   &bmc->product_id_attr);

	if (bmc->id.aux_firmware_revision_set)
		device_remove_file(&bmc->dev->dev,
				   &bmc->aux_firmware_rev_attr);
	if (bmc->guid_set)
		device_remove_file(&bmc->dev->dev,
				   &bmc->guid_attr);
}

/* kref release: last reference to the BMC device is gone. */
static void cleanup_bmc_device(struct kref *ref)
{
	struct bmc_device *bmc;

	bmc = container_of(ref, struct bmc_device, refcount);

	remove_files(bmc);
	platform_device_unregister(bmc->dev);
	kfree(bmc);
}

/* Undo ipmi_bmc_register(): drop symlinks and the BMC reference. */
static void ipmi_bmc_unregister(ipmi_smi_t intf)
{
	struct bmc_device *bmc = intf->bmc;

	if (intf->sysfs_name) {
		sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name);
		kfree(intf->sysfs_name);
		intf->sysfs_name = NULL;
	}
	if (intf->my_dev_name) {
		sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
		kfree(intf->my_dev_name);
		intf->my_dev_name = NULL;
	}

	mutex_lock(&ipmidriver_mutex);
	kref_put(&bmc->refcount, cleanup_bmc_device);
	intf->bmc = NULL;
	mutex_unlock(&ipmidriver_mutex);
}

static int create_files(struct bmc_device
*bmc)
{
	int err;

	/*
	 * Initialize every attribute before creating any file; the
	 * device_create_file() chain below unwinds via gotos on error.
	 */
	bmc->device_id_attr.attr.name = "device_id";
	bmc->device_id_attr.attr.mode = S_IRUGO;
	bmc->device_id_attr.show = device_id_show;
	sysfs_attr_init(&bmc->device_id_attr.attr);

	bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
	bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
	bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
	sysfs_attr_init(&bmc->provides_dev_sdrs_attr.attr);

	bmc->revision_attr.attr.name = "revision";
	bmc->revision_attr.attr.mode = S_IRUGO;
	bmc->revision_attr.show = revision_show;
	sysfs_attr_init(&bmc->revision_attr.attr);

	bmc->firmware_rev_attr.attr.name = "firmware_revision";
	bmc->firmware_rev_attr.attr.mode = S_IRUGO;
	bmc->firmware_rev_attr.show = firmware_rev_show;
	sysfs_attr_init(&bmc->firmware_rev_attr.attr);

	bmc->version_attr.attr.name = "ipmi_version";
	bmc->version_attr.attr.mode = S_IRUGO;
	bmc->version_attr.show = ipmi_version_show;
	sysfs_attr_init(&bmc->version_attr.attr);

	bmc->add_dev_support_attr.attr.name = "additional_device_support";
	bmc->add_dev_support_attr.attr.mode = S_IRUGO;
	bmc->add_dev_support_attr.show = add_dev_support_show;
	sysfs_attr_init(&bmc->add_dev_support_attr.attr);

	bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
	bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
	bmc->manufacturer_id_attr.show = manufacturer_id_show;
	sysfs_attr_init(&bmc->manufacturer_id_attr.attr);

	bmc->product_id_attr.attr.name = "product_id";
	bmc->product_id_attr.attr.mode = S_IRUGO;
	bmc->product_id_attr.show = product_id_show;
	sysfs_attr_init(&bmc->product_id_attr.attr);

	bmc->guid_attr.attr.name = "guid";
	bmc->guid_attr.attr.mode = S_IRUGO;
	bmc->guid_attr.show = guid_show;
	sysfs_attr_init(&bmc->guid_attr.attr);

	bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
	bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
	bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
	sysfs_attr_init(&bmc->aux_firmware_rev_attr.attr);

	err = device_create_file(&bmc->dev->dev,
				 &bmc->device_id_attr);
	if (err)
		goto out;
	err = device_create_file(&bmc->dev->dev,
				 &bmc->provides_dev_sdrs_attr);
	if (err)
		goto out_devid;
	err = device_create_file(&bmc->dev->dev,
				 &bmc->revision_attr);
	if (err)
		goto out_sdrs;
	err = device_create_file(&bmc->dev->dev,
				 &bmc->firmware_rev_attr);
	if (err)
		goto out_rev;
	err = device_create_file(&bmc->dev->dev,
				 &bmc->version_attr);
	if (err)
		goto out_firm;
	err = device_create_file(&bmc->dev->dev,
				 &bmc->add_dev_support_attr);
	if (err)
		goto out_version;
	err = device_create_file(&bmc->dev->dev,
				 &bmc->manufacturer_id_attr);
	if (err)
		goto out_add_dev;
	err = device_create_file(&bmc->dev->dev,
				 &bmc->product_id_attr);
	if (err)
		goto out_manu;
	/* The last two attributes are optional. */
	if (bmc->id.aux_firmware_revision_set) {
		err = device_create_file(&bmc->dev->dev,
					 &bmc->aux_firmware_rev_attr);
		if (err)
			goto out_prod_id;
	}
	if (bmc->guid_set) {
		err = device_create_file(&bmc->dev->dev,
					 &bmc->guid_attr);
		if (err)
			goto out_aux_firm;
	}

	return 0;

out_aux_firm:
	if (bmc->id.aux_firmware_revision_set)
		device_remove_file(&bmc->dev->dev,
				   &bmc->aux_firmware_rev_attr);
out_prod_id:
	device_remove_file(&bmc->dev->dev,
			   &bmc->product_id_attr);
out_manu:
	device_remove_file(&bmc->dev->dev,
			   &bmc->manufacturer_id_attr);
out_add_dev:
	device_remove_file(&bmc->dev->dev,
			   &bmc->add_dev_support_attr);
out_version:
	device_remove_file(&bmc->dev->dev,
			   &bmc->version_attr);
out_firm:
	device_remove_file(&bmc->dev->dev,
			   &bmc->firmware_rev_attr);
out_rev:
	device_remove_file(&bmc->dev->dev,
			   &bmc->revision_attr);
out_sdrs:
	device_remove_file(&bmc->dev->dev,
			   &bmc->provides_dev_sdrs_attr);
out_devid:
	device_remove_file(&bmc->dev->dev,
			   &bmc->device_id_attr);
out:
	return err;
}

/*
 * Bind the interface to a BMC platform device, reusing an already
 * registered BMC (matched by GUID or product/device id) if one exists,
 * and create the interface<->BMC sysfs symlinks.
 */
static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
			     const char *sysfs_name)
{
	int               rv;
	struct bmc_device *bmc = intf->bmc;
	struct bmc_device *old_bmc;
	int               size;
	char              dummy[1];

	mutex_lock(&ipmidriver_mutex);

	/*
	 * Try to find if there is an bmc_device struct
	 * representing the interfaced BMC already
	 */
	if (bmc->guid_set)
		old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver,
bmc->guid);
	else
		old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
						    bmc->id.product_id,
						    bmc->id.device_id);

	/*
	 * If there is already an bmc_device, free the new one,
	 * otherwise register the new BMC device
	 */
	if (old_bmc) {
		kfree(bmc);
		intf->bmc = old_bmc;
		bmc = old_bmc;

		kref_get(&bmc->refcount);
		mutex_unlock(&ipmidriver_mutex);

		printk(KERN_INFO
		       "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
		       " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
		       bmc->id.manufacturer_id,
		       bmc->id.product_id,
		       bmc->id.device_id);
	} else {
		char name[14];
		unsigned char orig_dev_id = bmc->id.device_id;
		int warn_printed = 0;

		snprintf(name, sizeof(name),
			 "ipmi_bmc.%4.4x", bmc->id.product_id);

		/*
		 * Buggy firmware can report duplicate product/device id
		 * pairs; bump the device id until we find a free slot.
		 */
		while (ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
						 bmc->id.product_id,
						 bmc->id.device_id)) {
			if (!warn_printed) {
				printk(KERN_WARNING PFX
				       "This machine has two different BMCs"
				       " with the same product id and device"
				       " id.  This is an error in the"
				       " firmware, but incrementing the"
				       " device id to work around the problem."
				       " Prod ID = 0x%x, Dev ID = 0x%x\n",
				       bmc->id.product_id, bmc->id.device_id);
				warn_printed = 1;
			}
			bmc->id.device_id++; /* Wraps at 255 */
			if (bmc->id.device_id == orig_dev_id) {
				printk(KERN_ERR PFX
				       "Out of device ids!\n");
				break;
			}
		}

		bmc->dev = platform_device_alloc(name, bmc->id.device_id);
		if (!bmc->dev) {
			mutex_unlock(&ipmidriver_mutex);
			printk(KERN_ERR
			       "ipmi_msghandler:"
			       " Unable to allocate platform device\n");
			return -ENOMEM;
		}
		bmc->dev->dev.driver = &ipmidriver.driver;
		dev_set_drvdata(&bmc->dev->dev, bmc);
		kref_init(&bmc->refcount);

		rv = platform_device_add(bmc->dev);
		mutex_unlock(&ipmidriver_mutex);
		if (rv) {
			platform_device_put(bmc->dev);
			bmc->dev = NULL;
			printk(KERN_ERR
			       "ipmi_msghandler:"
			       " Unable to register bmc device: %d\n",
			       rv);
			/*
			 * Don't go to out_err, you can only do that if
			 * the device is registered already.
			 */
			return rv;
		}

		rv = create_files(bmc);
		if (rv) {
			mutex_lock(&ipmidriver_mutex);
			platform_device_unregister(bmc->dev);
			mutex_unlock(&ipmidriver_mutex);

			return rv;
		}

		dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
			 "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
			 bmc->id.manufacturer_id,
			 bmc->id.product_id,
			 bmc->id.device_id);
	}

	/*
	 * create symlink from system interface device to bmc device
	 * and back.
	 */
	intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL);
	if (!intf->sysfs_name) {
		rv = -ENOMEM;
		printk(KERN_ERR
		       "ipmi_msghandler: allocate link to BMC: %d\n",
		       rv);
		goto out_err;
	}

	rv = sysfs_create_link(&intf->si_dev->kobj,
			       &bmc->dev->dev.kobj, intf->sysfs_name);
	if (rv) {
		kfree(intf->sysfs_name);
		intf->sysfs_name = NULL;
		printk(KERN_ERR
		       "ipmi_msghandler: Unable to create bmc symlink: %d\n",
		       rv);
		goto out_err;
	}

	/* First snprintf with size 0 just measures the needed length. */
	size = snprintf(dummy, 0, "ipmi%d", ifnum);
	intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
	if (!intf->my_dev_name) {
		kfree(intf->sysfs_name);
		intf->sysfs_name = NULL;
		rv = -ENOMEM;
		printk(KERN_ERR
		       "ipmi_msghandler: allocate link from BMC: %d\n",
		       rv);
		goto out_err;
	}
	snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum);

	rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
			       intf->my_dev_name);
	if (rv) {
		kfree(intf->sysfs_name);
		intf->sysfs_name = NULL;
		kfree(intf->my_dev_name);
		intf->my_dev_name = NULL;
		printk(KERN_ERR
		       "ipmi_msghandler:"
		       " Unable to create symlink to bmc: %d\n",
		       rv);
		goto out_err;
	}

	return 0;

out_err:
	ipmi_bmc_unregister(intf);
	return rv;
}

/*
 * Issue a Get Device GUID command to the BMC on the system interface;
 * the reply is picked up by guid_handler() via null_user_handler.
 */
static int send_guid_cmd(ipmi_smi_t intf, int chan)
{
	struct kernel_ipmi_msg            msg;
	struct ipmi_system_interface_addr si;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
	msg.data = NULL;
	msg.data_len = 0;
	return i_ipmi_request(NULL,
			      intf,
			      (struct ipmi_addr *) &si,
			      0,
			      &msg,
			      intf,
			      NULL,
			      NULL,
			      0,
			      intf->channels[0].address,
			      intf->channels[0].lun,
			      -1, 0);
}

static void guid_handler(ipmi_smi_t
intf, struct ipmi_recv_msg *msg)
{
	/* Response to the Get Device GUID sent by send_guid_cmd(). */
	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
	    || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
		/* Not for me */
		return;

	if (msg->msg.data[0] != 0) {
		/* Error from getting the GUID, the BMC doesn't have one. */
		intf->bmc->guid_set = 0;
		goto out;
	}

	if (msg->msg.data_len < 17) {
		intf->bmc->guid_set = 0;
		printk(KERN_WARNING PFX
		       "guid_handler: The GUID response from the BMC was too"
		       " short, it was %d but should have been 17.  Assuming"
		       " GUID is not available.\n",
		       msg->msg.data_len);
		goto out;
	}

	memcpy(intf->bmc->guid, msg->msg.data, 16);
	intf->bmc->guid_set = 1;
 out:
	/* Wake the waiter in get_guid(). */
	wake_up(&intf->waitq);
}

/*
 * Synchronously fetch the BMC's GUID.  guid_set is 2 while the
 * request is in flight, then 0 (not available) or 1 (valid).
 */
static void get_guid(ipmi_smi_t intf)
{
	int rv;

	intf->bmc->guid_set = 0x2;
	intf->null_user_handler = guid_handler;
	rv = send_guid_cmd(intf, 0);
	if (rv)
		/* Send failed, no GUID available. */
		intf->bmc->guid_set = 0;
	wait_event(intf->waitq, intf->bmc->guid_set != 2);
	intf->null_user_handler = NULL;
}

/* Ask the BMC for information about one channel (Get Channel Info). */
static int send_channel_info_cmd(ipmi_smi_t intf, int chan)
{
	struct kernel_ipmi_msg            msg;
	unsigned char                     data[1];
	struct ipmi_system_interface_addr si;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
	msg.data = data;
	msg.data_len = 1;
	data[0] = chan;
	return i_ipmi_request(NULL,
			      intf,
			      (struct ipmi_addr *) &si,
			      0,
			      &msg,
			      intf,
			      NULL,
			      NULL,
			      0,
			      intf->channels[0].address,
			      intf->channels[0].lun,
			      -1, 0);
}

/*
 * Response handler for the channel scan started in ipmi_register_smi():
 * records each channel's medium/protocol, then requests the next
 * channel until all IPMI_MAX_CHANNELS are done (or an error ends the
 * scan), finally waking the registration path.
 */
static void channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;
	int chan;

	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
		/* It's the one we want */
		if (msg->msg.data[0] != 0) {
			/* Got an error from the channel, just go on. */

			if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
				/*
				 * If the MC does not support this
				 * command, that is legal.  We just
				 * assume it has one IPMB at channel
				 * zero.
				 */
				intf->channels[0].medium
					= IPMI_CHANNEL_MEDIUM_IPMB;
				intf->channels[0].protocol
					= IPMI_CHANNEL_PROTOCOL_IPMB;
				rv = -ENOSYS;

				intf->curr_channel = IPMI_MAX_CHANNELS;
				wake_up(&intf->waitq);
				goto out;
			}
			goto next_channel;
		}
		if (msg->msg.data_len < 4) {
			/* Message not big enough, just go on. */
			goto next_channel;
		}
		chan = intf->curr_channel;
		intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
		intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;

 next_channel:
		intf->curr_channel++;
		if (intf->curr_channel >= IPMI_MAX_CHANNELS)
			wake_up(&intf->waitq);
		else
			rv = send_channel_info_cmd(intf, intf->curr_channel);

		if (rv) {
			/* Got an error somehow, just give up. */
			intf->curr_channel = IPMI_MAX_CHANNELS;
			wake_up(&intf->waitq);

			printk(KERN_WARNING PFX
			       "Error sending channel information: %d\n",
			       rv);
		}
	}
 out:
	return;
}

/* Poll the low-level driver and process anything that arrived. */
static void ipmi_poll(ipmi_smi_t intf)
{
	if (intf->handlers->poll)
		intf->handlers->poll(intf->send_info);
	/* In case something came in */
	handle_new_recv_msgs(intf);
}

void ipmi_poll_interface(ipmi_user_t user)
{
	ipmi_poll(user->intf);
}
EXPORT_SYMBOL(ipmi_poll_interface);

/*
 * Register a new system interface with the message handler: allocate
 * and initialize the interface, assign it the lowest free number,
 * probe the BMC (GUID, channels), and create proc/sysfs entries.
 */
int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
		      void		       *send_info,
		      struct ipmi_device_id    *device_id,
		      struct device            *si_dev,
		      const char               *sysfs_name,
		      unsigned char            slave_addr)
{
	int              i, j;
	int              rv;
	ipmi_smi_t       intf;
	ipmi_smi_t       tintf;
	struct list_head *link;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	if (!initialized) {
		rv = ipmi_init_msghandler();
		if (rv)
			return rv;
		/*
		 * The init code doesn't return an error if it was turned
		 * off, but it won't initialize.  Check that.
*/
		if (!initialized)
			return -ENODEV;
	}

	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
	if (!intf)
		return -ENOMEM;

	intf->ipmi_version_major = ipmi_version_major(device_id);
	intf->ipmi_version_minor = ipmi_version_minor(device_id);

	intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
	if (!intf->bmc) {
		kfree(intf);
		return -ENOMEM;
	}
	intf->intf_num = -1; /* Mark it invalid for now. */
	kref_init(&intf->refcount);
	intf->bmc->id = *device_id;
	intf->si_dev = si_dev;
	for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
		intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
		intf->channels[j].lun = 2;
	}
	if (slave_addr != 0)
		intf->channels[0].address = slave_addr;
	INIT_LIST_HEAD(&intf->users);
	intf->handlers = handlers;
	intf->send_info = send_info;
	spin_lock_init(&intf->seq_lock);
	for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
		intf->seq_table[j].inuse = 0;
		intf->seq_table[j].seqid = 0;
	}
	intf->curr_seq = 0;
#ifdef CONFIG_PROC_FS
	mutex_init(&intf->proc_entry_lock);
#endif
	spin_lock_init(&intf->waiting_msgs_lock);
	INIT_LIST_HEAD(&intf->waiting_msgs);
	tasklet_init(&intf->recv_tasklet,
		     smi_recv_tasklet,
		     (unsigned long) intf);
	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
	spin_lock_init(&intf->events_lock);
	INIT_LIST_HEAD(&intf->waiting_events);
	intf->waiting_events_count = 0;
	mutex_init(&intf->cmd_rcvrs_mutex);
	spin_lock_init(&intf->maintenance_mode_lock);
	INIT_LIST_HEAD(&intf->cmd_rcvrs);
	init_waitqueue_head(&intf->waitq);
	for (i = 0; i < IPMI_NUM_STATS; i++)
		atomic_set(&intf->stats[i], 0);

	intf->proc_dir = NULL;

	mutex_lock(&smi_watchers_mutex);
	mutex_lock(&ipmi_interfaces_mutex);
	/* Look for a hole in the numbers. */
	i = 0;
	link = &ipmi_interfaces;
	list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
		if (tintf->intf_num != i) {
			link = &tintf->link;
			break;
		}
		i++;
	}
	/* Add the new interface in numeric order. */
	if (i == 0)
		list_add_rcu(&intf->link, &ipmi_interfaces);
	else
		list_add_tail_rcu(&intf->link, link);

	rv = handlers->start_processing(send_info, intf);
	if (rv)
		goto out;

	get_guid(intf);

	if ((intf->ipmi_version_major > 1)
	    || ((intf->ipmi_version_major == 1)
		&& (intf->ipmi_version_minor >= 5))) {
		/*
		 * Start scanning the channels to see what is
		 * available.
		 */
		intf->null_user_handler = channel_handler;
		intf->curr_channel = 0;
		rv = send_channel_info_cmd(intf, 0);
		if (rv)
			goto out;

		/* Wait for the channel info to be read. */
		wait_event(intf->waitq,
			   intf->curr_channel >= IPMI_MAX_CHANNELS);
		intf->null_user_handler = NULL;
	} else {
		/* Assume a single IPMB channel at zero. */
		intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
		intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
		intf->curr_channel = IPMI_MAX_CHANNELS;
	}

	if (rv == 0)
		rv = add_proc_entries(intf, i);

	rv = ipmi_bmc_register(intf, i, sysfs_name);

 out:
	if (rv) {
		if (intf->proc_dir)
			remove_proc_entries(intf);
		intf->handlers = NULL;
		list_del_rcu(&intf->link);
		mutex_unlock(&ipmi_interfaces_mutex);
		mutex_unlock(&smi_watchers_mutex);
		synchronize_rcu();
		kref_put(&intf->refcount, intf_free);
	} else {
		/*
		 * Keep memory order straight for RCU readers.  Make
		 * sure everything else is committed to memory before
		 * setting intf_num to mark the interface valid.
		 */
		smp_wmb();
		intf->intf_num = i;
		mutex_unlock(&ipmi_interfaces_mutex);
		/* After this point the interface is legal to use. */
		call_smi_watchers(i, intf->si_dev);
		mutex_unlock(&smi_watchers_mutex);
	}

	return rv;
}
EXPORT_SYMBOL(ipmi_register_smi);

/* Fail any still-pending sequenced messages on a dying interface. */
static void cleanup_smi_msgs(ipmi_smi_t intf)
{
	int              i;
	struct seq_table *ent;

	/* No need for locks, the interface is down.
*/
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		ent = &(intf->seq_table[i]);
		if (!ent->inuse)
			continue;
		deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED);
	}
}

/*
 * Tear down an interface registered with ipmi_register_smi():
 * unregister the BMC, unlink from the interface list (RCU), flush
 * pending messages, remove proc entries and notify all watchers.
 */
int ipmi_unregister_smi(ipmi_smi_t intf)
{
	struct ipmi_smi_watcher *w;
	int    intf_num = intf->intf_num;

	ipmi_bmc_unregister(intf);

	mutex_lock(&smi_watchers_mutex);
	mutex_lock(&ipmi_interfaces_mutex);
	intf->intf_num = -1;
	intf->handlers = NULL;
	list_del_rcu(&intf->link);
	mutex_unlock(&ipmi_interfaces_mutex);
	synchronize_rcu();

	cleanup_smi_msgs(intf);

	remove_proc_entries(intf);

	/*
	 * Call all the watcher interfaces to tell them that
	 * an interface is gone.
	 */
	list_for_each_entry(w, &smi_watchers, link)
		w->smi_gone(intf_num);
	mutex_unlock(&smi_watchers_mutex);

	kref_put(&intf->refcount, intf_free);
	return 0;
}
EXPORT_SYMBOL(ipmi_unregister_smi);

/*
 * Handle an IPMB response that arrived via a Get Message command:
 * match it to a pending sequence number and deliver it to the
 * originating user.
 */
static int handle_ipmb_get_msg_rsp(ipmi_smi_t          intf,
				   struct ipmi_smi_msg *msg)
{
	struct ipmi_ipmb_addr ipmb_addr;
	struct ipmi_recv_msg  *recv_msg;

	/*
	 * This is 11, not 10, because the response must contain a
	 * completion code.
	 */
	if (msg->rsp_size < 11) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_ipmb_responses);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
	ipmb_addr.slave_addr = msg->rsp[6];
	ipmb_addr.channel = msg->rsp[3] & 0x0f;
	ipmb_addr.lun = msg->rsp[7] & 3;

	/*
	 * It's a response from a remote entity.  Look up the sequence
	 * number and handle the response.
	 */
	if (intf_find_seq(intf,
			  msg->rsp[7] >> 2,
			  msg->rsp[3] & 0x0f,
			  msg->rsp[8],
			  (msg->rsp[4] >> 2) & (~1),
			  (struct ipmi_addr *) &(ipmb_addr),
			  &recv_msg)) {
		/*
		 * We were unable to find the sequence number,
		 * so just nuke the message.
		 */
		ipmi_inc_stat(intf, unhandled_ipmb_responses);
		return 0;
	}

	memcpy(recv_msg->msg_data,
	       &(msg->rsp[9]),
	       msg->rsp_size - 9);
	/*
	 * The other fields matched, so no need to set them, except
	 * for netfn, which needs to be the response that was
	 * returned, not the request value.
	 */
	recv_msg->msg.netfn = msg->rsp[4] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 10;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	ipmi_inc_stat(intf, handled_ipmb_responses);
	deliver_response(recv_msg);

	return 0;
}

/*
 * Handle an incoming IPMB command: route it to the registered command
 * receiver, or send an "invalid command" response back if nobody has
 * registered for it.  Returns -1 if the smi_msg was consumed, 1 to
 * requeue, 0 if it may be freed.
 */
static int handle_ipmb_get_msg_cmd(ipmi_smi_t          intf,
				   struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr          *rcvr;
	int                      rv = 0;
	unsigned char            netfn;
	unsigned char            cmd;
	unsigned char            chan;
	ipmi_user_t              user = NULL;
	struct ipmi_ipmb_addr    *ipmb_addr;
	struct ipmi_recv_msg     *recv_msg;
	struct ipmi_smi_handlers *handlers;

	if (msg->rsp_size < 10) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	netfn = msg->rsp[4] >> 2;
	cmd = msg->rsp[8];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, deliver an error response.
*/
		ipmi_inc_stat(intf, unhandled_commands);

		/* Build an IPMB "invalid command" response via Send Message. */
		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg->data[1] = IPMI_SEND_MSG_CMD;
		msg->data[2] = msg->rsp[3];
		msg->data[3] = msg->rsp[6];
		msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
		msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
		msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
		/* rqseq/lun */
		msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
		msg->data[8] = msg->rsp[8]; /* cmd */
		msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
		msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
		msg->data_size = 11;

#ifdef DEBUG_MSGING
		{
			int m;
			printk("Invalid command:");
			for (m = 0; m < msg->data_size; m++)
				printk(" %2.2x", msg->data[m]);
			printk("\n");
		}
#endif
		rcu_read_lock();
		handlers = intf->handlers;
		if (handlers) {
			handlers->sender(intf->send_info, msg, 0);
			/*
			 * We used the message, so return the value
			 * that causes it to not be freed or
			 * queued.
			 */
			rv = -1;
		}
		rcu_read_unlock();
	} else {
		/* Deliver the message to the user. */
		ipmi_inc_stat(intf, handled_commands);

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/* Extract the source address from the data. */
			ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
			ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
			ipmb_addr->slave_addr = msg->rsp[6];
			ipmb_addr->lun = msg->rsp[7] & 3;
			ipmb_addr->channel = msg->rsp[3] & 0xf;

			/*
			 * Extract the rest of the message information
			 * from the IPMB header.
			 */
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[7] >> 2;
			recv_msg->msg.netfn = msg->rsp[4] >> 2;
			recv_msg->msg.cmd = msg->rsp[8];
			recv_msg->msg.data = recv_msg->msg_data;

			/*
			 * We chop off 10, not 9 bytes because the checksum
			 * at the end also needs to be removed.
			 */
			recv_msg->msg.data_len = msg->rsp_size - 10;
			memcpy(recv_msg->msg_data,
			       &(msg->rsp[9]),
			       msg->rsp_size - 10);
			deliver_response(recv_msg);
		}
	}

	return rv;
}

/*
 * Handle a LAN-sourced response that arrived via a Get Message
 * command: match it to a pending sequence number and deliver it.
 */
static int handle_lan_get_msg_rsp(ipmi_smi_t          intf,
				  struct ipmi_smi_msg *msg)
{
	struct ipmi_lan_addr  lan_addr;
	struct ipmi_recv_msg  *recv_msg;

	/*
	 * This is 13, not 12, because the response must contain a
	 * completion code.
	 */
	if (msg->rsp_size < 13) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_lan_responses);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
	lan_addr.session_handle = msg->rsp[4];
	lan_addr.remote_SWID = msg->rsp[8];
	lan_addr.local_SWID = msg->rsp[5];
	lan_addr.channel = msg->rsp[3] & 0x0f;
	lan_addr.privilege = msg->rsp[3] >> 4;
	lan_addr.lun = msg->rsp[9] & 3;

	/*
	 * It's a response from a remote entity.  Look up the sequence
	 * number and handle the response.
	 */
	if (intf_find_seq(intf,
			  msg->rsp[9] >> 2,
			  msg->rsp[3] & 0x0f,
			  msg->rsp[10],
			  (msg->rsp[6] >> 2) & (~1),
			  (struct ipmi_addr *) &(lan_addr),
			  &recv_msg)) {
		/*
		 * We were unable to find the sequence number,
		 * so just nuke the message.
		 */
		ipmi_inc_stat(intf, unhandled_lan_responses);
		return 0;
	}

	memcpy(recv_msg->msg_data,
	       &(msg->rsp[11]),
	       msg->rsp_size - 11);
	/*
	 * The other fields matched, so no need to set them, except
	 * for netfn, which needs to be the response that was
	 * returned, not the request value.
*/
	recv_msg->msg.netfn = msg->rsp[6] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 12;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	ipmi_inc_stat(intf, handled_lan_responses);
	deliver_response(recv_msg);

	return 0;
}

/*
 * Handle an incoming LAN-sourced command: route it to the registered
 * command receiver, or drop it (no error reply is sent for LAN).
 * Returns 1 to requeue the message, 0 if it may be freed.
 */
static int handle_lan_get_msg_cmd(ipmi_smi_t          intf,
				  struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr          *rcvr;
	int                      rv = 0;
	unsigned char            netfn;
	unsigned char            cmd;
	unsigned char            chan;
	ipmi_user_t              user = NULL;
	struct ipmi_lan_addr     *lan_addr;
	struct ipmi_recv_msg     *recv_msg;

	if (msg->rsp_size < 12) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	netfn = msg->rsp[6] >> 2;
	cmd = msg->rsp[10];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up. */
		ipmi_inc_stat(intf, unhandled_commands);

		/*
		 * Don't do anything with these messages, just allow
		 * them to be freed.
		 */
		rv = 0;
	} else {
		/* Deliver the message to the user. */
		ipmi_inc_stat(intf, handled_commands);

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling later.
			 */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/* Extract the source address from the data. */
			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
			lan_addr->session_handle = msg->rsp[4];
			lan_addr->remote_SWID = msg->rsp[8];
			lan_addr->local_SWID = msg->rsp[5];
			lan_addr->lun = msg->rsp[9] & 3;
			lan_addr->channel = msg->rsp[3] & 0xf;
			lan_addr->privilege = msg->rsp[3] >> 4;

			/*
			 * Extract the rest of the message information
			 * from the IPMB header.
			 */
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[9] >> 2;
			recv_msg->msg.netfn = msg->rsp[6] >> 2;
			recv_msg->msg.cmd = msg->rsp[10];
			recv_msg->msg.data = recv_msg->msg_data;

			/*
			 * We chop off 12, not 11 bytes because the checksum
			 * at the end also needs to be removed.
			 */
			recv_msg->msg.data_len = msg->rsp_size - 12;
			memcpy(recv_msg->msg_data,
			       &(msg->rsp[11]),
			       msg->rsp_size - 12);
			deliver_response(recv_msg);
		}
	}

	return rv;
}

/*
 * This routine will handle "Get Message" command responses with
 * channels that use an OEM Medium. The message format belongs to
 * the OEM.  See IPMI 2.0 specification, Chapter 6 and
 * Chapter 22, sections 22.6 and 22.24 for more details.
 */
static int handle_oem_get_msg_cmd(ipmi_smi_t          intf,
				  struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr       *rcvr;
	int                   rv = 0;
	unsigned char         netfn;
	unsigned char         cmd;
	unsigned char         chan;
	ipmi_user_t           user = NULL;
	struct ipmi_system_interface_addr *smi_addr;
	struct ipmi_recv_msg  *recv_msg;

	/*
	 * We expect the OEM SW to perform error checking
	 * so we just do some basic sanity checks
	 */
	if (msg->rsp_size < 4) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	/*
	 * This is an OEM Message so the OEM needs to know how
	 * handle the message. We do no interpretation.
	 */
	netfn = msg->rsp[0] >> 2;
	cmd = msg->rsp[1];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up. */
		ipmi_inc_stat(intf, unhandled_commands);

		/*
		 * Don't do anything with these messages, just allow
		 * them to be freed.
		 */
		rv = 0;
	} else {
		/* Deliver the message to the user.
*/ ipmi_inc_stat(intf, handled_commands); recv_msg = ipmi_alloc_recv_msg(); if (!recv_msg) { /* * We couldn't allocate memory for the * message, so requeue it for handling * later. */ rv = 1; kref_put(&user->refcount, free_user); } else { /* * OEM Messages are expected to be delivered via * the system interface to SMS software. We might * need to visit this again depending on OEM * requirements */ smi_addr = ((struct ipmi_system_interface_addr *) &(recv_msg->addr)); smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; smi_addr->channel = IPMI_BMC_CHANNEL; smi_addr->lun = msg->rsp[0] & 3; recv_msg->user = user; recv_msg->user_msg_data = NULL; recv_msg->recv_type = IPMI_OEM_RECV_TYPE; recv_msg->msg.netfn = msg->rsp[0] >> 2; recv_msg->msg.cmd = msg->rsp[1]; recv_msg->msg.data = recv_msg->msg_data; /* * The message starts at byte 4 which follows the * the Channel Byte in the "GET MESSAGE" command */ recv_msg->msg.data_len = msg->rsp_size - 4; memcpy(recv_msg->msg_data, &(msg->rsp[4]), msg->rsp_size - 4); deliver_response(recv_msg); } } return rv; } static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg, struct ipmi_smi_msg *msg) { struct ipmi_system_interface_addr *smi_addr; recv_msg->msgid = 0; smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr); smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; smi_addr->channel = IPMI_BMC_CHANNEL; smi_addr->lun = msg->rsp[0] & 3; recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE; recv_msg->msg.netfn = msg->rsp[0] >> 2; recv_msg->msg.cmd = msg->rsp[1]; memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3); recv_msg->msg.data = recv_msg->msg_data; recv_msg->msg.data_len = msg->rsp_size - 3; } static int handle_read_event_rsp(ipmi_smi_t intf, struct ipmi_smi_msg *msg) { struct ipmi_recv_msg *recv_msg, *recv_msg2; struct list_head msgs; ipmi_user_t user; int rv = 0; int deliver_count = 0; unsigned long flags; if (msg->rsp_size < 19) { /* Message is too small to be an IPMB event. 
*/ ipmi_inc_stat(intf, invalid_events); return 0; } if (msg->rsp[2] != 0) { /* An error getting the event, just ignore it. */ return 0; } INIT_LIST_HEAD(&msgs); spin_lock_irqsave(&intf->events_lock, flags); ipmi_inc_stat(intf, events); /* * Allocate and fill in one message for every user that is * getting events. */ rcu_read_lock(); list_for_each_entry_rcu(user, &intf->users, link) { if (!user->gets_events) continue; recv_msg = ipmi_alloc_recv_msg(); if (!recv_msg) { rcu_read_unlock(); list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) { list_del(&recv_msg->link); ipmi_free_recv_msg(recv_msg); } /* * We couldn't allocate memory for the * message, so requeue it for handling * later. */ rv = 1; goto out; } deliver_count++; copy_event_into_recv_msg(recv_msg, msg); recv_msg->user = user; kref_get(&user->refcount); list_add_tail(&(recv_msg->link), &msgs); } rcu_read_unlock(); if (deliver_count) { /* Now deliver all the messages. */ list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) { list_del(&recv_msg->link); deliver_response(recv_msg); } } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) { /* * No one to receive the message, put it in queue if there's * not already too many things in the queue. */ recv_msg = ipmi_alloc_recv_msg(); if (!recv_msg) { /* * We couldn't allocate memory for the * message, so requeue it for handling * later. */ rv = 1; goto out; } copy_event_into_recv_msg(recv_msg, msg); list_add_tail(&(recv_msg->link), &(intf->waiting_events)); intf->waiting_events_count++; } else if (!intf->event_msg_printed) { /* * There's too many things in the queue, discard this * message. 
*/ printk(KERN_WARNING PFX "Event queue full, discarding" " incoming events\n"); intf->event_msg_printed = 1; } out: spin_unlock_irqrestore(&(intf->events_lock), flags); return rv; } static int handle_bmc_rsp(ipmi_smi_t intf, struct ipmi_smi_msg *msg) { struct ipmi_recv_msg *recv_msg; struct ipmi_user *user; recv_msg = (struct ipmi_recv_msg *) msg->user_data; if (recv_msg == NULL) { printk(KERN_WARNING "IPMI message received with no owner. This\n" "could be because of a malformed message, or\n" "because of a hardware error. Contact your\n" "hardware vender for assistance\n"); return 0; } user = recv_msg->user; /* Make sure the user still exists. */ if (user && !user->valid) { /* The user for the message went away, so give up. */ ipmi_inc_stat(intf, unhandled_local_responses); ipmi_free_recv_msg(recv_msg); } else { struct ipmi_system_interface_addr *smi_addr; ipmi_inc_stat(intf, handled_local_responses); recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; recv_msg->msgid = msg->msgid; smi_addr = ((struct ipmi_system_interface_addr *) &(recv_msg->addr)); smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; smi_addr->channel = IPMI_BMC_CHANNEL; smi_addr->lun = msg->rsp[0] & 3; recv_msg->msg.netfn = msg->rsp[0] >> 2; recv_msg->msg.cmd = msg->rsp[1]; memcpy(recv_msg->msg_data, &(msg->rsp[2]), msg->rsp_size - 2); recv_msg->msg.data = recv_msg->msg_data; recv_msg->msg.data_len = msg->rsp_size - 2; deliver_response(recv_msg); } return 0; } /* * Handle a received message. Return 1 if the message should be requeued, * 0 if the message should be freed, or -1 if the message should not * be freed or requeued. */ static int handle_one_recv_msg(ipmi_smi_t intf, struct ipmi_smi_msg *msg) { int requeue; int chan; #ifdef DEBUG_MSGING int m; printk("Recv:"); for (m = 0; m < msg->rsp_size; m++) printk(" %2.2x", msg->rsp[m]); printk("\n"); #endif if (msg->rsp_size < 2) { /* Message is too small to be correct. 
*/ printk(KERN_WARNING PFX "BMC returned to small a message" " for netfn %x cmd %x, got %d bytes\n", (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size); /* Generate an error response for the message. */ msg->rsp[0] = msg->data[0] | (1 << 2); msg->rsp[1] = msg->data[1]; msg->rsp[2] = IPMI_ERR_UNSPECIFIED; msg->rsp_size = 3; } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1)) || (msg->rsp[1] != msg->data[1])) { /* * The NetFN and Command in the response is not even * marginally correct. */ printk(KERN_WARNING PFX "BMC returned incorrect response," " expected netfn %x cmd %x, got netfn %x cmd %x\n", (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp[0] >> 2, msg->rsp[1]); /* Generate an error response for the message. */ msg->rsp[0] = msg->data[0] | (1 << 2); msg->rsp[1] = msg->data[1]; msg->rsp[2] = IPMI_ERR_UNSPECIFIED; msg->rsp_size = 3; } if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) && (msg->rsp[1] == IPMI_SEND_MSG_CMD) && (msg->user_data != NULL)) { /* * It's a response to a response we sent. For this we * deliver a send message response to the user. */ struct ipmi_recv_msg *recv_msg = msg->user_data; requeue = 0; if (msg->rsp_size < 2) /* Message is too small to be correct. */ goto out; chan = msg->data[2] & 0x0f; if (chan >= IPMI_MAX_CHANNELS) /* Invalid channel number */ goto out; if (!recv_msg) goto out; /* Make sure the user still exists. */ if (!recv_msg->user || !recv_msg->user->valid) goto out; recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE; recv_msg->msg.data = recv_msg->msg_data; recv_msg->msg.data_len = 1; recv_msg->msg_data[0] = msg->rsp[2]; deliver_response(recv_msg); } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) && (msg->rsp[1] == IPMI_GET_MSG_CMD)) { /* It's from the receive queue. */ chan = msg->rsp[3] & 0xf; if (chan >= IPMI_MAX_CHANNELS) { /* Invalid channel number */ requeue = 0; goto out; } /* * We need to make sure the channels have been initialized. 
* The channel_handler routine will set the "curr_channel" * equal to or greater than IPMI_MAX_CHANNELS when all the * channels for this interface have been initialized. */ if (intf->curr_channel < IPMI_MAX_CHANNELS) { requeue = 0; /* Throw the message away */ goto out; } switch (intf->channels[chan].medium) { case IPMI_CHANNEL_MEDIUM_IPMB: if (msg->rsp[4] & 0x04) { /* * It's a response, so find the * requesting message and send it up. */ requeue = handle_ipmb_get_msg_rsp(intf, msg); } else { /* * It's a command to the SMS from some other * entity. Handle that. */ requeue = handle_ipmb_get_msg_cmd(intf, msg); } break; case IPMI_CHANNEL_MEDIUM_8023LAN: case IPMI_CHANNEL_MEDIUM_ASYNC: if (msg->rsp[6] & 0x04) { /* * It's a response, so find the * requesting message and send it up. */ requeue = handle_lan_get_msg_rsp(intf, msg); } else { /* * It's a command to the SMS from some other * entity. Handle that. */ requeue = handle_lan_get_msg_cmd(intf, msg); } break; default: /* Check for OEM Channels. Clients had better register for these commands. */ if ((intf->channels[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN) && (intf->channels[chan].medium <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) { requeue = handle_oem_get_msg_cmd(intf, msg); } else { /* * We don't handle the channel type, so just * free the message. */ requeue = 0; } } } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) { /* It's an asyncronous event. */ requeue = handle_read_event_rsp(intf, msg); } else { /* It's a response from the local BMC. */ requeue = handle_bmc_rsp(intf, msg); } out: return requeue; } /* * If there are messages in the queue or pretimeouts, handle them. */ static void handle_new_recv_msgs(ipmi_smi_t intf) { struct ipmi_smi_msg *smi_msg; unsigned long flags = 0; int rv; int run_to_completion = intf->run_to_completion; /* See if any waiting messages need to be processed. 
*/ if (!run_to_completion) spin_lock_irqsave(&intf->waiting_msgs_lock, flags); while (!list_empty(&intf->waiting_msgs)) { smi_msg = list_entry(intf->waiting_msgs.next, struct ipmi_smi_msg, link); list_del(&smi_msg->link); if (!run_to_completion) spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); rv = handle_one_recv_msg(intf, smi_msg); if (!run_to_completion) spin_lock_irqsave(&intf->waiting_msgs_lock, flags); if (rv == 0) { /* Message handled */ ipmi_free_smi_msg(smi_msg); } else if (rv < 0) { /* Fatal error on the message, del but don't free. */ } else { /* * To preserve message order, quit if we * can't handle a message. */ list_add(&smi_msg->link, &intf->waiting_msgs); break; } } if (!run_to_completion) spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); /* * If the pretimout count is non-zero, decrement one from it and * deliver pretimeouts to all the users. */ if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) { ipmi_user_t user; rcu_read_lock(); list_for_each_entry_rcu(user, &intf->users, link) { if (user->handler->ipmi_watchdog_pretimeout) user->handler->ipmi_watchdog_pretimeout( user->handler_data); } rcu_read_unlock(); } } static void smi_recv_tasklet(unsigned long val) { handle_new_recv_msgs((ipmi_smi_t) val); } /* Handle a new message from the lower layer. */ void ipmi_smi_msg_received(ipmi_smi_t intf, struct ipmi_smi_msg *msg) { unsigned long flags = 0; /* keep us warning-free. */ int run_to_completion; if ((msg->data_size >= 2) && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) && (msg->data[1] == IPMI_SEND_MSG_CMD) && (msg->user_data == NULL)) { /* * This is the local response to a command send, start * the timer for these. The user_data will not be * NULL if this is a response send, and we will let * response sends just go through. */ /* * Check for errors, if we get certain errors (ones * that mean basically we can try again later), we * ignore them and start the timer. Otherwise we * report the error immediately. 
*/ if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0) && (msg->rsp[2] != IPMI_NODE_BUSY_ERR) && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR) && (msg->rsp[2] != IPMI_BUS_ERR) && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) { int chan = msg->rsp[3] & 0xf; /* Got an error sending the message, handle it. */ if (chan >= IPMI_MAX_CHANNELS) ; /* This shouldn't happen */ else if ((intf->channels[chan].medium == IPMI_CHANNEL_MEDIUM_8023LAN) || (intf->channels[chan].medium == IPMI_CHANNEL_MEDIUM_ASYNC)) ipmi_inc_stat(intf, sent_lan_command_errs); else ipmi_inc_stat(intf, sent_ipmb_command_errs); intf_err_seq(intf, msg->msgid, msg->rsp[2]); } else /* The message was sent, start the timer. */ intf_start_seq_timer(intf, msg->msgid); ipmi_free_smi_msg(msg); goto out; } /* * To preserve message order, if the list is not empty, we * tack this message onto the end of the list. */ run_to_completion = intf->run_to_completion; if (!run_to_completion) spin_lock_irqsave(&intf->waiting_msgs_lock, flags); list_add_tail(&msg->link, &intf->waiting_msgs); if (!run_to_completion) spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); tasklet_schedule(&intf->recv_tasklet); out: return; } EXPORT_SYMBOL(ipmi_smi_msg_received); void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) { atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1); tasklet_schedule(&intf->recv_tasklet); } EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); static struct ipmi_smi_msg * smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, unsigned char seq, long seqid) { struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg(); if (!smi_msg) /* * If we can't allocate the message, then just return, we * get 4 retries, so this should be ok. 
*/ return NULL; memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len); smi_msg->data_size = recv_msg->msg.data_len; smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); #ifdef DEBUG_MSGING { int m; printk("Resend: "); for (m = 0; m < smi_msg->data_size; m++) printk(" %2.2x", smi_msg->data[m]); printk("\n"); } #endif return smi_msg; } static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, struct list_head *timeouts, long timeout_period, int slot, unsigned long *flags) { struct ipmi_recv_msg *msg; struct ipmi_smi_handlers *handlers; if (intf->intf_num == -1) return; if (!ent->inuse) return; ent->timeout -= timeout_period; if (ent->timeout > 0) return; if (ent->retries_left == 0) { /* The message has used all its retries. */ ent->inuse = 0; msg = ent->recv_msg; list_add_tail(&msg->link, timeouts); if (ent->broadcast) ipmi_inc_stat(intf, timed_out_ipmb_broadcasts); else if (is_lan_addr(&ent->recv_msg->addr)) ipmi_inc_stat(intf, timed_out_lan_commands); else ipmi_inc_stat(intf, timed_out_ipmb_commands); } else { struct ipmi_smi_msg *smi_msg; /* More retries, send again. */ /* * Start with the max timer, set to normal timer after * the message is sent. */ ent->timeout = MAX_MSG_TIMEOUT; ent->retries_left--; smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot, ent->seqid); if (!smi_msg) { if (is_lan_addr(&ent->recv_msg->addr)) ipmi_inc_stat(intf, dropped_rexmit_lan_commands); else ipmi_inc_stat(intf, dropped_rexmit_ipmb_commands); return; } spin_unlock_irqrestore(&intf->seq_lock, *flags); /* * Send the new message. We send with a zero * priority. It timed out, I doubt time is that * critical now, and high priority messages are really * only for messages to the local MC, which don't get * resent. 
*/ handlers = intf->handlers; if (handlers) { if (is_lan_addr(&ent->recv_msg->addr)) ipmi_inc_stat(intf, retransmitted_lan_commands); else ipmi_inc_stat(intf, retransmitted_ipmb_commands); intf->handlers->sender(intf->send_info, smi_msg, 0); } else ipmi_free_smi_msg(smi_msg); spin_lock_irqsave(&intf->seq_lock, *flags); } } static void ipmi_timeout_handler(long timeout_period) { ipmi_smi_t intf; struct list_head timeouts; struct ipmi_recv_msg *msg, *msg2; unsigned long flags; int i; rcu_read_lock(); list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { tasklet_schedule(&intf->recv_tasklet); /* * Go through the seq table and find any messages that * have timed out, putting them in the timeouts * list. */ INIT_LIST_HEAD(&timeouts); spin_lock_irqsave(&intf->seq_lock, flags); for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) check_msg_timeout(intf, &(intf->seq_table[i]), &timeouts, timeout_period, i, &flags); spin_unlock_irqrestore(&intf->seq_lock, flags); list_for_each_entry_safe(msg, msg2, &timeouts, link) deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE); /* * Maintenance mode handling. Check the timeout * optimistically before we claim the lock. It may * mean a timeout gets missed occasionally, but that * only means the timeout gets extended by one period * in that case. No big deal, and it avoids the lock * most of the time. */ if (intf->auto_maintenance_timeout > 0) { spin_lock_irqsave(&intf->maintenance_mode_lock, flags); if (intf->auto_maintenance_timeout > 0) { intf->auto_maintenance_timeout -= timeout_period; if (!intf->maintenance_mode && (intf->auto_maintenance_timeout <= 0)) { intf->maintenance_mode_enable = 0; maintenance_mode_update(intf); } } spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags); } } rcu_read_unlock(); } static void ipmi_request_event(void) { ipmi_smi_t intf; struct ipmi_smi_handlers *handlers; rcu_read_lock(); /* * Called from the timer, no need to check if handlers is * valid. 
*/ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { /* No event requests when in maintenance mode. */ if (intf->maintenance_mode_enable) continue; handlers = intf->handlers; if (handlers) handlers->request_events(intf->send_info); } rcu_read_unlock(); } static struct timer_list ipmi_timer; /* Call every ~1000 ms. */ #define IPMI_TIMEOUT_TIME 1000 /* How many jiffies does it take to get to the timeout time. */ #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000) /* * Request events from the queue every second (this is the number of * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the * future, IPMI will add a way to know immediately if an event is in * the queue and this silliness can go away. */ #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME)) static atomic_t stop_operation; static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME; static void ipmi_timeout(unsigned long data) { if (atomic_read(&stop_operation)) return; ticks_to_req_ev--; if (ticks_to_req_ev == 0) { ipmi_request_event(); ticks_to_req_ev = IPMI_REQUEST_EV_TIME; } ipmi_timeout_handler(IPMI_TIMEOUT_TIME); mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); } static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0); static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0); /* FIXME - convert these to slabs. 
*/ static void free_smi_msg(struct ipmi_smi_msg *msg) { atomic_dec(&smi_msg_inuse_count); kfree(msg); } struct ipmi_smi_msg *ipmi_alloc_smi_msg(void) { struct ipmi_smi_msg *rv; rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC); if (rv) { rv->done = free_smi_msg; rv->user_data = NULL; atomic_inc(&smi_msg_inuse_count); } return rv; } EXPORT_SYMBOL(ipmi_alloc_smi_msg); static void free_recv_msg(struct ipmi_recv_msg *msg) { atomic_dec(&recv_msg_inuse_count); kfree(msg); } static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) { struct ipmi_recv_msg *rv; rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC); if (rv) { rv->user = NULL; rv->done = free_recv_msg; atomic_inc(&recv_msg_inuse_count); } return rv; } void ipmi_free_recv_msg(struct ipmi_recv_msg *msg) { if (msg->user) kref_put(&msg->user->refcount, free_user); msg->done(msg); } EXPORT_SYMBOL(ipmi_free_recv_msg); #ifdef CONFIG_IPMI_PANIC_EVENT static atomic_t panic_done_count = ATOMIC_INIT(0); static void dummy_smi_done_handler(struct ipmi_smi_msg *msg) { atomic_dec(&panic_done_count); } static void dummy_recv_done_handler(struct ipmi_recv_msg *msg) { atomic_dec(&panic_done_count); } /* * Inside a panic, send a message and wait for a response. */ static void ipmi_panic_request_and_wait(ipmi_smi_t intf, struct ipmi_addr *addr, struct kernel_ipmi_msg *msg) { struct ipmi_smi_msg smi_msg; struct ipmi_recv_msg recv_msg; int rv; smi_msg.done = dummy_smi_done_handler; recv_msg.done = dummy_recv_done_handler; atomic_add(2, &panic_done_count); rv = i_ipmi_request(NULL, intf, addr, 0, msg, intf, &smi_msg, &recv_msg, 0, intf->channels[0].address, intf->channels[0].lun, 0, 1); /* Don't retry, and don't wait. 
*/ if (rv) atomic_sub(2, &panic_done_count); while (atomic_read(&panic_done_count) != 0) ipmi_poll(intf); } #ifdef CONFIG_IPMI_PANIC_STRING static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg) { if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE) && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD) && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) { /* A get event receiver command, save it. */ intf->event_receiver = msg->msg.data[1]; intf->event_receiver_lun = msg->msg.data[2] & 0x3; } } static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg) { if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD) && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) { /* * A get device id command, save if we are an event * receiver or generator. */ intf->local_sel_device = (msg->msg.data[6] >> 2) & 1; intf->local_event_generator = (msg->msg.data[6] >> 5) & 1; } } #endif static void send_panic_events(char *str) { struct kernel_ipmi_msg msg; ipmi_smi_t intf; unsigned char data[16]; struct ipmi_system_interface_addr *si; struct ipmi_addr addr; si = (struct ipmi_system_interface_addr *) &addr; si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; si->channel = IPMI_BMC_CHANNEL; si->lun = 0; /* Fill in an event telling that we have failed. */ msg.netfn = 0x04; /* Sensor or Event. */ msg.cmd = 2; /* Platform event command. */ msg.data = data; msg.data_len = 8; data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */ data[1] = 0x03; /* This is for IPMI 1.0. */ data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */ data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */ data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */ /* * Put a few breadcrumbs in. Hopefully later we can add more things * to make the panic events more useful. 
*/ if (str) { data[3] = str[0]; data[6] = str[1]; data[7] = str[2]; } /* For every registered interface, send the event. */ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { if (!intf->handlers) /* Interface is not ready. */ continue; intf->run_to_completion = 1; /* Send the event announcing the panic. */ intf->handlers->set_run_to_completion(intf->send_info, 1); ipmi_panic_request_and_wait(intf, &addr, &msg); } #ifdef CONFIG_IPMI_PANIC_STRING /* * On every interface, dump a bunch of OEM event holding the * string. */ if (!str) return; /* For every registered interface, send the event. */ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { char *p = str; struct ipmi_ipmb_addr *ipmb; int j; if (intf->intf_num == -1) /* Interface was not ready yet. */ continue; /* * intf_num is used as an marker to tell if the * interface is valid. Thus we need a read barrier to * make sure data fetched before checking intf_num * won't be used. */ smp_rmb(); /* * First job here is to figure out where to send the * OEM events. There's no way in IPMI to send OEM * events using an event send command, so we have to * find the SEL to put them in and stick them in * there. */ /* Get capabilities from the get device id. */ intf->local_sel_device = 0; intf->local_event_generator = 0; intf->event_receiver = 0; /* Request the device info from the local MC. */ msg.netfn = IPMI_NETFN_APP_REQUEST; msg.cmd = IPMI_GET_DEVICE_ID_CMD; msg.data = NULL; msg.data_len = 0; intf->null_user_handler = device_id_fetcher; ipmi_panic_request_and_wait(intf, &addr, &msg); if (intf->local_event_generator) { /* Request the event receiver from the local MC. */ msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST; msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD; msg.data = NULL; msg.data_len = 0; intf->null_user_handler = event_receiver_fetcher; ipmi_panic_request_and_wait(intf, &addr, &msg); } intf->null_user_handler = NULL; /* * Validate the event receiver. 
The low bit must not * be 1 (it must be a valid IPMB address), it cannot * be zero, and it must not be my address. */ if (((intf->event_receiver & 1) == 0) && (intf->event_receiver != 0) && (intf->event_receiver != intf->channels[0].address)) { /* * The event receiver is valid, send an IPMB * message. */ ipmb = (struct ipmi_ipmb_addr *) &addr; ipmb->addr_type = IPMI_IPMB_ADDR_TYPE; ipmb->channel = 0; /* FIXME - is this right? */ ipmb->lun = intf->event_receiver_lun; ipmb->slave_addr = intf->event_receiver; } else if (intf->local_sel_device) { /* * The event receiver was not valid (or was * me), but I am an SEL device, just dump it * in my SEL. */ si = (struct ipmi_system_interface_addr *) &addr; si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; si->channel = IPMI_BMC_CHANNEL; si->lun = 0; } else continue; /* No where to send the event. */ msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */ msg.cmd = IPMI_ADD_SEL_ENTRY_CMD; msg.data = data; msg.data_len = 16; j = 0; while (*p) { int size = strlen(p); if (size > 11) size = 11; data[0] = 0; data[1] = 0; data[2] = 0xf0; /* OEM event without timestamp. */ data[3] = intf->channels[0].address; data[4] = j++; /* sequence # */ /* * Always give 11 bytes, so strncpy will fill * it with zeroes for me. */ strncpy(data+5, p, 11); p += size; ipmi_panic_request_and_wait(intf, &addr, &msg); } } #endif /* CONFIG_IPMI_PANIC_STRING */ } #endif /* CONFIG_IPMI_PANIC_EVENT */ static int has_panicked; static int panic_event(struct notifier_block *this, unsigned long event, void *ptr) { ipmi_smi_t intf; if (has_panicked) return NOTIFY_DONE; has_panicked = 1; /* For every registered interface, set it to run to completion. */ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { if (!intf->handlers) /* Interface is not ready. 
*/ continue; intf->run_to_completion = 1; intf->handlers->set_run_to_completion(intf->send_info, 1); } #ifdef CONFIG_IPMI_PANIC_EVENT send_panic_events(ptr); #endif return NOTIFY_DONE; } static struct notifier_block panic_block = { .notifier_call = panic_event, .next = NULL, .priority = 200 /* priority: INT_MAX >= x >= 0 */ }; static int ipmi_init_msghandler(void) { int rv; if (initialized) return 0; rv = driver_register(&ipmidriver.driver); if (rv) { printk(KERN_ERR PFX "Could not register IPMI driver\n"); return rv; } printk(KERN_INFO "ipmi message handler version " IPMI_DRIVER_VERSION "\n"); #ifdef CONFIG_PROC_FS proc_ipmi_root = proc_mkdir("ipmi", NULL); if (!proc_ipmi_root) { printk(KERN_ERR PFX "Unable to create IPMI proc dir"); return -ENOMEM; } #endif /* CONFIG_PROC_FS */ setup_timer(&ipmi_timer, ipmi_timeout, 0); mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); atomic_notifier_chain_register(&panic_notifier_list, &panic_block); initialized = 1; return 0; } static int __init ipmi_init_msghandler_mod(void) { ipmi_init_msghandler(); return 0; } static void __exit cleanup_ipmi(void) { int count; if (!initialized) return; atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block); /* * This can't be called if any interfaces exist, so no worry * about shutting down the interfaces. */ /* * Tell the timer to stop, then wait for it to stop. This * avoids problems with race conditions removing the timer * here. */ atomic_inc(&stop_operation); del_timer_sync(&ipmi_timer); #ifdef CONFIG_PROC_FS remove_proc_entry(proc_ipmi_root->name, NULL); #endif /* CONFIG_PROC_FS */ driver_unregister(&ipmidriver.driver); initialized = 0; /* Check for buffer leaks. 
*/ count = atomic_read(&smi_msg_inuse_count); if (count != 0) printk(KERN_WARNING PFX "SMI message count %d at exit\n", count); count = atomic_read(&recv_msg_inuse_count); if (count != 0) printk(KERN_WARNING PFX "recv message count %d at exit\n", count); } module_exit(cleanup_ipmi); module_init(ipmi_init_msghandler_mod); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI" " interface."); MODULE_VERSION(IPMI_DRIVER_VERSION);
gpl-2.0
novaspirit/tf101-nv-linux
fs/ext2/file.c
8007
2829
/* * linux/fs/ext2/file.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/file.c * * Copyright (C) 1991, 1992 Linus Torvalds * * ext2 fs regular file handling primitives * * 64-bit file support on 64-bit platforms by Jakub Jelinek * (jj@sunsite.ms.mff.cuni.cz) */ #include <linux/time.h> #include <linux/pagemap.h> #include <linux/quotaops.h> #include "ext2.h" #include "xattr.h" #include "acl.h" /* * Called when filp is released. This happens when all file descriptors * for a single struct file are closed. Note that different open() calls * for the same file yield different struct file structures. */ static int ext2_release_file (struct inode * inode, struct file * filp) { if (filp->f_mode & FMODE_WRITE) { mutex_lock(&EXT2_I(inode)->truncate_mutex); ext2_discard_reservation(inode); mutex_unlock(&EXT2_I(inode)->truncate_mutex); } return 0; } int ext2_fsync(struct file *file, loff_t start, loff_t end, int datasync) { int ret; struct super_block *sb = file->f_mapping->host->i_sb; struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping; ret = generic_file_fsync(file, start, end, datasync); if (ret == -EIO || test_and_clear_bit(AS_EIO, &mapping->flags)) { /* We don't really know where the IO error happened... */ ext2_error(sb, __func__, "detected IO error when writing metadata buffers"); ret = -EIO; } return ret; } /* * We have mostly NULL's here: the current defaults are ok for * the ext2 filesystem. 
*/ const struct file_operations ext2_file_operations = { .llseek = generic_file_llseek, .read = do_sync_read, .write = do_sync_write, .aio_read = generic_file_aio_read, .aio_write = generic_file_aio_write, .unlocked_ioctl = ext2_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext2_compat_ioctl, #endif .mmap = generic_file_mmap, .open = dquot_file_open, .release = ext2_release_file, .fsync = ext2_fsync, .splice_read = generic_file_splice_read, .splice_write = generic_file_splice_write, }; #ifdef CONFIG_EXT2_FS_XIP const struct file_operations ext2_xip_file_operations = { .llseek = generic_file_llseek, .read = xip_file_read, .write = xip_file_write, .unlocked_ioctl = ext2_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext2_compat_ioctl, #endif .mmap = xip_file_mmap, .open = dquot_file_open, .release = ext2_release_file, .fsync = ext2_fsync, }; #endif const struct inode_operations ext2_file_inode_operations = { #ifdef CONFIG_EXT2_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = ext2_listxattr, .removexattr = generic_removexattr, #endif .setattr = ext2_setattr, .get_acl = ext2_get_acl, .fiemap = ext2_fiemap, };
gpl-2.0
Smando87/smdk4412_kernel
arch/powerpc/oprofile/cell/spu_task_sync.c
8519
18086
/*
 * Cell Broadband Engine OProfile Support
 *
 * (C) Copyright IBM Corporation 2006
 *
 * Author: Maynard Johnson <maynardj@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/* The purpose of this file is to handle SPU event task switching
 * and to record SPU context information into the OProfile
 * event buffer.
 *
 * Additionally, the spu_sync_buffer function is provided as a helper
 * for recoding actual SPU program counter samples to the event buffer.
 */
#include <linux/dcookies.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/numa.h>
#include <linux/oprofile.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "pr_util.h"

/* Sentinel index passed to release_cached_info() to drop every entry. */
#define RELEASE_ALL 9999

/* buffer_lock guards the per-SPU circular buffers (head/tail/contents);
 * cache_lock guards the spu_info cached-context array. */
static DEFINE_SPINLOCK(buffer_lock);
static DEFINE_SPINLOCK(cache_lock);
static int num_spu_nodes;
int spu_prof_num_nodes;

/* One circular sample buffer per possible SPU, plus the deferred work
 * item that periodically drains them into the OProfile event buffer. */
struct spu_buffer spu_buff[MAX_NUMNODES * SPUS_PER_NODE];
struct delayed_work spu_work;
static unsigned max_spu_buff;	/* capacity of each per-SPU buffer */

/* Append @value to SPU @spu's circular buffer, or account a lost
 * sample if the buffer is full. */
static void spu_buff_add(unsigned long int value, int spu)
{
	/* spu buff is a circular buffer.  Add entries to the
	 * head.  Head is the index to store the next value.
	 * The buffer is full when there is one available entry
	 * in the queue, i.e. head and tail can't be equal.
	 * That way we can tell the difference between the
	 * buffer being full versus empty.
	 *
	 * ASSUMPTION: the buffer_lock is held when this function
	 * is called to lock the buffer, head and tail.
	 */
	int full = 1;

	if (spu_buff[spu].head >= spu_buff[spu].tail) {
		if ((spu_buff[spu].head - spu_buff[spu].tail)
		    < (max_spu_buff - 1))
			full = 0;
	} else if (spu_buff[spu].tail > spu_buff[spu].head) {
		if ((spu_buff[spu].tail - spu_buff[spu].head)
		    > 1)
			full = 0;
	}

	if (!full) {
		spu_buff[spu].buff[spu_buff[spu].head] = value;
		spu_buff[spu].head++;

		/* wrap the head index */
		if (spu_buff[spu].head >= max_spu_buff)
			spu_buff[spu].head = 0;
	} else {
		/* From the user's perspective make the SPU buffer
		 * size management/overflow look like we are using
		 * per cpu buffers.  The user uses the same
		 * per cpu parameter to adjust the SPU buffer size.
		 * Increment the sample_lost_overflow to inform
		 * the user the buffer size needs to be increased.
		 */
		oprofile_cpu_buffer_inc_smpl_lost();
	}
}

/* This function copies the per SPU buffers to the
 * OProfile kernel buffer.
 */
void sync_spu_buff(void)
{
	int spu;
	unsigned long flags;
	int curr_head;

	for (spu = 0; spu < num_spu_nodes; spu++) {
		/* In case there was an issue and the buffer didn't
		 * get created skip it.
		 */
		if (spu_buff[spu].buff == NULL)
			continue;

		/* Hold the lock to make sure the head/tail
		 * doesn't change while spu_buff_add() is
		 * deciding if the buffer is full or not.
		 * Being a little paranoid.
		 */
		spin_lock_irqsave(&buffer_lock, flags);
		curr_head = spu_buff[spu].head;
		spin_unlock_irqrestore(&buffer_lock, flags);

		/* Transfer the current contents to the kernel buffer.
		 * data can still be added to the head of the buffer.
		 */
		oprofile_put_buff(spu_buff[spu].buff,
				  spu_buff[spu].tail,
				  curr_head, max_spu_buff);

		/* Advance tail past everything just transferred. */
		spin_lock_irqsave(&buffer_lock, flags);
		spu_buff[spu].tail = curr_head;
		spin_unlock_irqrestore(&buffer_lock, flags);
	}
}

/* Work-queue callback: drain the per-SPU buffers and re-arm itself
 * for as long as profiling is active. */
static void wq_sync_spu_buff(struct work_struct *work)
{
	/* move data from spu buffers to kernel buffer */
	sync_spu_buff();

	/* only reschedule if profiling is not done */
	if (spu_prof_running)
		schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
}

/* Container for caching information about an active SPU task.
 */
struct cached_info {
	struct vma_to_fileoffset_map *map;
	struct spu *the_spu;	/* needed to access pointer to local_store */
	struct kref cache_ref;
};

/* Per-SPU cache of context info, indexed by spu->number (system-wide).
 * Protected by cache_lock. */
static struct cached_info *spu_info[MAX_NUMNODES * 8];

/* kref release callback: frees the vma map and the cached_info itself,
 * and drops the module reference taken in prepare_cached_spu_info(). */
static void destroy_cached_info(struct kref *kref)
{
	struct cached_info *info;

	info = container_of(kref, struct cached_info, cache_ref);
	vma_map_free(info->map);
	kfree(info);
	module_put(THIS_MODULE);
}

/* Return the cached_info for the passed SPU number.
 * ATTENTION: Callers are responsible for obtaining the
 * cache_lock if needed prior to invoking this function.
 */
static struct cached_info *get_cached_info(struct spu *the_spu, int spu_num)
{
	struct kref *ref;
	struct cached_info *ret_info;

	if (spu_num >= num_spu_nodes) {
		printk(KERN_ERR "SPU_PROF: "
		       "%s, line %d: Invalid index %d into spu info cache\n",
		       __func__, __LINE__, spu_num);
		ret_info = NULL;
		goto out;
	}
	/* Cache miss: try to repopulate from the kref SPUFS holds for us. */
	if (!spu_info[spu_num] && the_spu) {
		ref = spu_get_profile_private_kref(the_spu->ctx);
		if (ref) {
			spu_info[spu_num] = container_of(ref,
					struct cached_info, cache_ref);
			kref_get(&spu_info[spu_num]->cache_ref);
		}
	}

	ret_info = spu_info[spu_num];
out:
	return ret_info;
}

/* Looks for cached info for the passed spu. If not found, the
 * cached info is created for the passed spu.
 * Returns 0 for success; otherwise, -1 for error.
 */
static int prepare_cached_spu_info(struct spu *spu, unsigned long objectId)
{
	unsigned long flags;
	struct vma_to_fileoffset_map *new_map;
	int retval = 0;
	struct cached_info *info;

	/* We won't bother getting cache_lock here since
	 * don't do anything with the cached_info that's returned.
	 */
	info = get_cached_info(spu, spu->number);

	if (info) {
		pr_debug("Found cached SPU info.\n");
		goto out;
	}

	/* Create cached_info and set spu_info[spu->number] to point to it.
	 * spu->number is a system-wide value, not a per-node value.
	 */
	info = kzalloc(sizeof(struct cached_info), GFP_KERNEL);
	if (!info) {
		printk(KERN_ERR "SPU_PROF: "
		       "%s, line %d: create vma_map failed\n",
		       __func__, __LINE__);
		retval = -ENOMEM;
		goto err_alloc;
	}
	new_map = create_vma_map(spu, objectId);
	if (!new_map) {
		printk(KERN_ERR "SPU_PROF: "
		       "%s, line %d: create vma_map failed\n",
		       __func__, __LINE__);
		retval = -ENOMEM;
		goto err_alloc;
	}

	pr_debug("Created vma_map\n");
	info->map = new_map;
	info->the_spu = spu;
	kref_init(&info->cache_ref);
	spin_lock_irqsave(&cache_lock, flags);
	spu_info[spu->number] = info;
	/* Increment count before passing off ref to SPUFS. */
	kref_get(&info->cache_ref);

	/* We increment the module refcount here since SPUFS is
	 * responsible for the final destruction of the cached_info,
	 * and it must be able to access the destroy_cached_info()
	 * function defined in the OProfile module.  We decrement
	 * the module refcount in destroy_cached_info.
	 */
	try_module_get(THIS_MODULE);
	spu_set_profile_private_kref(spu->ctx, &info->cache_ref,
				     destroy_cached_info);
	spin_unlock_irqrestore(&cache_lock, flags);
	goto out;

err_alloc:
	/* new_map (if any) is owned by info only after assignment above,
	 * so on this path only info itself needs freeing. */
	kfree(info);
out:
	return retval;
}

/* Drop the cache's reference on one entry (or all entries when
 * spu_index == RELEASE_ALL).  Always returns 0.
 *
 * NOTE: The caller is responsible for locking the
 *	 cache_lock prior to calling this function.
 */
static int release_cached_info(int spu_index)
{
	int index, end;

	if (spu_index == RELEASE_ALL) {
		end = num_spu_nodes;
		index = 0;
	} else {
		if (spu_index >= num_spu_nodes) {
			printk(KERN_ERR "SPU_PROF: "
			       "%s, line %d: "
			       "Invalid index %d into spu info cache\n",
			       __func__, __LINE__, spu_index);
			goto out;
		}
		end = spu_index + 1;
		index = spu_index;
	}
	for (; index < end; index++) {
		if (spu_info[index]) {
			kref_put(&spu_info[index]->cache_ref,
				 destroy_cached_info);
			spu_info[index] = NULL;
		}
	}

out:
	return 0;
}

/* The source code for fast_get_dcookie was "borrowed"
 * from drivers/oprofile/buffer_sync.c.
 */

/* Optimisation.
We can manage without taking the dcookie sem * because we cannot reach this code without at least one * dcookie user still being registered (namely, the reader * of the event buffer). */ static inline unsigned long fast_get_dcookie(struct path *path) { unsigned long cookie; if (path->dentry->d_flags & DCACHE_COOKIE) return (unsigned long)path->dentry; get_dcookie(path, &cookie); return cookie; } /* Look up the dcookie for the task's first VM_EXECUTABLE mapping, * which corresponds loosely to "application name". Also, determine * the offset for the SPU ELF object. If computed offset is * non-zero, it implies an embedded SPU object; otherwise, it's a * separate SPU binary, in which case we retrieve it's dcookie. * For the embedded case, we must determine if SPU ELF is embedded * in the executable application or another file (i.e., shared lib). * If embedded in a shared lib, we must get the dcookie and return * that to the caller. */ static unsigned long get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp, unsigned long *spu_bin_dcookie, unsigned long spu_ref) { unsigned long app_cookie = 0; unsigned int my_offset = 0; struct file *app = NULL; struct vm_area_struct *vma; struct mm_struct *mm = spu->mm; if (!mm) goto out; down_read(&mm->mmap_sem); for (vma = mm->mmap; vma; vma = vma->vm_next) { if (!vma->vm_file) continue; if (!(vma->vm_flags & VM_EXECUTABLE)) continue; app_cookie = fast_get_dcookie(&vma->vm_file->f_path); pr_debug("got dcookie for %s\n", vma->vm_file->f_dentry->d_name.name); app = vma->vm_file; break; } for (vma = mm->mmap; vma; vma = vma->vm_next) { if (vma->vm_start > spu_ref || vma->vm_end <= spu_ref) continue; my_offset = spu_ref - vma->vm_start; if (!vma->vm_file) goto fail_no_image_cookie; pr_debug("Found spu ELF at %X(object-id:%lx) for file %s\n", my_offset, spu_ref, vma->vm_file->f_dentry->d_name.name); *offsetp = my_offset; break; } *spu_bin_dcookie = fast_get_dcookie(&vma->vm_file->f_path); pr_debug("got dcookie for %s\n", 
vma->vm_file->f_dentry->d_name.name); up_read(&mm->mmap_sem); out: return app_cookie; fail_no_image_cookie: up_read(&mm->mmap_sem); printk(KERN_ERR "SPU_PROF: " "%s, line %d: Cannot find dcookie for SPU binary\n", __func__, __LINE__); goto out; } /* This function finds or creates cached context information for the * passed SPU and records SPU context information into the OProfile * event buffer. */ static int process_context_switch(struct spu *spu, unsigned long objectId) { unsigned long flags; int retval; unsigned int offset = 0; unsigned long spu_cookie = 0, app_dcookie; retval = prepare_cached_spu_info(spu, objectId); if (retval) goto out; /* Get dcookie first because a mutex_lock is taken in that * code path, so interrupts must not be disabled. */ app_dcookie = get_exec_dcookie_and_offset(spu, &offset, &spu_cookie, objectId); if (!app_dcookie || !spu_cookie) { retval = -ENOENT; goto out; } /* Record context info in event buffer */ spin_lock_irqsave(&buffer_lock, flags); spu_buff_add(ESCAPE_CODE, spu->number); spu_buff_add(SPU_CTX_SWITCH_CODE, spu->number); spu_buff_add(spu->number, spu->number); spu_buff_add(spu->pid, spu->number); spu_buff_add(spu->tgid, spu->number); spu_buff_add(app_dcookie, spu->number); spu_buff_add(spu_cookie, spu->number); spu_buff_add(offset, spu->number); /* Set flag to indicate SPU PC data can now be written out. If * the SPU program counter data is seen before an SPU context * record is seen, the postprocessing will fail. */ spu_buff[spu->number].ctx_sw_seen = 1; spin_unlock_irqrestore(&buffer_lock, flags); smp_wmb(); /* insure spu event buffer updates are written */ /* don't want entries intermingled... */ out: return retval; } /* * This function is invoked on either a bind_context or unbind_context. * If called for an unbind_context, the val arg is 0; otherwise, * it is the object-id value for the spu context. * The data arg is of type 'struct spu *'. 
 */
static int spu_active_notify(struct notifier_block *self, unsigned long val,
			     void *data)
{
	int retval;
	unsigned long flags;
	struct spu *the_spu = data;

	pr_debug("SPU event notification arrived\n");
	if (!val) {
		/* unbind: drop the cached context info for this SPU */
		spin_lock_irqsave(&cache_lock, flags);
		retval = release_cached_info(the_spu->number);
		spin_unlock_irqrestore(&cache_lock, flags);
	} else {
		/* bind: val is the object-id of the new SPU context */
		retval = process_context_switch(the_spu, val);
	}
	return retval;
}

static struct notifier_block spu_active = {
	.notifier_call = spu_active_notify,
};

/* Count distinct Cell nodes that have at least one online CPU.
 * Relies on cbe_cpu_to_node() returning small consecutive node ids. */
static int number_of_online_nodes(void)
{
	u32 cpu;
	u32 tmp;
	int nodes = 0;

	for_each_online_cpu(cpu) {
		tmp = cbe_cpu_to_node(cpu) + 1;
		if (tmp > nodes)
			nodes++;
	}
	return nodes;
}

/* Allocate one circular sample buffer per SPU.
 * Returns 0 on success or -ENOMEM (after freeing any partial
 * allocations) on failure. */
static int oprofile_spu_buff_create(void)
{
	int spu;

	/* Sized by the same knob the user already uses for CPU buffers. */
	max_spu_buff = oprofile_get_cpu_buffer_size();

	for (spu = 0; spu < num_spu_nodes; spu++) {
		/* create circular buffers to store the data in.
		 * use locks to manage accessing the buffers
		 */
		spu_buff[spu].head = 0;
		spu_buff[spu].tail = 0;

		/*
		 * Create a buffer for each SPU.  Can't reliably
		 * create a single buffer for all spus due to not
		 * enough contiguous kernel memory.
		 */

		spu_buff[spu].buff = kzalloc((max_spu_buff
					      * sizeof(unsigned long)),
					     GFP_KERNEL);

		if (!spu_buff[spu].buff) {
			printk(KERN_ERR "SPU_PROF: "
			       "%s, line %d: oprofile_spu_buff_create "
			       "failed to allocate spu buffer %d.\n",
			       __func__, __LINE__, spu);

			/* release the spu buffers that have been allocated */
			while (spu >= 0) {
				/* kfree(NULL) is a no-op for index 'spu'
				 * itself, whose allocation just failed */
				kfree(spu_buff[spu].buff);
				spu_buff[spu].buff = 0;
				spu--;
			}
			return -ENOMEM;
		}
	}
	return 0;
}

/* The main purpose of this function is to synchronize
 * OProfile with SPUFS by registering to be notified of
 * SPU task switches.
 *
 * NOTE: When profiling SPUs, we must ensure that only
 * spu_sync_start is invoked and not the generic sync_start
 * in drivers/oprofile/oprof.c.	 A return value of
 * SKIP_GENERIC_SYNC or SYNC_START_ERROR will
 * accomplish this.
 */
int spu_sync_start(void)
{
	int spu;
	int ret = SKIP_GENERIC_SYNC;
	int register_ret;
	unsigned long flags = 0;

	spu_prof_num_nodes = number_of_online_nodes();
	num_spu_nodes = spu_prof_num_nodes * 8;
	INIT_DELAYED_WORK(&spu_work, wq_sync_spu_buff);

	/* create buffer for storing the SPU data to put in
	 * the kernel buffer.
	 *
	 * NOTE(review): on success oprofile_spu_buff_create() returns 0,
	 * which overwrites the SKIP_GENERIC_SYNC initializer, so the
	 * success return value of this function is 0 — confirm callers
	 * expect that rather than SKIP_GENERIC_SYNC.
	 */
	ret = oprofile_spu_buff_create();
	if (ret)
		goto out;

	/* Seed every per-SPU buffer with the profiling header record. */
	spin_lock_irqsave(&buffer_lock, flags);
	for (spu = 0; spu < num_spu_nodes; spu++) {
		spu_buff_add(ESCAPE_CODE, spu);
		spu_buff_add(SPU_PROFILING_CODE, spu);
		spu_buff_add(num_spu_nodes, spu);
	}
	spin_unlock_irqrestore(&buffer_lock, flags);

	for (spu = 0; spu < num_spu_nodes; spu++) {
		spu_buff[spu].ctx_sw_seen = 0;
		spu_buff[spu].last_guard_val = 0;
	}

	/* Register for SPU events  */
	register_ret = spu_switch_event_register(&spu_active);
	if (register_ret) {
		ret = SYNC_START_ERROR;
		goto out;
	}

	pr_debug("spu_sync_start -- running.\n");
out:
	return ret;
}

/* Record SPU program counter samples to the oprofile event buffer. */
void spu_sync_buffer(int spu_num, unsigned int *samples,
		     int num_samples)
{
	unsigned long long file_offset;
	unsigned long flags;
	int i;
	struct vma_to_fileoffset_map *map;
	struct spu *the_spu;
	unsigned long long spu_num_ll = spu_num;
	/* SPU number is packed into the upper 32 bits of each sample. */
	unsigned long long spu_num_shifted = spu_num_ll << 32;
	struct cached_info *c_info;

	/* We need to obtain the cache_lock here because it's
	 * possible that after getting the cached_info, the SPU job
	 * corresponding to this cached_info may end, thus resulting
	 * in the destruction of the cached_info.
	 */
	spin_lock_irqsave(&cache_lock, flags);
	c_info = get_cached_info(NULL, spu_num);
	if (!c_info) {
		/* This legitimately happens when the SPU task ends before all
		 * samples are recorded.
		 * No big deal -- so we just drop a few samples.
		 */
		pr_debug("SPU_PROF: No cached SPU contex "
			 "for SPU #%d. Dropping samples.\n", spu_num);
		goto out;
	}

	map = c_info->map;
	the_spu = c_info->the_spu;
	spin_lock(&buffer_lock);
	for (i = 0; i < num_samples; i++) {
		unsigned int sample = *(samples+i);
		int grd_val = 0;
		file_offset = 0;
		if (sample == 0)
			continue;
		file_offset = vma_map_lookup( map, sample, the_spu, &grd_val);

		/* If overlays are used by this SPU application, the guard
		 * value is non-zero, indicating which overlay section is in
		 * use.	 We need to discard samples taken during the time
		 * period which an overlay occurs (i.e., guard value changes).
		 */
		if (grd_val && grd_val != spu_buff[spu_num].last_guard_val) {
			spu_buff[spu_num].last_guard_val = grd_val;
			/* Drop the rest of the samples. */
			break;
		}

		/* We must ensure that the SPU context switch has been written
		 * out before samples for the SPU.  Otherwise, the SPU context
		 * information is not available and the postprocessing of the
		 * SPU PC will fail with no available anonymous map information.
		 */
		if (spu_buff[spu_num].ctx_sw_seen)
			spu_buff_add((file_offset | spu_num_shifted),
				     spu_num);
	}
	spin_unlock(&buffer_lock);
out:
	spin_unlock_irqrestore(&cache_lock, flags);
}

/* Tear down SPU profiling: unregister the switch notifier, flush and
 * free the per-SPU buffers, and drop all cached context info.
 * Returns the notifier-unregister result. */
int spu_sync_stop(void)
{
	unsigned long flags = 0;
	int ret;
	int k;

	ret = spu_switch_event_unregister(&spu_active);
	if (ret)
		printk(KERN_ERR "SPU_PROF: "
		       "%s, line %d: spu_switch_event_unregister "	\
		       "returned %d\n",
		       __func__, __LINE__, ret);

	/* flush any remaining data in the per SPU buffers */
	sync_spu_buff();

	spin_lock_irqsave(&cache_lock, flags);
	ret = release_cached_info(RELEASE_ALL);
	spin_unlock_irqrestore(&cache_lock, flags);

	/* remove scheduled work queue item rather then waiting
	 * for every queued entry to execute.  Then flush pending
	 * system wide buffer to event buffer.
	 */
	cancel_delayed_work(&spu_work);

	for (k = 0; k < num_spu_nodes; k++) {
		spu_buff[k].ctx_sw_seen = 0;

		/*
		 * spu_sys_buff will be null if there was a problem
		 * allocating the buffer.  Only delete if it exists.
		 */
		kfree(spu_buff[k].buff);
		spu_buff[k].buff = 0;
	}
	pr_debug("spu_sync_stop -- done.\n");
	return ret;
}
gpl-2.0
Beeko/android_kernel_samsung_d2
arch/m68k/sun3/prom/init.c
9287
1774
/* * init.c: Initialize internal variables used by the PROM * library functions. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) */ #include <linux/kernel.h> #include <linux/init.h> #include <asm/openprom.h> #include <asm/oplib.h> struct linux_romvec *romvec; enum prom_major_version prom_vers; unsigned int prom_rev, prom_prev; /* The root node of the prom device tree. */ int prom_root_node; /* Pointer to the device tree operations structure. */ struct linux_nodeops *prom_nodeops; /* You must call prom_init() before you attempt to use any of the * routines in the prom library. It returns 0 on success, 1 on * failure. It gets passed the pointer to the PROM vector. */ extern void prom_meminit(void); extern void prom_ranges_init(void); void __init prom_init(struct linux_romvec *rp) { romvec = rp; #ifndef CONFIG_SUN3 switch(romvec->pv_romvers) { case 0: prom_vers = PROM_V0; break; case 2: prom_vers = PROM_V2; break; case 3: prom_vers = PROM_V3; break; case 4: prom_vers = PROM_P1275; prom_printf("PROMLIB: Sun IEEE Prom not supported yet\n"); prom_halt(); break; default: prom_printf("PROMLIB: Bad PROM version %d\n", romvec->pv_romvers); prom_halt(); break; }; prom_rev = romvec->pv_plugin_revision; prom_prev = romvec->pv_printrev; prom_nodeops = romvec->pv_nodeops; prom_root_node = prom_getsibling(0); if((prom_root_node == 0) || (prom_root_node == -1)) prom_halt(); if((((unsigned long) prom_nodeops) == 0) || (((unsigned long) prom_nodeops) == -1)) prom_halt(); prom_meminit(); prom_ranges_init(); #endif // printk("PROMLIB: Sun Boot Prom Version %d Revision %d\n", // romvec->pv_romvers, prom_rev); /* Initialization successful. */ return; }
gpl-2.0
faux123/NX403A
fs/jffs2/write.c
9287
21540
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/crc32.h>
#include <linux/pagemap.h>
#include <linux/mtd/mtd.h>
#include "nodelist.h"
#include "compr.h"

/* Allocate and register a fresh inode cache entry for @f and fill in
 * the constant parts of the raw inode @ri (ino, magic, node type,
 * header CRC, mode, version 1).  Returns 0 or -ENOMEM. */
int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
		       uint32_t mode, struct jffs2_raw_inode *ri)
{
	struct jffs2_inode_cache *ic;

	ic = jffs2_alloc_inode_cache();
	if (!ic) {
		return -ENOMEM;
	}

	memset(ic, 0, sizeof(*ic));

	f->inocache = ic;
	f->inocache->pino_nlink = 1; /* Will be overwritten shortly for directories */
	f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
	f->inocache->state = INO_STATE_PRESENT;

	jffs2_add_ino_cache(c, f->inocache);
	jffs2_dbg(1, "%s(): Assigned ino# %d\n", __func__, f->inocache->ino);
	ri->ino = cpu_to_je32(f->inocache->ino);

	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
	ri->totlen = cpu_to_je32(PAD(sizeof(*ri)));
	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
	ri->mode = cpu_to_jemode(mode);

	f->highest_version = 1;
	ri->version = cpu_to_je32(f->highest_version);

	return 0;
}

/* jffs2_write_dnode - given a raw_inode, allocate a full_dnode for it,
   write it to the flash, link it into the existing inode/fragment list */

struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
					   struct jffs2_raw_inode *ri, const unsigned char *data,
					   uint32_t datalen, int alloc_mode)

{
	struct jffs2_full_dnode *fn;
	size_t retlen;
	uint32_t flash_ofs;
	struct kvec vecs[2];
	int ret;
	int retried = 0;
	unsigned long cnt = 2;

	/* Debug-only sanity check: the caller must have set a valid
	 * header CRC before handing us the node. */
	D1(if(je32_to_cpu(ri->hdr_crc) != crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)) {
		pr_crit("Eep. CRC not correct in jffs2_write_dnode()\n");
		BUG();
	}
	   );
	vecs[0].iov_base = ri;
	vecs[0].iov_len = sizeof(*ri);
	vecs[1].iov_base = (unsigned char *)data;
	vecs[1].iov_len = datalen;

	if (je32_to_cpu(ri->totlen) != sizeof(*ri) + datalen) {
		pr_warn("%s(): ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n",
			__func__, je32_to_cpu(ri->totlen),
			sizeof(*ri), datalen);
	}

	fn = jffs2_alloc_full_dnode();
	if (!fn)
		return ERR_PTR(-ENOMEM);

	/* check number of valid vecs */
	if (!datalen || !data)
		cnt = 1;
retry:
	flash_ofs = write_ofs(c);

	jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len);

	/* On a retry the node version may now be stale; bump it and
	 * recompute the node CRC.  (GC writes keep their version.) */
	if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(ri->version) < f->highest_version)) {
		BUG_ON(!retried);
		jffs2_dbg(1, "%s(): dnode_version %d, highest version %d -> updating dnode\n",
			  __func__,
			  je32_to_cpu(ri->version), f->highest_version);
		ri->version = cpu_to_je32(++f->highest_version);
		ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
	}

	ret = jffs2_flash_writev(c, vecs, cnt, flash_ofs, &retlen,
				 (alloc_mode==ALLOC_GC)?0:f->inocache->ino);

	if (ret || (retlen != sizeof(*ri) + datalen)) {
		pr_notice("Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
			  sizeof(*ri) + datalen, flash_ofs, ret, retlen);

		/* Mark the space as dirtied */
		if (retlen) {
			/* Don't change raw->size to match retlen. We may have
			   written the node header already, and only the data will
			   seem corrupted, in which case the scan would skip over
			   any node we write before the original intended end of
			   this node */
			jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*ri)+datalen), NULL);
		} else {
			pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n",
				  flash_ofs);
		}
		if (!retried && alloc_mode != ALLOC_NORETRY) {
			/* Try to reallocate space and retry */
			uint32_t dummy;
			struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size];

			retried = 1;

			jffs2_dbg(1, "Retrying failed write.\n");

			jffs2_dbg_acct_sanity_check(c,jeb);
			jffs2_dbg_acct_paranoia_check(c, jeb);

			if (alloc_mode == ALLOC_GC) {
				ret = jffs2_reserve_space_gc(c, sizeof(*ri) + datalen, &dummy,
							     JFFS2_SUMMARY_INODE_SIZE);
			} else {
				/* Locking pain */
				mutex_unlock(&f->sem);
				jffs2_complete_reservation(c);

				ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &dummy,
							  alloc_mode, JFFS2_SUMMARY_INODE_SIZE);
				mutex_lock(&f->sem);
			}

			if (!ret) {
				flash_ofs = write_ofs(c);
				jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write.\n",
					  flash_ofs);

				jffs2_dbg_acct_sanity_check(c,jeb);
				jffs2_dbg_acct_paranoia_check(c, jeb);

				goto retry;
			}
			jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n",
				  ret);
		}
		/* Release the full_dnode which is now useless, and return */
		jffs2_free_full_dnode(fn);
		return ERR_PTR(ret?ret:-EIO);
	}
	/* Mark the space used */
	/* If node covers at least a whole page, or if it starts at the
	   beginning of a page and runs to the end of the file, or if
	   it's a hole node, mark it REF_PRISTINE, else REF_NORMAL.
	*/
	if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) ||
	    ( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) &&
	      (je32_to_cpu(ri->dsize)+je32_to_cpu(ri->offset) ==  je32_to_cpu(ri->isize)))) {
		flash_ofs |= REF_PRISTINE;
	} else {
		flash_ofs |= REF_NORMAL;
	}
	fn->raw = jffs2_add_physical_node_ref(c, flash_ofs, PAD(sizeof(*ri)+datalen), f->inocache);
	if (IS_ERR(fn->raw)) {
		void *hold_err = fn->raw;
		/* Release the full_dnode which is now useless, and return */
		jffs2_free_full_dnode(fn);
		return ERR_CAST(hold_err);
	}
	fn->ofs = je32_to_cpu(ri->offset);
	fn->size = je32_to_cpu(ri->dsize);
	fn->frags = 0;

	jffs2_dbg(1, "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n",
		  flash_ofs & ~3, flash_ofs & 3, je32_to_cpu(ri->dsize),
		  je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc),
		  je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen));

	if (retried) {
		jffs2_dbg_acct_sanity_check(c,NULL);
	}

	return fn;
}

/* Write a raw dirent node (plus its name) to flash and return the
 * corresponding full_dirent, retrying once on write failure.
 * Caller holds f->sem; it is dropped/retaken around re-reservation. */
struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
					     struct jffs2_raw_dirent *rd, const unsigned char *name,
					     uint32_t namelen, int alloc_mode)
{
	struct jffs2_full_dirent *fd;
	size_t retlen;
	struct kvec vecs[2];
	uint32_t flash_ofs;
	int retried = 0;
	int ret;

	jffs2_dbg(1, "%s(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n",
		  __func__,
		  je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino),
		  je32_to_cpu(rd->name_crc));

	D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) {
		pr_crit("Eep. CRC not correct in jffs2_write_dirent()\n");
		BUG();
	   });

	if (strnlen(name, namelen) != namelen) {
		/* This should never happen, but seems to have done on at least one
		   occasion: https://dev.laptop.org/ticket/4184 */
		pr_crit("Error in jffs2_write_dirent() -- name contains zero bytes!\n");
		pr_crit("Directory inode #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x\n",
			je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino),
			je32_to_cpu(rd->name_crc));
		WARN_ON(1);
		return ERR_PTR(-EIO);
	}

	vecs[0].iov_base = rd;
	vecs[0].iov_len = sizeof(*rd);
	vecs[1].iov_base = (unsigned char *)name;
	vecs[1].iov_len = namelen;

	fd = jffs2_alloc_full_dirent(namelen+1);
	if (!fd)
		return ERR_PTR(-ENOMEM);

	fd->version = je32_to_cpu(rd->version);
	fd->ino = je32_to_cpu(rd->ino);
	fd->nhash = full_name_hash(name, namelen);
	fd->type = rd->type;
	memcpy(fd->name, name, namelen);
	fd->name[namelen]=0;

retry:
	flash_ofs = write_ofs(c);

	jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len);

	/* Bump a stale version on retry, mirroring jffs2_write_dnode(). */
	if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(rd->version) < f->highest_version)) {
		BUG_ON(!retried);
		jffs2_dbg(1, "%s(): dirent_version %d, highest version %d -> updating dirent\n",
			  __func__,
			  je32_to_cpu(rd->version), f->highest_version);
		rd->version = cpu_to_je32(++f->highest_version);
		fd->version = je32_to_cpu(rd->version);
		rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
	}

	ret = jffs2_flash_writev(c, vecs, 2, flash_ofs, &retlen,
				 (alloc_mode==ALLOC_GC)?0:je32_to_cpu(rd->pino));
	if (ret || (retlen != sizeof(*rd) + namelen)) {
		pr_notice("Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
			  sizeof(*rd) + namelen, flash_ofs, ret, retlen);
		/* Mark the space as dirtied */
		if (retlen) {
			jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*rd)+namelen), NULL);
		} else {
			pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n",
				  flash_ofs);
		}
		if (!retried) {
			/* Try to reallocate space and retry */
			uint32_t dummy;
			struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size];

			retried = 1;

			jffs2_dbg(1, "Retrying failed write.\n");

			jffs2_dbg_acct_sanity_check(c,jeb);
			jffs2_dbg_acct_paranoia_check(c, jeb);

			if (alloc_mode == ALLOC_GC) {
				ret = jffs2_reserve_space_gc(c, sizeof(*rd) + namelen, &dummy,
							     JFFS2_SUMMARY_DIRENT_SIZE(namelen));
			} else {
				/* Locking pain */
				mutex_unlock(&f->sem);
				jffs2_complete_reservation(c);

				ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &dummy,
							  alloc_mode, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
				mutex_lock(&f->sem);
			}

			if (!ret) {
				flash_ofs = write_ofs(c);
				jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write\n",
					  flash_ofs);
				jffs2_dbg_acct_sanity_check(c,jeb);
				jffs2_dbg_acct_paranoia_check(c, jeb);
				goto retry;
			}
			jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n",
				  ret);
		}
		/* Release the full_dnode which is now useless, and return */
		jffs2_free_full_dirent(fd);
		return ERR_PTR(ret?ret:-EIO);
	}
	/* Mark the space used */
	fd->raw = jffs2_add_physical_node_ref(c, flash_ofs | dirent_node_state(rd),
					      PAD(sizeof(*rd)+namelen), f->inocache);
	if (IS_ERR(fd->raw)) {
		void *hold_err = fd->raw;
		/* Release the full_dirent which is now useless, and return */
		jffs2_free_full_dirent(fd);
		return ERR_CAST(hold_err);
	}

	if (retried) {
		jffs2_dbg_acct_sanity_check(c,NULL);
	}

	return fd;
}

/* The OS-specific code fills in the metadata in the jffs2_raw_inode
   for us, so that we don't have to go digging in struct inode or
   its equivalent.
   It should set: mode, uid, gid, (starting)isize, atime, ctime, mtime */
int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
			    struct jffs2_raw_inode *ri, unsigned char *buf,
			    uint32_t offset, uint32_t writelen, uint32_t *retlen)
{
	int ret = 0;
	uint32_t writtenlen = 0;

	jffs2_dbg(1, "%s(): Ino #%u, ofs 0x%x, len 0x%x\n",
		  __func__, f->inocache->ino, offset, writelen);

	/* Write the range one page-aligned, compressed chunk at a time;
	 * *retlen reports how much was actually committed. */
	while(writelen) {
		struct jffs2_full_dnode *fn;
		unsigned char *comprbuf = NULL;
		uint16_t comprtype = JFFS2_COMPR_NONE;
		uint32_t alloclen;
		uint32_t datalen, cdatalen;
		int retried = 0;

	retry:
		jffs2_dbg(2, "jffs2_commit_write() loop: 0x%x to write to 0x%x\n",
			  writelen, offset);

		ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN,
					&alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
		if (ret) {
			jffs2_dbg(1, "jffs2_reserve_space returned %d\n", ret);
			break;
		}
		mutex_lock(&f->sem);
		/* Never cross a page boundary in one node, and never exceed
		 * the reservation we just obtained. */
		datalen = min_t(uint32_t, writelen,
				PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1)));
		cdatalen = min_t(uint32_t, alloclen - sizeof(*ri), datalen);

		comprtype = jffs2_compress(c, f, buf, &comprbuf, &datalen, &cdatalen);

		ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
		ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
		ri->totlen = cpu_to_je32(sizeof(*ri) + cdatalen);
		ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));

		ri->ino = cpu_to_je32(f->inocache->ino);
		ri->version = cpu_to_je32(++f->highest_version);
		ri->isize = cpu_to_je32(max(je32_to_cpu(ri->isize), offset + datalen));
		ri->offset = cpu_to_je32(offset);
		ri->csize = cpu_to_je32(cdatalen);
		ri->dsize = cpu_to_je32(datalen);
		ri->compr = comprtype & 0xff;
		ri->usercompr = (comprtype >> 8 ) & 0xff;
		ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
		ri->data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen));

		fn = jffs2_write_dnode(c, f, ri, comprbuf, cdatalen, ALLOC_NORETRY);

		jffs2_free_comprbuf(comprbuf, buf);

		if (IS_ERR(fn)) {
			ret = PTR_ERR(fn);
			mutex_unlock(&f->sem);
			jffs2_complete_reservation(c);
			if (!retried) {
				/* Write error to be retried */
				retried = 1;
				jffs2_dbg(1, "Retrying node write in jffs2_write_inode_range()\n");
				goto retry;
			}
			break;
		}
		ret = jffs2_add_full_dnode_to_inode(c, f, fn);
		/* The first real data node obsoletes any bare metadata node
		 * left over from file creation. */
		if (f->metadata) {
			jffs2_mark_node_obsolete(c, f->metadata->raw);
			jffs2_free_full_dnode(f->metadata);
			f->metadata = NULL;
		}
		if (ret) {
			/* Eep */
			jffs2_dbg(1, "Eep. add_full_dnode_to_inode() failed in commit_write, returned %d\n",
				  ret);
			jffs2_mark_node_obsolete(c, fn->raw);
			jffs2_free_full_dnode(fn);

			mutex_unlock(&f->sem);
			jffs2_complete_reservation(c);
			break;
		}
		mutex_unlock(&f->sem);
		jffs2_complete_reservation(c);
		if (!datalen) {
			pr_warn("Eep. We didn't actually write any data in jffs2_write_inode_range()\n");
			ret = -EIO;
			break;
		}
		jffs2_dbg(1, "increasing writtenlen by %d\n", datalen);
		writtenlen += datalen;
		offset += datalen;
		writelen -= datalen;
		buf += datalen;
	}
	*retlen = writtenlen;
	return ret;
}

/* Create a new file: write its (dataless) inode node, apply security
 * and ACL attributes, then write the dirent linking it into @dir_f. */
int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
		    struct jffs2_inode_info *f, struct jffs2_raw_inode *ri,
		    const struct qstr *qstr)
{
	struct jffs2_raw_dirent *rd;
	struct jffs2_full_dnode *fn;
	struct jffs2_full_dirent *fd;
	uint32_t alloclen;
	int ret;

	/* Try to reserve enough space for both node and dirent.
	 * Just the node will do for now, though
	 */
	ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL,
				JFFS2_SUMMARY_INODE_SIZE);
	jffs2_dbg(1, "%s(): reserved 0x%x bytes\n", __func__, alloclen);
	if (ret)
		return ret;

	mutex_lock(&f->sem);

	ri->data_crc = cpu_to_je32(0);
	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));

	fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL);

	jffs2_dbg(1, "jffs2_do_create created file with mode 0x%x\n",
		  jemode_to_cpu(ri->mode));

	if (IS_ERR(fn)) {
		jffs2_dbg(1, "jffs2_write_dnode() failed\n");
		/* Eeek. Wave bye bye */
		mutex_unlock(&f->sem);
		jffs2_complete_reservation(c);
		return PTR_ERR(fn);
	}
	/* No data here. Only a metadata node, which will be
	   obsoleted by the first data write
	*/
	f->metadata = fn;

	mutex_unlock(&f->sem);
	jffs2_complete_reservation(c);

	ret = jffs2_init_security(&f->vfs_inode, &dir_f->vfs_inode, qstr);
	if (ret)
		return ret;
	ret = jffs2_init_acl_post(&f->vfs_inode);
	if (ret)
		return ret;

	ret = jffs2_reserve_space(c, sizeof(*rd)+qstr->len, &alloclen,
				ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(qstr->len));

	if (ret) {
		/* Eep. */
		jffs2_dbg(1, "jffs2_reserve_space() for dirent failed\n");
		return ret;
	}

	rd = jffs2_alloc_raw_dirent();
	if (!rd) {
		/* Argh. Now we treat it like a normal delete */
		jffs2_complete_reservation(c);
		return -ENOMEM;
	}

	mutex_lock(&dir_f->sem);

	rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
	rd->totlen = cpu_to_je32(sizeof(*rd) + qstr->len);
	rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));

	rd->pino = cpu_to_je32(dir_f->inocache->ino);
	rd->version = cpu_to_je32(++dir_f->highest_version);
	rd->ino = ri->ino;
	rd->mctime = ri->ctime;
	rd->nsize = qstr->len;
	rd->type = DT_REG;
	rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
	rd->name_crc = cpu_to_je32(crc32(0, qstr->name, qstr->len));

	fd = jffs2_write_dirent(c, dir_f, rd, qstr->name, qstr->len, ALLOC_NORMAL);

	jffs2_free_raw_dirent(rd);

	if (IS_ERR(fd)) {
		/* dirent failed to write. Delete the inode normally
		   as if it were the final unlink() */
		jffs2_complete_reservation(c);
		mutex_unlock(&dir_f->sem);
		return PTR_ERR(fd);
	}

	/* Link the fd into the inode's list, obsoleting an old
	   one if necessary. */
	jffs2_add_fd_to_list(c, fd, &dir_f->dents);

	jffs2_complete_reservation(c);
	mutex_unlock(&dir_f->sem);

	return 0;
}

/* NOTE(review): definition continues beyond this chunk of the file;
 * the remainder of jffs2_do_unlink() is not visible here. */
int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
		    const char *name, int namelen, struct jffs2_inode_info *dead_f,
		    uint32_t time)
{
	struct jffs2_raw_dirent *rd;
	struct jffs2_full_dirent *fd;
	uint32_t alloclen;
	int ret;

	if (!jffs2_can_mark_obsolete(c)) {
		/* We can't mark stuff obsolete on the medium. We need to write a deletion dirent */

		rd = jffs2_alloc_raw_dirent();
		if (!rd)
			return -ENOMEM;

		ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
					ALLOC_DELETION, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
		if (ret) {
			jffs2_free_raw_dirent(rd);
			return ret;
		}

		mutex_lock(&dir_f->sem);

		/* Build a deletion node */
		rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
		rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
		rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
		rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));

		rd->pino = cpu_to_je32(dir_f->inocache->ino);
		rd->version = cpu_to_je32(++dir_f->highest_version);
		rd->ino = cpu_to_je32(0);
		rd->mctime = cpu_to_je32(time);
		rd->nsize = namelen;
		rd->type = DT_UNKNOWN;
		rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
		rd->name_crc = cpu_to_je32(crc32(0, name, namelen));

		fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, ALLOC_DELETION);

		jffs2_free_raw_dirent(rd);

		if (IS_ERR(fd)) {
			jffs2_complete_reservation(c);
			mutex_unlock(&dir_f->sem);
			return PTR_ERR(fd);
		}

		/* File it. This will mark the old one obsolete.
*/ jffs2_add_fd_to_list(c, fd, &dir_f->dents); mutex_unlock(&dir_f->sem); } else { uint32_t nhash = full_name_hash(name, namelen); fd = dir_f->dents; /* We don't actually want to reserve any space, but we do want to be holding the alloc_sem when we write to flash */ mutex_lock(&c->alloc_sem); mutex_lock(&dir_f->sem); for (fd = dir_f->dents; fd; fd = fd->next) { if (fd->nhash == nhash && !memcmp(fd->name, name, namelen) && !fd->name[namelen]) { jffs2_dbg(1, "Marking old dirent node (ino #%u) @%08x obsolete\n", fd->ino, ref_offset(fd->raw)); jffs2_mark_node_obsolete(c, fd->raw); /* We don't want to remove it from the list immediately, because that screws up getdents()/seek() semantics even more than they're screwed already. Turn it into a node-less deletion dirent instead -- a placeholder */ fd->raw = NULL; fd->ino = 0; break; } } mutex_unlock(&dir_f->sem); } /* dead_f is NULL if this was a rename not a real unlink */ /* Also catch the !f->inocache case, where there was a dirent pointing to an inode which didn't exist. 
*/ if (dead_f && dead_f->inocache) { mutex_lock(&dead_f->sem); if (S_ISDIR(OFNI_EDONI_2SFFJ(dead_f)->i_mode)) { while (dead_f->dents) { /* There can be only deleted ones */ fd = dead_f->dents; dead_f->dents = fd->next; if (fd->ino) { pr_warn("Deleting inode #%u with active dentry \"%s\"->ino #%u\n", dead_f->inocache->ino, fd->name, fd->ino); } else { jffs2_dbg(1, "Removing deletion dirent for \"%s\" from dir ino #%u\n", fd->name, dead_f->inocache->ino); } if (fd->raw) jffs2_mark_node_obsolete(c, fd->raw); jffs2_free_full_dirent(fd); } dead_f->inocache->pino_nlink = 0; } else dead_f->inocache->pino_nlink--; /* NB: Caller must set inode nlink if appropriate */ mutex_unlock(&dead_f->sem); } jffs2_complete_reservation(c); return 0; } int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen, uint32_t time) { struct jffs2_raw_dirent *rd; struct jffs2_full_dirent *fd; uint32_t alloclen; int ret; rd = jffs2_alloc_raw_dirent(); if (!rd) return -ENOMEM; ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); if (ret) { jffs2_free_raw_dirent(rd); return ret; } mutex_lock(&dir_f->sem); /* Build a deletion node */ rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT); rd->totlen = cpu_to_je32(sizeof(*rd) + namelen); rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)); rd->pino = cpu_to_je32(dir_f->inocache->ino); rd->version = cpu_to_je32(++dir_f->highest_version); rd->ino = cpu_to_je32(ino); rd->mctime = cpu_to_je32(time); rd->nsize = namelen; rd->type = type; rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); rd->name_crc = cpu_to_je32(crc32(0, name, namelen)); fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, ALLOC_NORMAL); jffs2_free_raw_dirent(rd); if (IS_ERR(fd)) { jffs2_complete_reservation(c); mutex_unlock(&dir_f->sem); return PTR_ERR(fd); } /* File it. 
This will mark the old one obsolete. */ jffs2_add_fd_to_list(c, fd, &dir_f->dents); jffs2_complete_reservation(c); mutex_unlock(&dir_f->sem); return 0; }
gpl-2.0
sirgatez/Android-Eclair-Kernel-Source-v2.6.29.6
arch/mips/wrppmc/irq.c
9543
1516
/*
 * irq.c: GT64120 Interrupt Controller
 *
 * Copyright (C) 2006, Wind River System Inc.
 * Author: Rongkai.Zhan, <rongkai.zhan@windriver.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/irq.h>

#include <asm/gt64120.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>

/*
 * Top-level interrupt dispatcher, entered from the low-level exception
 * handler.  Only lines raised in Cause AND unmasked in Status are
 * considered; sources are checked in fixed priority order (IP7 timer
 * first, then IP6 UART, then IP3 PCI INTA).
 */
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;

	if (pending & STATUSF_IP7)
		do_IRQ(WRPPMC_MIPS_TIMER_IRQ); /* CPU Compare/Count internal timer */
	else if (pending & STATUSF_IP6)
		do_IRQ(WRPPMC_UART16550_IRQ); /* UART 16550 port */
	else if (pending & STATUSF_IP3)
		do_IRQ(WRPPMC_PCI_INTA_IRQ); /* PCI INT_A */
	else
		spurious_interrupt(); /* no recognized source */
}

/**
 * Initialize GT64120 Interrupt Controller
 *
 * Clears any latched cause bits and masks every bridge interrupt source,
 * so nothing fires until a driver explicitly enables it.
 */
void gt64120_init_pic(void)
{
	/* clear CPU Interrupt Cause Registers */
	GT_WRITE(GT_INTRCAUSE_OFS, (0x1F << 21));
	GT_WRITE(GT_HINTRCAUSE_OFS, 0x00);

	/* Disable all interrupts from GT64120 bridge chip */
	GT_WRITE(GT_INTRMASK_OFS, 0x00);
	GT_WRITE(GT_HINTRMASK_OFS, 0x00);
	GT_WRITE(GT_PCI0_ICMASK_OFS, 0x00);
	GT_WRITE(GT_PCI0_HICMASK_OFS, 0x00);
}

/* Board interrupt setup: register the MIPS CPU interrupt controller,
 * then quiesce the GT64120 bridge. */
void __init arch_init_irq(void)
{
	/* IRQ 0 - 7 are for MIPS common irq_cpu controller */
	mips_cpu_irq_init();

	gt64120_init_pic();
}
gpl-2.0
m7rom/android_kernel_htc_msm8960
drivers/media/rc/keymaps/rc-terratec-slim.c
9543
2365
/*
 * TerraTec remote controller keytable
 *
 * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* TerraTec slim remote, 7 rows, 4 columns. */
/* Uses NEC extended 0x02bd. */
/* Scancode -> keycode table; the 0x02bd prefix is the NEC extended
 * address, the low byte the command. */
static struct rc_map_table terratec_slim[] = {
	{ 0x02bd00, KEY_1 },
	{ 0x02bd01, KEY_2 },
	{ 0x02bd02, KEY_3 },
	{ 0x02bd03, KEY_4 },
	{ 0x02bd04, KEY_5 },
	{ 0x02bd05, KEY_6 },
	{ 0x02bd06, KEY_7 },
	{ 0x02bd07, KEY_8 },
	{ 0x02bd08, KEY_9 },
	{ 0x02bd09, KEY_0 },
	{ 0x02bd0a, KEY_MUTE },
	{ 0x02bd0b, KEY_NEW }, /* symbol: PIP */
	{ 0x02bd0e, KEY_VOLUMEDOWN },
	{ 0x02bd0f, KEY_PLAYPAUSE },
	{ 0x02bd10, KEY_RIGHT },
	{ 0x02bd11, KEY_LEFT },
	{ 0x02bd12, KEY_UP },
	{ 0x02bd13, KEY_DOWN },
	{ 0x02bd15, KEY_OK },
	{ 0x02bd16, KEY_STOP },
	{ 0x02bd17, KEY_CAMERA }, /* snapshot */
	{ 0x02bd18, KEY_CHANNELUP },
	{ 0x02bd19, KEY_RECORD },
	{ 0x02bd1a, KEY_CHANNELDOWN },
	{ 0x02bd1c, KEY_ESC },
	{ 0x02bd1f, KEY_VOLUMEUP },
	{ 0x02bd44, KEY_EPG },
	{ 0x02bd45, KEY_POWER2 }, /* [red power button] */
};

/* Wrapper handed to the rc-core keymap registry. */
static struct rc_map_list terratec_slim_map = {
	.map = {
		.scan = terratec_slim,
		.size = ARRAY_SIZE(terratec_slim),
		.rc_type = RC_TYPE_NEC,
		.name = RC_MAP_TERRATEC_SLIM,
	}
};

/* Register the keymap on module load. */
static int __init init_rc_map_terratec_slim(void)
{
	return rc_map_register(&terratec_slim_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_terratec_slim(void)
{
	rc_map_unregister(&terratec_slim_map);
}

module_init(init_rc_map_terratec_slim)
module_exit(exit_rc_map_terratec_slim)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
gpl-2.0
andyvand/android_kernel_samsung_crespo
arch/m68k/tools/amiga/dmesg.c
14151
1657
/* * linux/arch/m68k/tools/amiga/dmesg.c -- Retrieve the kernel messages stored * in Chip RAM with the kernel command * line option `debug=mem'. * * © Copyright 1996 by Geert Uytterhoeven <geert@linux-m68k.org> * * * Usage: * * dmesg * dmesg <CHIPMEM_END> * * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of the Linux * distribution for more details. */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #define CHIPMEM_START 0x00000000 #define CHIPMEM_END 0x00200000 /* overridden by argv[1] */ #define SAVEKMSG_MAGIC1 0x53415645 /* 'SAVE' */ #define SAVEKMSG_MAGIC2 0x4B4D5347 /* 'KMSG' */ struct savekmsg { u_long magic1; /* SAVEKMSG_MAGIC1 */ u_long magic2; /* SAVEKMSG_MAGIC2 */ u_long magicptr; /* address of magic1 */ u_long size; char data[0]; }; int main(int argc, char *argv[]) { u_long start = CHIPMEM_START, end = CHIPMEM_END, p; int found = 0; struct savekmsg *m = NULL; if (argc >= 2) end = strtoul(argv[1], NULL, 0); printf("Searching for SAVEKMSG magic...\n"); for (p = start; p <= end-sizeof(struct savekmsg); p += 4) { m = (struct savekmsg *)p; if ((m->magic1 == SAVEKMSG_MAGIC1) && (m->magic2 == SAVEKMSG_MAGIC2) && (m->magicptr == p)) { found = 1; break; } } if (!found) printf("Not found\n"); else { printf("Found %ld bytes at 0x%08lx\n", m->size, (u_long)&m->data); puts(">>>>>>>>>>>>>>>>>>>>"); fflush(stdout); write(1, &m->data, m->size); fflush(stdout); puts("<<<<<<<<<<<<<<<<<<<<"); } return(0); }
gpl-2.0
morogoku/MoRoKernel-S7-v2
drivers/scsi/scsi_module.c
14919
1688
/*
 * Copyright (C) 2003 Christoph Hellwig.
 *	Released under GPL v2.
 *
 * Support for old-style host templates.
 *
 * NOTE:  Do not use this for new drivers ever.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

#include <scsi/scsi_host.h>

/*
 * Module init for a legacy-template SCSI HBA driver: let the template's
 * detect() populate sht->legacy_hosts, then register and scan each host.
 * If registering one host fails, hosts registered before it are removed
 * in reverse order.  A release() method is mandatory.
 */
static int __init init_this_scsi_driver(void)
{
	struct scsi_host_template *sht = &driver_template;
	struct Scsi_Host *shost;
	struct list_head *l;
	int error;

	if (!sht->release) {
		printk(KERN_ERR "scsi HBA driver %s didn't set a release method.\n",
		       sht->name);
		return -EINVAL;
	}

	sht->module = THIS_MODULE;
	INIT_LIST_HEAD(&sht->legacy_hosts);

	sht->detect(sht);	/* driver fills sht->legacy_hosts */
	if (list_empty(&sht->legacy_hosts))
		return -ENODEV;

	list_for_each_entry(shost, &sht->legacy_hosts, sht_legacy_list) {
		error = scsi_add_host(shost, NULL);
		if (error)
			goto fail;
		scsi_scan_host(shost);
	}
	return 0;
 fail:
	/* Unwind only the hosts added before the failing one, walking
	 * backwards from the failure point. */
	l = &shost->sht_legacy_list;
	while ((l = l->prev) != &sht->legacy_hosts)
		scsi_remove_host(list_entry(l, struct Scsi_Host, sht_legacy_list));
	return error;
}

/*
 * Module exit: remove every host from the midlayer, hand each back to the
 * driver via release(), and if the driver failed to scsi_unregister() its
 * hosts, warn with a stack dump and unregister them ourselves.
 */
static void __exit exit_this_scsi_driver(void)
{
	struct scsi_host_template *sht = &driver_template;
	struct Scsi_Host *shost, *s;

	list_for_each_entry(shost, &sht->legacy_hosts, sht_legacy_list)
		scsi_remove_host(shost);
	/* _safe: release() is expected to delete the host from the list */
	list_for_each_entry_safe(shost, s, &sht->legacy_hosts, sht_legacy_list)
		sht->release(shost);

	if (list_empty(&sht->legacy_hosts))
		return;

	printk(KERN_WARNING "%s did not call scsi_unregister\n", sht->name);
	dump_stack();

	list_for_each_entry_safe(shost, s, &sht->legacy_hosts, sht_legacy_list)
		scsi_unregister(shost);
}

module_init(init_this_scsi_driver);
module_exit(exit_this_scsi_driver);
gpl-2.0
firefoxu8833/huawei-kernel-test
drivers/rtc/interface.c
72
23104
/* * RTC subsystem, interface functions * * Copyright (C) 2005 Tower Technologies * Author: Alessandro Zummo <a.zummo@towertech.it> * * based on arch/arm/common/rtctime.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/rtc.h> #include <linux/sched.h> #include <linux/log2.h> #include <linux/workqueue.h> static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer); static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer); static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm) { int err; if (!rtc->ops) err = -ENODEV; else if (!rtc->ops->read_time) err = -EINVAL; else { memset(tm, 0, sizeof(struct rtc_time)); err = rtc->ops->read_time(rtc->dev.parent, tm); } return err; } int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm) { int err; err = mutex_lock_interruptible(&rtc->ops_lock); if (err) return err; err = __rtc_read_time(rtc, tm); mutex_unlock(&rtc->ops_lock); return err; } EXPORT_SYMBOL_GPL(rtc_read_time); int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm) { int err; err = rtc_valid_tm(tm); if (err != 0) return err; err = mutex_lock_interruptible(&rtc->ops_lock); if (err) return err; if (!rtc->ops) err = -ENODEV; else if (rtc->ops->set_time) err = rtc->ops->set_time(rtc->dev.parent, tm); else if (rtc->ops->set_mmss) { unsigned long secs; err = rtc_tm_to_time(tm, &secs); if (err == 0) err = rtc->ops->set_mmss(rtc->dev.parent, secs); } else err = -EINVAL; mutex_unlock(&rtc->ops_lock); return err; } EXPORT_SYMBOL_GPL(rtc_set_time); int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs) { int err; err = mutex_lock_interruptible(&rtc->ops_lock); if (err) return err; if (!rtc->ops) err = -ENODEV; else if (rtc->ops->set_mmss) err = rtc->ops->set_mmss(rtc->dev.parent, secs); else if (rtc->ops->read_time && rtc->ops->set_time) { struct 
rtc_time new, old; err = rtc->ops->read_time(rtc->dev.parent, &old); if (err == 0) { rtc_time_to_tm(secs, &new); /* * avoid writing when we're going to change the day of * the month. We will retry in the next minute. This * basically means that if the RTC must not drift * by more than 1 minute in 11 minutes. */ if (!((old.tm_hour == 23 && old.tm_min == 59) || (new.tm_hour == 23 && new.tm_min == 59))) err = rtc->ops->set_time(rtc->dev.parent, &new); } } else err = -EINVAL; mutex_unlock(&rtc->ops_lock); return err; } EXPORT_SYMBOL_GPL(rtc_set_mmss); static int rtc_read_alarm_internal(struct rtc_device *rtc, struct rtc_wkalrm *alarm) { int err; err = mutex_lock_interruptible(&rtc->ops_lock); if (err) return err; if (rtc->ops == NULL) err = -ENODEV; else if (!rtc->ops->read_alarm) err = -EINVAL; else { memset(alarm, 0, sizeof(struct rtc_wkalrm)); err = rtc->ops->read_alarm(rtc->dev.parent, alarm); } mutex_unlock(&rtc->ops_lock); return err; } int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) { int err; struct rtc_time before, now; int first_time = 1; unsigned long t_now, t_alm; enum { none, day, month, year } missing = none; unsigned days; /* The lower level RTC driver may return -1 in some fields, * creating invalid alarm->time values, for reasons like: * * - The hardware may not be capable of filling them in; * many alarms match only on time-of-day fields, not * day/month/year calendar data. * * - Some hardware uses illegal values as "wildcard" match * values, which non-Linux firmware (like a BIOS) may try * to set up as e.g. "alarm 15 minutes after each hour". * Linux uses only oneshot alarms. * * When we see that here, we deal with it by using values from * a current RTC timestamp for any missing (-1) values. The * RTC driver prevents "periodic alarm" modes. 
* * But this can be racey, because some fields of the RTC timestamp * may have wrapped in the interval since we read the RTC alarm, * which would lead to us inserting inconsistent values in place * of the -1 fields. * * Reading the alarm and timestamp in the reverse sequence * would have the same race condition, and not solve the issue. * * So, we must first read the RTC timestamp, * then read the RTC alarm value, * and then read a second RTC timestamp. * * If any fields of the second timestamp have changed * when compared with the first timestamp, then we know * our timestamp may be inconsistent with that used by * the low-level rtc_read_alarm_internal() function. * * So, when the two timestamps disagree, we just loop and do * the process again to get a fully consistent set of values. * * This could all instead be done in the lower level driver, * but since more than one lower level RTC implementation needs it, * then it's probably best best to do it here instead of there.. */ /* Get the "before" timestamp */ err = rtc_read_time(rtc, &before); if (err < 0) return err; do { if (!first_time) memcpy(&before, &now, sizeof(struct rtc_time)); first_time = 0; /* get the RTC alarm values, which may be incomplete */ err = rtc_read_alarm_internal(rtc, alarm); if (err) return err; /* full-function RTCs won't have such missing fields */ if (rtc_valid_tm(&alarm->time) == 0) return 0; /* get the "after" timestamp, to detect wrapped fields */ err = rtc_read_time(rtc, &now); if (err < 0) return err; /* note that tm_sec is a "don't care" value here: */ } while ( before.tm_min != now.tm_min || before.tm_hour != now.tm_hour || before.tm_mon != now.tm_mon || before.tm_year != now.tm_year); /* Fill in the missing alarm fields using the timestamp; we * know there's at least one since alarm->time is invalid. 
*/ if (alarm->time.tm_sec == -1) alarm->time.tm_sec = now.tm_sec; if (alarm->time.tm_min == -1) alarm->time.tm_min = now.tm_min; if (alarm->time.tm_hour == -1) alarm->time.tm_hour = now.tm_hour; /* For simplicity, only support date rollover for now */ if (alarm->time.tm_mday < 1 || alarm->time.tm_mday > 31) { alarm->time.tm_mday = now.tm_mday; missing = day; } if ((unsigned)alarm->time.tm_mon >= 12) { alarm->time.tm_mon = now.tm_mon; if (missing == none) missing = month; } if (alarm->time.tm_year == -1) { alarm->time.tm_year = now.tm_year; if (missing == none) missing = year; } /* with luck, no rollover is needed */ rtc_tm_to_time(&now, &t_now); rtc_tm_to_time(&alarm->time, &t_alm); if (t_now < t_alm) goto done; switch (missing) { /* 24 hour rollover ... if it's now 10am Monday, an alarm that * that will trigger at 5am will do so at 5am Tuesday, which * could also be in the next month or year. This is a common * case, especially for PCs. */ case day: dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day"); t_alm += 24 * 60 * 60; rtc_time_to_tm(t_alm, &alarm->time); break; /* Month rollover ... if it's the 31th, an alarm on the 3rd will * be next month. An alarm matching on the 30th, 29th, or 28th * may end up in the month after that! Many newer PCs support * this type of alarm. */ case month: dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month"); do { if (alarm->time.tm_mon < 11) alarm->time.tm_mon++; else { alarm->time.tm_mon = 0; alarm->time.tm_year++; } days = rtc_month_days(alarm->time.tm_mon, alarm->time.tm_year); } while (days < alarm->time.tm_mday); break; /* Year rollover ... easy except for leap years! 
*/ case year: dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year"); do { alarm->time.tm_year++; } while (rtc_valid_tm(&alarm->time) != 0); break; default: dev_warn(&rtc->dev, "alarm rollover not handled\n"); } done: return 0; } int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) { int err; err = mutex_lock_interruptible(&rtc->ops_lock); if (err) return err; if (rtc->ops == NULL) err = -ENODEV; else if (!rtc->ops->read_alarm) err = -EINVAL; else { memset(alarm, 0, sizeof(struct rtc_wkalrm)); alarm->enabled = rtc->aie_timer.enabled; alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires); } mutex_unlock(&rtc->ops_lock); return err; } EXPORT_SYMBOL_GPL(rtc_read_alarm); static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) { struct rtc_time tm; long now, scheduled; int err; err = rtc_valid_tm(&alarm->time); if (err) return err; rtc_tm_to_time(&alarm->time, &scheduled); /* Make sure we're not setting alarms in the past */ err = __rtc_read_time(rtc, &tm); rtc_tm_to_time(&tm, &now); if (scheduled <= now) return -ETIME; /* * XXX - We just checked to make sure the alarm time is not * in the past, but there is still a race window where if * the is alarm set for the next second and the second ticks * over right here, before we set the alarm. 
*/ if (!rtc->ops) err = -ENODEV; else if (!rtc->ops->set_alarm) err = -EINVAL; else err = rtc->ops->set_alarm(rtc->dev.parent, alarm); return err; } int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) { int err; err = rtc_valid_tm(&alarm->time); if (err != 0) return err; err = mutex_lock_interruptible(&rtc->ops_lock); if (err) return err; if (rtc->aie_timer.enabled) { rtc_timer_remove(rtc, &rtc->aie_timer); } rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time); rtc->aie_timer.period = ktime_set(0, 0); if (alarm->enabled) { err = rtc_timer_enqueue(rtc, &rtc->aie_timer); } mutex_unlock(&rtc->ops_lock); return err; } EXPORT_SYMBOL_GPL(rtc_set_alarm); /* Called once per device from rtc_device_register */ int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) { int err; err = rtc_valid_tm(&alarm->time); if (err != 0) return err; err = mutex_lock_interruptible(&rtc->ops_lock); if (err) return err; rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time); rtc->aie_timer.period = ktime_set(0, 0); if (alarm->enabled) { rtc->aie_timer.enabled = 1; timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node); } mutex_unlock(&rtc->ops_lock); return err; } EXPORT_SYMBOL_GPL(rtc_initialize_alarm); int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled) { int err = mutex_lock_interruptible(&rtc->ops_lock); if (err) return err; if (rtc->aie_timer.enabled != enabled) { if (enabled) err = rtc_timer_enqueue(rtc, &rtc->aie_timer); else rtc_timer_remove(rtc, &rtc->aie_timer); } if (err) /* nothing */; else if (!rtc->ops) err = -ENODEV; else if (!rtc->ops->alarm_irq_enable) err = -EINVAL; else err = rtc->ops->alarm_irq_enable(rtc->dev.parent, enabled); mutex_unlock(&rtc->ops_lock); return err; } EXPORT_SYMBOL_GPL(rtc_alarm_irq_enable); int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled) { int err = mutex_lock_interruptible(&rtc->ops_lock); if (err) return err; #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL if (enabled == 0 
&& rtc->uie_irq_active) { mutex_unlock(&rtc->ops_lock); return rtc_dev_update_irq_enable_emul(rtc, 0); } #endif /* make sure we're changing state */ if (rtc->uie_rtctimer.enabled == enabled) goto out; if (enabled) { struct rtc_time tm; ktime_t now, onesec; __rtc_read_time(rtc, &tm); onesec = ktime_set(1, 0); now = rtc_tm_to_ktime(tm); rtc->uie_rtctimer.node.expires = ktime_add(now, onesec); rtc->uie_rtctimer.period = ktime_set(1, 0); err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer); } else rtc_timer_remove(rtc, &rtc->uie_rtctimer); out: mutex_unlock(&rtc->ops_lock); #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL /* * Enable emulation if the driver did not provide * the update_irq_enable function pointer or if returned * -EINVAL to signal that it has been configured without * interrupts or that are not available at the moment. */ if (err == -EINVAL) err = rtc_dev_update_irq_enable_emul(rtc, enabled); #endif return err; } EXPORT_SYMBOL_GPL(rtc_update_irq_enable); /** * rtc_handle_legacy_irq - AIE, UIE and PIE event hook * @rtc: pointer to the rtc device * * This function is called when an AIE, UIE or PIE mode interrupt * has occurred (or been emulated). * * Triggers the registered irq_task function callback. */ void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode) { unsigned long flags; /* mark one irq of the appropriate mode */ spin_lock_irqsave(&rtc->irq_lock, flags); rtc->irq_data = (rtc->irq_data + (num << 8)) | (RTC_IRQF|mode); spin_unlock_irqrestore(&rtc->irq_lock, flags); /* call the task func */ spin_lock_irqsave(&rtc->irq_task_lock, flags); if (rtc->irq_task) rtc->irq_task->func(rtc->irq_task->private_data); spin_unlock_irqrestore(&rtc->irq_task_lock, flags); wake_up_interruptible(&rtc->irq_queue); kill_fasync(&rtc->async_queue, SIGIO, POLL_IN); } /** * rtc_aie_update_irq - AIE mode rtctimer hook * @private: pointer to the rtc_device * * This functions is called when the aie_timer expires. 
*/ void rtc_aie_update_irq(void *private) { struct rtc_device *rtc = (struct rtc_device *)private; rtc_handle_legacy_irq(rtc, 1, RTC_AF); } /** * rtc_uie_update_irq - UIE mode rtctimer hook * @private: pointer to the rtc_device * * This functions is called when the uie_timer expires. */ void rtc_uie_update_irq(void *private) { struct rtc_device *rtc = (struct rtc_device *)private; rtc_handle_legacy_irq(rtc, 1, RTC_UF); } /** * rtc_pie_update_irq - PIE mode hrtimer hook * @timer: pointer to the pie mode hrtimer * * This function is used to emulate PIE mode interrupts * using an hrtimer. This function is called when the periodic * hrtimer expires. */ enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer) { struct rtc_device *rtc; ktime_t period; int count; rtc = container_of(timer, struct rtc_device, pie_timer); period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq); count = hrtimer_forward_now(timer, period); rtc_handle_legacy_irq(rtc, count, RTC_PF); return HRTIMER_RESTART; } /** * rtc_update_irq - Triggered when a RTC interrupt occurs. 
* @rtc: the rtc device * @num: how many irqs are being reported (usually one) * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF * Context: any */ void rtc_update_irq(struct rtc_device *rtc, unsigned long num, unsigned long events) { schedule_work(&rtc->irqwork); } EXPORT_SYMBOL_GPL(rtc_update_irq); static int __rtc_match(struct device *dev, void *data) { char *name = (char *)data; if (strcmp(dev_name(dev), name) == 0) return 1; return 0; } struct rtc_device *rtc_class_open(char *name) { struct device *dev; struct rtc_device *rtc = NULL; dev = class_find_device(rtc_class, NULL, name, __rtc_match); if (dev) rtc = to_rtc_device(dev); if (rtc) { if (!try_module_get(rtc->owner)) { put_device(dev); rtc = NULL; } } return rtc; } EXPORT_SYMBOL_GPL(rtc_class_open); void rtc_class_close(struct rtc_device *rtc) { module_put(rtc->owner); put_device(&rtc->dev); } EXPORT_SYMBOL_GPL(rtc_class_close); int rtc_irq_register(struct rtc_device *rtc, struct rtc_task *task) { int retval = -EBUSY; if (task == NULL || task->func == NULL) return -EINVAL; /* Cannot register while the char dev is in use */ if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags)) return -EBUSY; spin_lock_irq(&rtc->irq_task_lock); if (rtc->irq_task == NULL) { rtc->irq_task = task; retval = 0; } spin_unlock_irq(&rtc->irq_task_lock); clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags); return retval; } EXPORT_SYMBOL_GPL(rtc_irq_register); void rtc_irq_unregister(struct rtc_device *rtc, struct rtc_task *task) { spin_lock_irq(&rtc->irq_task_lock); if (rtc->irq_task == task) rtc->irq_task = NULL; spin_unlock_irq(&rtc->irq_task_lock); } EXPORT_SYMBOL_GPL(rtc_irq_unregister); static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled) { /* * We unconditionally cancel the timer here, because otherwise * we could run into BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); * when we manage to start the timer before the callback * returns HRTIMER_RESTART. 
* * We cannot use hrtimer_cancel() here as a running callback * could be blocked on rtc->irq_task_lock and hrtimer_cancel() * would spin forever. */ if (hrtimer_try_to_cancel(&rtc->pie_timer) < 0) return -1; if (enabled) { ktime_t period = ktime_set(0, NSEC_PER_SEC / rtc->irq_freq); hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL); } return 0; } /** * rtc_irq_set_state - enable/disable 2^N Hz periodic IRQs * @rtc: the rtc device * @task: currently registered with rtc_irq_register() * @enabled: true to enable periodic IRQs * Context: any * * Note that rtc_irq_set_freq() should previously have been used to * specify the desired frequency of periodic IRQ task->func() callbacks. */ int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled) { int err = 0; unsigned long flags; retry: spin_lock_irqsave(&rtc->irq_task_lock, flags); if (rtc->irq_task != NULL && task == NULL) err = -EBUSY; if (rtc->irq_task != task) err = -EACCES; if (!err) { if (rtc_update_hrtimer(rtc, enabled) < 0) { spin_unlock_irqrestore(&rtc->irq_task_lock, flags); cpu_relax(); goto retry; } rtc->pie_enabled = enabled; } spin_unlock_irqrestore(&rtc->irq_task_lock, flags); return err; } EXPORT_SYMBOL_GPL(rtc_irq_set_state); /** * rtc_irq_set_freq - set 2^N Hz periodic IRQ frequency for IRQ * @rtc: the rtc device * @task: currently registered with rtc_irq_register() * @freq: positive frequency with which task->func() will be called * Context: any * * Note that rtc_irq_set_state() is used to enable or disable the * periodic IRQs. 
*/ int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq) { int err = 0; unsigned long flags; if (freq <= 0 || freq > RTC_MAX_FREQ) return -EINVAL; retry: spin_lock_irqsave(&rtc->irq_task_lock, flags); if (rtc->irq_task != NULL && task == NULL) err = -EBUSY; if (rtc->irq_task != task) err = -EACCES; if (!err) { rtc->irq_freq = freq; if (rtc->pie_enabled && rtc_update_hrtimer(rtc, 1) < 0) { spin_unlock_irqrestore(&rtc->irq_task_lock, flags); cpu_relax(); goto retry; } } spin_unlock_irqrestore(&rtc->irq_task_lock, flags); return err; } EXPORT_SYMBOL_GPL(rtc_irq_set_freq); /** * rtc_timer_enqueue - Adds a rtc_timer to the rtc_device timerqueue * @rtc rtc device * @timer timer being added. * * Enqueues a timer onto the rtc devices timerqueue and sets * the next alarm event appropriately. * * Sets the enabled bit on the added timer. * * Must hold ops_lock for proper serialization of timerqueue */ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) { timer->enabled = 1; timerqueue_add(&rtc->timerqueue, &timer->node); if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) { struct rtc_wkalrm alarm; int err; alarm.time = rtc_ktime_to_tm(timer->node.expires); alarm.enabled = 1; err = __rtc_set_alarm(rtc, &alarm); if (err == -ETIME) schedule_work(&rtc->irqwork); else if (err) { timerqueue_del(&rtc->timerqueue, &timer->node); timer->enabled = 0; return err; } } return 0; } /** * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue * @rtc rtc device * @timer timer being removed. * * Removes a timer onto the rtc devices timerqueue and sets * the next alarm event appropriately. * * Clears the enabled bit on the removed timer. 
 *
 * Must hold ops_lock for proper serialization of timerqueue
 */
static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
{
	struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
	timerqueue_del(&rtc->timerqueue, &timer->node);
	timer->enabled = 0;
	/* If we removed the timer the hardware alarm was armed for,
	 * re-arm it for the new head of the queue (if any remain). */
	if (next == &timer->node) {
		struct rtc_wkalrm alarm;
		int err;
		next = timerqueue_getnext(&rtc->timerqueue);
		if (!next)
			return;
		alarm.time = rtc_ktime_to_tm(next->expires);
		alarm.enabled = 1;
		err = __rtc_set_alarm(rtc, &alarm);
		/* New head already expired: let the worker deliver it. */
		if (err == -ETIME)
			schedule_work(&rtc->irqwork);
	}
}

/**
 * rtc_timer_do_work - Expires rtc timers
 * @rtc rtc device
 * @timer timer being removed.
 *
 * Expires rtc timers. Reprograms next alarm event if needed.
 * Called via worktask.
 *
 * Serializes access to timerqueue via ops_lock mutex
 */
void rtc_timer_do_work(struct work_struct *work)
{
	struct rtc_timer *timer;
	struct timerqueue_node *next;
	ktime_t now;
	struct rtc_time tm;

	struct rtc_device *rtc =
		container_of(work, struct rtc_device, irqwork);

	mutex_lock(&rtc->ops_lock);
again:
	__rtc_read_time(rtc, &tm);
	now = rtc_tm_to_ktime(tm);
	/* Fire every queued timer whose expiry is not in the future. */
	while ((next = timerqueue_getnext(&rtc->timerqueue))) {
		if (next->expires.tv64 > now.tv64)
			break;

		/* expire timer */
		timer = container_of(next, struct rtc_timer, node);
		timerqueue_del(&rtc->timerqueue, &timer->node);
		timer->enabled = 0;
		if (timer->task.func)
			timer->task.func(timer->task.private_data);

		/* Re-add/fwd periodic timers */
		if (ktime_to_ns(timer->period)) {
			timer->node.expires = ktime_add(timer->node.expires,
							timer->period);
			timer->enabled = 1;
			timerqueue_add(&rtc->timerqueue, &timer->node);
		}
	}

	/* Set next alarm */
	if (next) {
		struct rtc_wkalrm alarm;
		int err;
		alarm.time = rtc_ktime_to_tm(next->expires);
		alarm.enabled = 1;
		err = __rtc_set_alarm(rtc, &alarm);
		/* Raced with the wall clock: the expiry passed while we were
		 * programming the alarm, so rescan with a fresh timestamp. */
		if (err == -ETIME)
			goto again;
	}

	mutex_unlock(&rtc->ops_lock);
}

/* rtc_timer_init - Initializes an rtc_timer
 * @timer: timer to be initialized
 * @f: function pointer to be called when timer fires
 * @data: private data
passed to function pointer
 *
 * Kernel interface to initializing an rtc_timer.
 */
void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data)
{
	timerqueue_init(&timer->node);
	timer->enabled = 0;
	timer->task.func = f;
	timer->task.private_data = data;
}

/* rtc_timer_start - Sets an rtc_timer to fire in the future
 * @ rtc: rtc device to be used
 * @ timer: timer being set
 * @ expires: time at which to expire the timer
 * @ period: period that the timer will recur
 *
 * Kernel interface to set an rtc_timer
 */
int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer,
			ktime_t expires, ktime_t period)
{
	int ret = 0;
	mutex_lock(&rtc->ops_lock);
	/* Re-arming an active timer: remove it first so the timerqueue and
	 * the hardware alarm stay consistent. */
	if (timer->enabled)
		rtc_timer_remove(rtc, timer);

	timer->node.expires = expires;
	timer->period = period;

	ret = rtc_timer_enqueue(rtc, timer);

	mutex_unlock(&rtc->ops_lock);
	return ret;
}

/* rtc_timer_cancel - Stops an rtc_timer
 * @ rtc: rtc device to be used
 * @ timer: timer being set
 *
 * Kernel interface to cancel an rtc_timer
 */
int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer)
{
	int ret = 0;
	mutex_lock(&rtc->ops_lock);
	/* Cancelling an already-stopped timer is a no-op. */
	if (timer->enabled)
		rtc_timer_remove(rtc, timer);
	mutex_unlock(&rtc->ops_lock);
	return ret;
}
gpl-2.0
DartDevs/rfsfsrshit
drivers/rapidio/switches/idt_gen2.c
72
11254
/* * IDT CPS Gen.2 Serial RapidIO switch family support * * Copyright 2010 Integrated Device Technology, Inc. * Alexandre Bounine <alexandre.bounine@idt.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/rio.h> #include <linux/rio_drv.h> #include <linux/rio_ids.h> #include <linux/delay.h> #include "../rio.h" #define LOCAL_RTE_CONF_DESTID_SEL 0x010070 #define LOCAL_RTE_CONF_DESTID_SEL_PSEL 0x0000001f #define IDT_LT_ERR_REPORT_EN 0x03100c #define IDT_PORT_ERR_REPORT_EN(n) (0x031044 + (n)*0x40) #define IDT_PORT_ERR_REPORT_EN_BC 0x03ff04 #define IDT_PORT_ISERR_REPORT_EN(n) (0x03104C + (n)*0x40) #define IDT_PORT_ISERR_REPORT_EN_BC 0x03ff0c #define IDT_PORT_INIT_TX_ACQUIRED 0x00000020 #define IDT_LANE_ERR_REPORT_EN(n) (0x038010 + (n)*0x100) #define IDT_LANE_ERR_REPORT_EN_BC 0x03ff10 #define IDT_DEV_CTRL_1 0xf2000c #define IDT_DEV_CTRL_1_GENPW 0x02000000 #define IDT_DEV_CTRL_1_PRSTBEH 0x00000001 #define IDT_CFGBLK_ERR_CAPTURE_EN 0x020008 #define IDT_CFGBLK_ERR_REPORT 0xf20014 #define IDT_CFGBLK_ERR_REPORT_GENPW 0x00000002 #define IDT_AUX_PORT_ERR_CAP_EN 0x020000 #define IDT_AUX_ERR_REPORT_EN 0xf20018 #define IDT_AUX_PORT_ERR_LOG_I2C 0x00000002 #define IDT_AUX_PORT_ERR_LOG_JTAG 0x00000001 #define IDT_ISLTL_ADDRESS_CAP 0x021014 #define IDT_RIO_DOMAIN 0xf20020 #define IDT_RIO_DOMAIN_MASK 0x000000ff #define IDT_PW_INFO_CSR 0xf20024 #define IDT_SOFT_RESET 0xf20040 #define IDT_SOFT_RESET_REQ 0x00030097 #define IDT_I2C_MCTRL 0xf20050 #define IDT_I2C_MCTRL_GENPW 0x04000000 #define IDT_JTAG_CTRL 0xf2005c #define IDT_JTAG_CTRL_GENPW 0x00000002 #define IDT_LANE_CTRL(n) (0xff8000 + (n)*0x100) #define IDT_LANE_CTRL_BC 0xffff00 #define IDT_LANE_CTRL_GENPW 0x00200000 #define IDT_LANE_DFE_1_BC 0xffff18 #define IDT_LANE_DFE_2_BC 0xffff1c #define IDT_PORT_OPS(n) 
(0xf40004 + (n)*0x100) #define IDT_PORT_OPS_GENPW 0x08000000 #define IDT_PORT_OPS_PL_ELOG 0x00000040 #define IDT_PORT_OPS_LL_ELOG 0x00000020 #define IDT_PORT_OPS_LT_ELOG 0x00000010 #define IDT_PORT_OPS_BC 0xf4ff04 #define IDT_PORT_ISERR_DET(n) (0xf40008 + (n)*0x100) #define IDT_ERR_CAP 0xfd0000 #define IDT_ERR_CAP_LOG_OVERWR 0x00000004 #define IDT_ERR_RD 0xfd0004 #define IDT_DEFAULT_ROUTE 0xde #define IDT_NO_ROUTE 0xdf static int idtg2_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 route_port) { /* * Select routing table to update */ if (table == RIO_GLOBAL_TABLE) table = 0; else table++; rio_mport_write_config_32(mport, destid, hopcount, LOCAL_RTE_CONF_DESTID_SEL, table); /* * Program destination port for the specified destID */ rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_DESTID_SEL_CSR, (u32)route_destid); rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_PORT_SEL_CSR, (u32)route_port); udelay(10); return 0; } static int idtg2_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 *route_port) { u32 result; /* * Select routing table to read */ if (table == RIO_GLOBAL_TABLE) table = 0; else table++; rio_mport_write_config_32(mport, destid, hopcount, LOCAL_RTE_CONF_DESTID_SEL, table); rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); rio_mport_read_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_PORT_SEL_CSR, &result); if (IDT_DEFAULT_ROUTE == (u8)result || IDT_NO_ROUTE == (u8)result) *route_port = RIO_INVALID_ROUTE; else *route_port = (u8)result; return 0; } static int idtg2_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table) { u32 i; /* * Select routing table to read */ if (table == RIO_GLOBAL_TABLE) table = 0; else table++; rio_mport_write_config_32(mport, destid, hopcount, LOCAL_RTE_CONF_DESTID_SEL, table); for (i = RIO_STD_RTE_CONF_EXTCFGEN; i <= 
(RIO_STD_RTE_CONF_EXTCFGEN | 0xff);) { rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_DESTID_SEL_CSR, i); rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_PORT_SEL_CSR, (IDT_DEFAULT_ROUTE << 24) | (IDT_DEFAULT_ROUTE << 16) | (IDT_DEFAULT_ROUTE << 8) | IDT_DEFAULT_ROUTE); i += 4; } return 0; } static int idtg2_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount, u8 sw_domain) { /* * Switch domain configuration operates only at global level */ rio_mport_write_config_32(mport, destid, hopcount, IDT_RIO_DOMAIN, (u32)sw_domain); return 0; } static int idtg2_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount, u8 *sw_domain) { u32 regval; /* * Switch domain configuration operates only at global level */ rio_mport_read_config_32(mport, destid, hopcount, IDT_RIO_DOMAIN, &regval); *sw_domain = (u8)(regval & 0xff); return 0; } static int idtg2_em_init(struct rio_dev *rdev) { u32 regval; int i, tmp; /* * This routine performs device-specific initialization only. * All standard EM configuration should be performed at upper level. */ pr_debug("RIO: %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount); /* Set Port-Write info CSR: PRIO=3 and CRF=1 */ rio_write_config_32(rdev, IDT_PW_INFO_CSR, 0x0000e000); /* * Configure LT LAYER error reporting. */ /* Enable standard (RIO.p8) error reporting */ rio_write_config_32(rdev, IDT_LT_ERR_REPORT_EN, REM_LTL_ERR_ILLTRAN | REM_LTL_ERR_UNSOLR | REM_LTL_ERR_UNSUPTR); /* Use Port-Writes for LT layer error reporting. * Enable per-port reset */ rio_read_config_32(rdev, IDT_DEV_CTRL_1, &regval); rio_write_config_32(rdev, IDT_DEV_CTRL_1, regval | IDT_DEV_CTRL_1_GENPW | IDT_DEV_CTRL_1_PRSTBEH); /* * Configure PORT error reporting. 
*/ /* Report all RIO.p8 errors supported by device */ rio_write_config_32(rdev, IDT_PORT_ERR_REPORT_EN_BC, 0x807e8037); /* Configure reporting of implementation specific errors/events */ rio_write_config_32(rdev, IDT_PORT_ISERR_REPORT_EN_BC, IDT_PORT_INIT_TX_ACQUIRED); /* Use Port-Writes for port error reporting and enable error logging */ tmp = RIO_GET_TOTAL_PORTS(rdev->swpinfo); for (i = 0; i < tmp; i++) { rio_read_config_32(rdev, IDT_PORT_OPS(i), &regval); rio_write_config_32(rdev, IDT_PORT_OPS(i), regval | IDT_PORT_OPS_GENPW | IDT_PORT_OPS_PL_ELOG | IDT_PORT_OPS_LL_ELOG | IDT_PORT_OPS_LT_ELOG); } /* Overwrite error log if full */ rio_write_config_32(rdev, IDT_ERR_CAP, IDT_ERR_CAP_LOG_OVERWR); /* * Configure LANE error reporting. */ /* Disable line error reporting */ rio_write_config_32(rdev, IDT_LANE_ERR_REPORT_EN_BC, 0); /* Use Port-Writes for lane error reporting (when enabled) * (do per-lane update because lanes may have different configuration) */ tmp = (rdev->did == RIO_DID_IDTCPS1848) ? 48 : 16; for (i = 0; i < tmp; i++) { rio_read_config_32(rdev, IDT_LANE_CTRL(i), &regval); rio_write_config_32(rdev, IDT_LANE_CTRL(i), regval | IDT_LANE_CTRL_GENPW); } /* * Configure AUX error reporting. */ /* Disable JTAG and I2C Error capture */ rio_write_config_32(rdev, IDT_AUX_PORT_ERR_CAP_EN, 0); /* Disable JTAG and I2C Error reporting/logging */ rio_write_config_32(rdev, IDT_AUX_ERR_REPORT_EN, 0); /* Disable Port-Write notification from JTAG */ rio_write_config_32(rdev, IDT_JTAG_CTRL, 0); /* Disable Port-Write notification from I2C */ rio_read_config_32(rdev, IDT_I2C_MCTRL, &regval); rio_write_config_32(rdev, IDT_I2C_MCTRL, regval & ~IDT_I2C_MCTRL_GENPW); /* * Configure CFG_BLK error reporting. 
*/ /* Disable Configuration Block error capture */ rio_write_config_32(rdev, IDT_CFGBLK_ERR_CAPTURE_EN, 0); /* Disable Port-Writes for Configuration Block error reporting */ rio_read_config_32(rdev, IDT_CFGBLK_ERR_REPORT, &regval); rio_write_config_32(rdev, IDT_CFGBLK_ERR_REPORT, regval & ~IDT_CFGBLK_ERR_REPORT_GENPW); /* set TVAL = ~50us */ rio_write_config_32(rdev, rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); return 0; } static int idtg2_em_handler(struct rio_dev *rdev, u8 portnum) { u32 regval, em_perrdet, em_ltlerrdet; rio_read_config_32(rdev, rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet); if (em_ltlerrdet) { /* Service Logical/Transport Layer Error(s) */ if (em_ltlerrdet & REM_LTL_ERR_IMPSPEC) { /* Implementation specific error reported */ rio_read_config_32(rdev, IDT_ISLTL_ADDRESS_CAP, &regval); pr_debug("RIO: %s Implementation Specific LTL errors" \ " 0x%x @(0x%x)\n", rio_name(rdev), em_ltlerrdet, regval); /* Clear implementation specific address capture CSR */ rio_write_config_32(rdev, IDT_ISLTL_ADDRESS_CAP, 0); } } rio_read_config_32(rdev, rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet); if (em_perrdet) { /* Service Port-Level Error(s) */ if (em_perrdet & REM_PED_IMPL_SPEC) { /* Implementation Specific port error reported */ /* Get IS errors reported */ rio_read_config_32(rdev, IDT_PORT_ISERR_DET(portnum), &regval); pr_debug("RIO: %s Implementation Specific Port" \ " errors 0x%x\n", rio_name(rdev), regval); /* Clear all implementation specific events */ rio_write_config_32(rdev, IDT_PORT_ISERR_DET(portnum), 0); } } return 0; } static ssize_t idtg2_show_errlog(struct device *dev, struct device_attribute *attr, char *buf) { struct rio_dev *rdev = to_rio_dev(dev); ssize_t len = 0; u32 regval; while (!rio_read_config_32(rdev, IDT_ERR_RD, &regval)) { if (!regval) /* 0 = end of log */ break; len += snprintf(buf + len, PAGE_SIZE - len, "%08x\n", regval); if (len >= (PAGE_SIZE - 10)) break; } return len; } static 
DEVICE_ATTR(errlog, S_IRUGO, idtg2_show_errlog, NULL); static int idtg2_sysfs(struct rio_dev *rdev, int create) { struct device *dev = &rdev->dev; int err = 0; if (create == RIO_SW_SYSFS_CREATE) { /* Initialize sysfs entries */ err = device_create_file(dev, &dev_attr_errlog); if (err) dev_err(dev, "Unable create sysfs errlog file\n"); } else device_remove_file(dev, &dev_attr_errlog); return err; } static int idtg2_switch_init(struct rio_dev *rdev, int do_enum) { pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); rdev->rswitch->add_entry = idtg2_route_add_entry; rdev->rswitch->get_entry = idtg2_route_get_entry; rdev->rswitch->clr_table = idtg2_route_clr_table; rdev->rswitch->set_domain = idtg2_set_domain; rdev->rswitch->get_domain = idtg2_get_domain; rdev->rswitch->em_init = idtg2_em_init; rdev->rswitch->em_handle = idtg2_em_handler; rdev->rswitch->sw_sysfs = idtg2_sysfs; return 0; } DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1848, idtg2_switch_init); DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1616, idtg2_switch_init); DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTVPS1616, idtg2_switch_init); DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTSPS1616, idtg2_switch_init);
gpl-2.0
BOOTMGR/lge_victo_msm7x30-CM
drivers/video/msm/vidc/common/vcd/vcd_power_sm.c
72
8376
/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * */ #include "vidc_type.h" #include "vcd_power_sm.h" #include "vcd_core.h" #include "vcd.h" u32 vcd_power_event( struct vcd_dev_ctxt *dev_ctxt, struct vcd_clnt_ctxt *cctxt, u32 event) { u32 rc = VCD_S_SUCCESS; VCD_MSG_MED("Device power state = %d", dev_ctxt->pwr_clk_state); VCD_MSG_MED("event = 0x%x", event); switch (event) { case VCD_EVT_PWR_DEV_INIT_BEGIN: case VCD_EVT_PWR_DEV_INIT_END: case VCD_EVT_PWR_DEV_INIT_FAIL: case VCD_EVT_PWR_DEV_TERM_BEGIN: case VCD_EVT_PWR_DEV_TERM_END: case VCD_EVT_PWR_DEV_TERM_FAIL: case VCD_EVT_PWR_DEV_SLEEP_BEGIN: case VCD_EVT_PWR_DEV_SLEEP_END: case VCD_EVT_PWR_DEV_SET_PERFLVL: case VCD_EVT_PWR_DEV_HWTIMEOUT: { rc = vcd_device_power_event(dev_ctxt, event, cctxt); break; } case VCD_EVT_PWR_CLNT_CMD_BEGIN: case VCD_EVT_PWR_CLNT_CMD_END: case VCD_EVT_PWR_CLNT_CMD_FAIL: case VCD_EVT_PWR_CLNT_PAUSE: case VCD_EVT_PWR_CLNT_RESUME: case VCD_EVT_PWR_CLNT_FIRST_FRAME: case VCD_EVT_PWR_CLNT_LAST_FRAME: case VCD_EVT_PWR_CLNT_ERRFATAL: { rc = vcd_client_power_event(dev_ctxt, cctxt, event); break; } } if (VCD_FAILED(rc)) VCD_MSG_ERROR("vcd_power_event: event 0x%x failed", event); return rc; } u32 vcd_device_power_event(struct vcd_dev_ctxt *dev_ctxt, u32 event, struct vcd_clnt_ctxt *cctxt) { u32 rc = VCD_ERR_FAIL; u32 set_perf_lvl; 
switch (event) { case VCD_EVT_PWR_DEV_INIT_BEGIN: { if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF) { if (res_trk_get_max_perf_level(&dev_ctxt-> max_perf_lvl)) { if (res_trk_power_up()) { dev_ctxt->pwr_clk_state = VCD_PWRCLK_STATE_ON_NOTCLOCKED; dev_ctxt->curr_perf_lvl = 0; dev_ctxt->reqd_perf_lvl = 0; dev_ctxt->active_clnts = 0; dev_ctxt-> set_perf_lvl_pending = false; rc = vcd_enable_clock(dev_ctxt, cctxt); if (VCD_FAILED(rc)) { (void)res_trk_power_down(); dev_ctxt->pwr_clk_state = VCD_PWRCLK_STATE_OFF; } } } } break; } case VCD_EVT_PWR_DEV_INIT_END: case VCD_EVT_PWR_DEV_TERM_FAIL: case VCD_EVT_PWR_DEV_SLEEP_BEGIN: case VCD_EVT_PWR_DEV_HWTIMEOUT: { rc = vcd_gate_clock(dev_ctxt); break; } case VCD_EVT_PWR_DEV_INIT_FAIL: case VCD_EVT_PWR_DEV_TERM_END: { if (dev_ctxt->pwr_clk_state != VCD_PWRCLK_STATE_OFF) { (void)vcd_disable_clock(dev_ctxt); (void)res_trk_power_down(); dev_ctxt->pwr_clk_state = VCD_PWRCLK_STATE_OFF; dev_ctxt->curr_perf_lvl = 0; dev_ctxt->reqd_perf_lvl = 0; dev_ctxt->active_clnts = 0; dev_ctxt->set_perf_lvl_pending = false; rc = VCD_S_SUCCESS; } break; } case VCD_EVT_PWR_DEV_TERM_BEGIN: case VCD_EVT_PWR_DEV_SLEEP_END: { rc = vcd_un_gate_clock(dev_ctxt); break; } case VCD_EVT_PWR_DEV_SET_PERFLVL: { set_perf_lvl = dev_ctxt->reqd_perf_lvl > 0 ? 
dev_ctxt-> reqd_perf_lvl : VCD_MIN_PERF_LEVEL; rc = vcd_set_perf_level(dev_ctxt, set_perf_lvl); break; } } return rc; } u32 vcd_client_power_event( struct vcd_dev_ctxt *dev_ctxt, struct vcd_clnt_ctxt *cctxt, u32 event) { u32 rc = VCD_ERR_FAIL; switch (event) { case VCD_EVT_PWR_CLNT_CMD_BEGIN: { rc = vcd_un_gate_clock(dev_ctxt); break; } case VCD_EVT_PWR_CLNT_CMD_END: { rc = vcd_gate_clock(dev_ctxt); break; } case VCD_EVT_PWR_CLNT_CMD_FAIL: { if (!vcd_core_is_busy(dev_ctxt)) rc = vcd_gate_clock(dev_ctxt); break; } case VCD_EVT_PWR_CLNT_PAUSE: case VCD_EVT_PWR_CLNT_LAST_FRAME: case VCD_EVT_PWR_CLNT_ERRFATAL: { if (cctxt) { rc = VCD_S_SUCCESS; if (cctxt->status.req_perf_lvl) { dev_ctxt->reqd_perf_lvl -= cctxt->reqd_perf_lvl; cctxt->status.req_perf_lvl = false; rc = vcd_set_perf_level(dev_ctxt, dev_ctxt->reqd_perf_lvl); } } break; } case VCD_EVT_PWR_CLNT_RESUME: case VCD_EVT_PWR_CLNT_FIRST_FRAME: { if (cctxt) { rc = VCD_S_SUCCESS; if (!cctxt->status.req_perf_lvl) { dev_ctxt->reqd_perf_lvl += cctxt->reqd_perf_lvl; cctxt->status.req_perf_lvl = true; rc = vcd_set_perf_level(dev_ctxt, dev_ctxt->reqd_perf_lvl); } } break; } } return rc; } u32 vcd_enable_clock(struct vcd_dev_ctxt *dev_ctxt, struct vcd_clnt_ctxt *cctxt) { u32 rc = VCD_S_SUCCESS; u32 set_perf_lvl; if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF) { VCD_MSG_ERROR("vcd_enable_clock(): Already in state " "VCD_PWRCLK_STATE_OFF\n"); rc = VCD_ERR_FAIL; } else if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_NOTCLOCKED) { set_perf_lvl = dev_ctxt->reqd_perf_lvl > 0 ? 
dev_ctxt-> reqd_perf_lvl : VCD_MIN_PERF_LEVEL; rc = vcd_set_perf_level(dev_ctxt, set_perf_lvl); if (!VCD_FAILED(rc)) { if (res_trk_enable_clocks()) { dev_ctxt->pwr_clk_state = VCD_PWRCLK_STATE_ON_CLOCKED; } } else { rc = VCD_ERR_FAIL; } } if (!VCD_FAILED(rc)) dev_ctxt->active_clnts++; return rc; } u32 vcd_disable_clock(struct vcd_dev_ctxt *dev_ctxt) { u32 rc = VCD_S_SUCCESS; if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF) { VCD_MSG_ERROR("vcd_disable_clock(): Already in state " "VCD_PWRCLK_STATE_OFF\n"); rc = VCD_ERR_FAIL; } else if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_CLOCKED || dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_CLOCKGATED) { dev_ctxt->active_clnts--; if (!dev_ctxt->active_clnts) { if (!res_trk_disable_clocks()) rc = VCD_ERR_FAIL; dev_ctxt->pwr_clk_state = VCD_PWRCLK_STATE_ON_NOTCLOCKED; dev_ctxt->curr_perf_lvl = 0; } } return rc; } u32 vcd_set_perf_level(struct vcd_dev_ctxt *dev_ctxt, u32 perf_lvl) { u32 rc = VCD_S_SUCCESS; if (!vcd_core_is_busy(dev_ctxt)) { if (res_trk_set_perf_level(perf_lvl, &dev_ctxt->curr_perf_lvl, dev_ctxt)) { dev_ctxt->set_perf_lvl_pending = false; } else { rc = VCD_ERR_FAIL; dev_ctxt->set_perf_lvl_pending = true; } } else { dev_ctxt->set_perf_lvl_pending = true; } return rc; } u32 vcd_update_clnt_perf_lvl( struct vcd_clnt_ctxt *cctxt, struct vcd_property_frame_rate *fps, u32 frm_p_units) { u32 rc = VCD_S_SUCCESS; struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt; u32 new_perf_lvl; new_perf_lvl = frm_p_units * fps->fps_numerator / fps->fps_denominator; if (cctxt->status.req_perf_lvl) { dev_ctxt->reqd_perf_lvl = dev_ctxt->reqd_perf_lvl - cctxt->reqd_perf_lvl + new_perf_lvl; rc = vcd_set_perf_level(cctxt->dev_ctxt, dev_ctxt->reqd_perf_lvl); } cctxt->reqd_perf_lvl = new_perf_lvl; return rc; } u32 vcd_gate_clock(struct vcd_dev_ctxt *dev_ctxt) { u32 rc = VCD_S_SUCCESS; #ifndef VIDC_1080p_DISABLE_GATING if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF || dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_NOTCLOCKED) { 
VCD_MSG_ERROR("%s(): Clk is Off or Not Clked yet\n", __func__); rc = VCD_ERR_FAIL; } else if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_CLOCKGATED) rc = VCD_S_SUCCESS; else if (res_trk_disable_clocks()) dev_ctxt->pwr_clk_state = VCD_PWRCLK_STATE_ON_CLOCKGATED; else rc = VCD_ERR_FAIL; #endif return rc; } u32 vcd_un_gate_clock(struct vcd_dev_ctxt *dev_ctxt) { u32 rc = VCD_S_SUCCESS; #ifndef VIDC_1080p_DISABLE_GATING if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF || dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_NOTCLOCKED) { VCD_MSG_ERROR("%s(): Clk is Off or Not Clked yet\n", __func__); rc = VCD_ERR_FAIL; } else if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_CLOCKED) rc = VCD_S_SUCCESS; else if (res_trk_enable_clocks()) dev_ctxt->pwr_clk_state = VCD_PWRCLK_STATE_ON_CLOCKED; else rc = VCD_ERR_FAIL; #endif return rc; }
gpl-2.0
kgilmer/openjdk-7-mermaid
src/share/native/sun/security/ec/impl/mplogic.c
72
5897
/* * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ /* ********************************************************************* * * The Original Code is the MPI Arbitrary Precision Integer Arithmetic library. * * The Initial Developer of the Original Code is * Michael J. Fromberger. * Portions created by the Initial Developer are Copyright (C) 1998 * the Initial Developer. All Rights Reserved. 
* * Contributor(s): * *********************************************************************** */ /* Bitwise logical operations on MPI values */ #include "mpi-priv.h" #include "mplogic.h" /* {{{ Lookup table for population count */ static unsigned char bitc[] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 }; /* }}} */ /* mpl_rsh(a, b, d) - b = a >> d mpl_lsh(a, b, d) - b = a << d */ /* {{{ mpl_rsh(a, b, d) */ mp_err mpl_rsh(const mp_int *a, mp_int *b, mp_digit d) { mp_err res; ARGCHK(a != NULL && b != NULL, MP_BADARG); if((res = mp_copy(a, b)) != MP_OKAY) return res; s_mp_div_2d(b, d); return MP_OKAY; } /* end mpl_rsh() */ /* }}} */ /* {{{ mpl_lsh(a, b, d) */ mp_err mpl_lsh(const mp_int *a, mp_int *b, mp_digit d) { mp_err res; ARGCHK(a != NULL && b != NULL, MP_BADARG); if((res = mp_copy(a, b)) != MP_OKAY) return res; return s_mp_mul_2d(b, d); } /* end mpl_lsh() */ /* }}} */ /*------------------------------------------------------------------------*/ /* mpl_set_bit Returns MP_OKAY or some error code. Grows a if needed to set a bit to 1. 
*/ mp_err mpl_set_bit(mp_int *a, mp_size bitNum, mp_size value) { mp_size ix; mp_err rv; mp_digit mask; ARGCHK(a != NULL, MP_BADARG); ix = bitNum / MP_DIGIT_BIT; if (ix + 1 > MP_USED(a)) { rv = s_mp_pad(a, ix + 1); if (rv != MP_OKAY) return rv; } bitNum = bitNum % MP_DIGIT_BIT; mask = (mp_digit)1 << bitNum; if (value) MP_DIGIT(a,ix) |= mask; else MP_DIGIT(a,ix) &= ~mask; s_mp_clamp(a); return MP_OKAY; } /* mpl_get_bit returns 0 or 1 or some (negative) error code. */ mp_err mpl_get_bit(const mp_int *a, mp_size bitNum) { mp_size bit, ix; mp_err rv; ARGCHK(a != NULL, MP_BADARG); ix = bitNum / MP_DIGIT_BIT; ARGCHK(ix <= MP_USED(a) - 1, MP_RANGE); bit = bitNum % MP_DIGIT_BIT; rv = (mp_err)(MP_DIGIT(a, ix) >> bit) & 1; return rv; } /* mpl_get_bits - Extracts numBits bits from a, where the least significant extracted bit is bit lsbNum. Returns a negative value if error occurs. - Because sign bit is used to indicate error, maximum number of bits to be returned is the lesser of (a) the number of bits in an mp_digit, or (b) one less than the number of bits in an mp_err. - lsbNum + numbits can be greater than the number of significant bits in integer a, as long as bit lsbNum is in the high order digit of a. */ mp_err mpl_get_bits(const mp_int *a, mp_size lsbNum, mp_size numBits) { mp_size rshift = (lsbNum % MP_DIGIT_BIT); mp_size lsWndx = (lsbNum / MP_DIGIT_BIT); mp_digit * digit = MP_DIGITS(a) + lsWndx; mp_digit mask = ((1 << numBits) - 1); ARGCHK(numBits < CHAR_BIT * sizeof mask, MP_BADARG); ARGCHK(MP_HOWMANY(lsbNum, MP_DIGIT_BIT) <= MP_USED(a), MP_RANGE); if ((numBits + lsbNum % MP_DIGIT_BIT <= MP_DIGIT_BIT) || (lsWndx + 1 >= MP_USED(a))) { mask &= (digit[0] >> rshift); } else { mask &= ((digit[0] >> rshift) | (digit[1] << (MP_DIGIT_BIT - rshift))); } return (mp_err)mask; } /* mpl_significant_bits returns number of significnant bits in abs(a). returns 1 if value is zero. 
 */
/* Count the significant bits of |a|: the position of the highest set bit
   plus one. A zero value reports 1 bit by convention. */
mp_err mpl_significant_bits(const mp_int *a)
{
  mp_err bits = 0;
  int ix;

  ARGCHK(a != NULL, MP_BADARG);

  ix = MP_USED(a);
  /* Scan down from the most significant digit to the first nonzero one,
     then count the bits used within that digit. */
  for (ix = MP_USED(a); ix > 0; ) {
    mp_digit d;
    d = MP_DIGIT(a, --ix);
    if (d) {
      while (d) {
        ++bits;
        d >>= 1;
      }
      break;
    }
  }
  /* ix full digits remain below the top nonzero digit. */
  bits += ix * MP_DIGIT_BIT;
  /* Zero still occupies one bit. */
  if (!bits)
    bits = 1;
  return bits;
}

/*------------------------------------------------------------------------*/
/* HERE THERE BE DRAGONS */
gpl-2.0
nagataka/linux-2.6.32.65
drivers/input/input.c
72
45777
/* * The input core * * Copyright (c) 1999-2002 Vojtech Pavlik */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/init.h> #include <linux/types.h> #include <linux/input.h> #include <linux/module.h> #include <linux/random.h> #include <linux/major.h> #include <linux/proc_fs.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/poll.h> #include <linux/device.h> #include <linux/mutex.h> #include <linux/rcupdate.h> #include <linux/smp_lock.h> #include "input-compat.h" MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>"); MODULE_DESCRIPTION("Input core"); MODULE_LICENSE("GPL"); #define INPUT_DEVICES 256 /* * EV_ABS events which should not be cached are listed here. */ static unsigned int input_abs_bypass_init_data[] __initdata = { ABS_MT_TOUCH_MAJOR, ABS_MT_TOUCH_MINOR, ABS_MT_WIDTH_MAJOR, ABS_MT_WIDTH_MINOR, ABS_MT_ORIENTATION, ABS_MT_POSITION_X, ABS_MT_POSITION_Y, ABS_MT_TOOL_TYPE, ABS_MT_BLOB_ID, ABS_MT_TRACKING_ID, 0 }; static unsigned long input_abs_bypass[BITS_TO_LONGS(ABS_CNT)]; static LIST_HEAD(input_dev_list); static LIST_HEAD(input_handler_list); /* * input_mutex protects access to both input_dev_list and input_handler_list. * This also causes input_[un]register_device and input_[un]register_handler * be mutually exclusive which simplifies locking in drivers implementing * input handlers. 
 */
static DEFINE_MUTEX(input_mutex);

static struct input_handler *input_table[8];

/* True when @code is a valid event code (<= @max) set in bitmap @bm. */
static inline int is_event_supported(unsigned int code,
				     unsigned long *bm, unsigned int max)
{
	return code <= max && test_bit(code, bm);
}

/* Smooth noisy absolute-axis readings around the previous value:
 * within fuzz/2 the old value is kept, within fuzz the new value is
 * weighted 1/4, within 2*fuzz it is weighted 1/2; otherwise it is
 * passed through unchanged. */
static int input_defuzz_abs_event(int value, int old_val, int fuzz)
{
	if (fuzz) {
		if (value > old_val - fuzz / 2 && value < old_val + fuzz / 2)
			return old_val;

		if (value > old_val - fuzz && value < old_val + fuzz)
			return (old_val * 3 + value) / 4;

		if (value > old_val - fuzz * 2 && value < old_val + fuzz * 2)
			return (old_val + value) / 2;
	}

	return value;
}

/*
 * Pass event through all open handles. This function is called with
 * dev->event_lock held and interrupts disabled.
 */
static void input_pass_event(struct input_dev *dev,
			     unsigned int type, unsigned int code, int value)
{
	struct input_handle *handle;

	rcu_read_lock();

	/* A grabbing handle gets exclusive delivery; otherwise fan the
	 * event out to every open handle on the device. */
	handle = rcu_dereference(dev->grab);
	if (handle)
		handle->handler->event(handle, type, code, value);
	else
		list_for_each_entry_rcu(handle, &dev->h_list, d_node)
			if (handle->open)
				handle->handler->event(handle,
							type, code, value);
	rcu_read_unlock();
}

/*
 * Generate software autorepeat event. Note that we take
 * dev->event_lock here to avoid racing with input_event
 * which may cause keys get "stuck".
 */
static void input_repeat_key(unsigned long data)
{
	struct input_dev *dev = (void *) data;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

	/* Only repeat while the key is still down and still valid. */
	if (test_bit(dev->repeat_key, dev->key) &&
	    is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {

		/* value 2 marks a software autorepeat event. */
		input_pass_event(dev, EV_KEY, dev->repeat_key, 2);

		if (dev->sync) {
			/*
			 * Only send SYN_REPORT if we are not in a middle
			 * of driver parsing a new hardware packet.
			 * Otherwise assume that the driver will send
			 * SYN_REPORT once it's done.
*/ input_pass_event(dev, EV_SYN, SYN_REPORT, 1); } if (dev->rep[REP_PERIOD]) mod_timer(&dev->timer, jiffies + msecs_to_jiffies(dev->rep[REP_PERIOD])); } spin_unlock_irqrestore(&dev->event_lock, flags); } static void input_start_autorepeat(struct input_dev *dev, int code) { if (test_bit(EV_REP, dev->evbit) && dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] && dev->timer.data) { dev->repeat_key = code; mod_timer(&dev->timer, jiffies + msecs_to_jiffies(dev->rep[REP_DELAY])); } } static void input_stop_autorepeat(struct input_dev *dev) { del_timer(&dev->timer); } #define INPUT_IGNORE_EVENT 0 #define INPUT_PASS_TO_HANDLERS 1 #define INPUT_PASS_TO_DEVICE 2 #define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE) static void input_handle_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) { int disposition = INPUT_IGNORE_EVENT; switch (type) { case EV_SYN: switch (code) { case SYN_CONFIG: disposition = INPUT_PASS_TO_ALL; break; case SYN_REPORT: if (!dev->sync) { dev->sync = 1; disposition = INPUT_PASS_TO_HANDLERS; } break; case SYN_MT_REPORT: dev->sync = 0; disposition = INPUT_PASS_TO_HANDLERS; break; } break; case EV_KEY: if (is_event_supported(code, dev->keybit, KEY_MAX) && !!test_bit(code, dev->key) != value) { if (value != 2) { __change_bit(code, dev->key); if (value) input_start_autorepeat(dev, code); else input_stop_autorepeat(dev); } disposition = INPUT_PASS_TO_HANDLERS; } break; case EV_SW: if (is_event_supported(code, dev->swbit, SW_MAX) && !!test_bit(code, dev->sw) != value) { __change_bit(code, dev->sw); disposition = INPUT_PASS_TO_HANDLERS; } break; case EV_ABS: if (is_event_supported(code, dev->absbit, ABS_MAX)) { if (test_bit(code, input_abs_bypass)) { disposition = INPUT_PASS_TO_HANDLERS; break; } value = input_defuzz_abs_event(value, dev->abs[code], dev->absfuzz[code]); if (dev->abs[code] != value) { dev->abs[code] = value; disposition = INPUT_PASS_TO_HANDLERS; } } break; case EV_REL: if (is_event_supported(code, 
dev->relbit, REL_MAX) && value) disposition = INPUT_PASS_TO_HANDLERS; break; case EV_MSC: if (is_event_supported(code, dev->mscbit, MSC_MAX)) disposition = INPUT_PASS_TO_ALL; break; case EV_LED: if (is_event_supported(code, dev->ledbit, LED_MAX) && !!test_bit(code, dev->led) != value) { __change_bit(code, dev->led); disposition = INPUT_PASS_TO_ALL; } break; case EV_SND: if (is_event_supported(code, dev->sndbit, SND_MAX)) { if (!!test_bit(code, dev->snd) != !!value) __change_bit(code, dev->snd); disposition = INPUT_PASS_TO_ALL; } break; case EV_REP: if (code <= REP_MAX && value >= 0 && dev->rep[code] != value) { dev->rep[code] = value; disposition = INPUT_PASS_TO_ALL; } break; case EV_FF: if (value >= 0) disposition = INPUT_PASS_TO_ALL; break; case EV_PWR: disposition = INPUT_PASS_TO_ALL; break; } if (disposition != INPUT_IGNORE_EVENT && type != EV_SYN) dev->sync = 0; if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event) dev->event(dev, type, code, value); if (disposition & INPUT_PASS_TO_HANDLERS) input_pass_event(dev, type, code, value); } /** * input_event() - report new input event * @dev: device that generated the event * @type: type of the event * @code: event code * @value: value of the event * * This function should be used by drivers implementing various input * devices. See also input_inject_event(). 
*/ void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) { unsigned long flags; if (is_event_supported(type, dev->evbit, EV_MAX)) { spin_lock_irqsave(&dev->event_lock, flags); add_input_randomness(type, code, value); input_handle_event(dev, type, code, value); spin_unlock_irqrestore(&dev->event_lock, flags); } } EXPORT_SYMBOL(input_event); /** * input_inject_event() - send input event from input handler * @handle: input handle to send event through * @type: type of the event * @code: event code * @value: value of the event * * Similar to input_event() but will ignore event if device is * "grabbed" and handle injecting event is not the one that owns * the device. */ void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value) { struct input_dev *dev = handle->dev; struct input_handle *grab; unsigned long flags; if (is_event_supported(type, dev->evbit, EV_MAX)) { spin_lock_irqsave(&dev->event_lock, flags); rcu_read_lock(); grab = rcu_dereference(dev->grab); if (!grab || grab == handle) input_handle_event(dev, type, code, value); rcu_read_unlock(); spin_unlock_irqrestore(&dev->event_lock, flags); } } EXPORT_SYMBOL(input_inject_event); /** * input_grab_device - grabs device for exclusive use * @handle: input handle that wants to own the device * * When a device is grabbed by an input handle all events generated by * the device are delivered only to this handle. Also events injected * by other input handles are ignored while device is grabbed. 
*/
int input_grab_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->grab) {
		/* Somebody else already owns the device. */
		retval = -EBUSY;
		goto out;
	}

	rcu_assign_pointer(dev->grab, handle);
	/* Make sure input_pass_event() sees the new grab before we return. */
	synchronize_rcu();

 out:
	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_grab_device);

/* Drop @handle's grab, if it holds one. Caller must hold dev->mutex. */
static void __input_release_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	if (dev->grab == handle) {
		rcu_assign_pointer(dev->grab, NULL);
		/* Make sure input_pass_event() notices that grab is gone */
		synchronize_rcu();

		/* Let the remaining open handles resynchronize device state. */
		list_for_each_entry(handle, &dev->h_list, d_node)
			if (handle->open && handle->handler->start)
				handle->handler->start(handle);
	}
}

/**
 * input_release_device - release previously grabbed device
 * @handle: input handle that owns the device
 *
 * Releases previously grabbed device so that other input handles can
 * start receiving input events. Upon release all handlers attached
 * to the device have their start() method called so they have a chance
 * to synchronize device state with the rest of the system.
 */
void input_release_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	mutex_lock(&dev->mutex);
	__input_release_device(handle);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_release_device);

/**
 * input_open_device - open input device
 * @handle: handle through which device is being accessed
 *
 * This function should be called by input handlers when they
 * want to start receiving events from a given input device.
*/
int input_open_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->going_away) {
		/* Device is being unregistered; refuse new opens. */
		retval = -ENODEV;
		goto out;
	}

	handle->open++;

	/* First user of the device: let the driver power it up. */
	if (!dev->users++ && dev->open)
		retval = dev->open(dev);

	if (retval) {
		/* Driver open() failed: roll both counters back. */
		dev->users--;
		if (!--handle->open) {
			/*
			 * Make sure we are not delivering any more events
			 * through this handle
			 */
			synchronize_rcu();
		}
	}

 out:
	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_open_device);

/* Forward a flush request for @file to the device driver, if it has one. */
int input_flush_device(struct input_handle *handle, struct file *file)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->flush)
		retval = dev->flush(dev, file);

	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_flush_device);

/**
 * input_close_device - close input device
 * @handle: handle through which device is being accessed
 *
 * This function should be called by input handlers when they
 * want to stop receiving events from a given input device.
 */
void input_close_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	mutex_lock(&dev->mutex);

	/* Drop our grab (if any) before counting ourselves out. */
	__input_release_device(handle);

	/* Last user gone: let the driver power the device down. */
	if (!--dev->users && dev->close)
		dev->close(dev);

	if (!--handle->open) {
		/*
		 * synchronize_rcu() makes sure that input_pass_event()
		 * completed and that no more input events are delivered
		 * through this handle
		 */
		synchronize_rcu();
	}

	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_close_device);

/*
 * Prepare device for unregistering
 */
static void input_disconnect_device(struct input_dev *dev)
{
	struct input_handle *handle;
	int code;

	/*
	 * Mark device as going away.
Note that we take dev->mutex here
	 * not to protect access to dev->going_away but rather to ensure
	 * that there are no threads in the middle of input_open_device()
	 */
	mutex_lock(&dev->mutex);
	dev->going_away = true;
	mutex_unlock(&dev->mutex);

	spin_lock_irq(&dev->event_lock);

	/*
	 * Simulate keyup events for all pressed keys so that handlers
	 * are not left with "stuck" keys. The driver may continue to
	 * generate events even after we are done here but they will not
	 * reach any handlers.
	 */
	if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
		for (code = 0; code <= KEY_MAX; code++) {
			if (is_event_supported(code, dev->keybit, KEY_MAX) &&
			    __test_and_clear_bit(code, dev->key)) {
				input_pass_event(dev, EV_KEY, code, 0);
			}
		}
		input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
	}

	/* Keep handles attached but stop event delivery through them. */
	list_for_each_entry(handle, &dev->h_list, d_node)
		handle->open = 0;

	spin_unlock_irq(&dev->event_lock);
}

/* Read the keycode stored for @scancode, honoring the table cell size. */
static int input_fetch_keycode(struct input_dev *dev, int scancode)
{
	switch (dev->keycodesize) {
	case 1:
		return ((u8 *)dev->keycode)[scancode];

	case 2:
		return ((u16 *)dev->keycode)[scancode];

	default:
		return ((u32 *)dev->keycode)[scancode];
	}
}

/* Default getkeycode method: direct lookup in the dev->keycode table. */
static int input_default_getkeycode(struct input_dev *dev,
				    int scancode, int *keycode)
{
	if (!dev->keycodesize)
		return -EINVAL;

	if (scancode >= dev->keycodemax)
		return -EINVAL;

	*keycode = input_fetch_keycode(dev, scancode);

	return 0;
}

/* Default setkeycode method: store @keycode and refresh dev->keybit. */
static int input_default_setkeycode(struct input_dev *dev,
				    int scancode, int keycode)
{
	int old_keycode;
	int i;

	if (scancode >= dev->keycodemax)
		return -EINVAL;

	if (!dev->keycodesize)
		return -EINVAL;

	/* Reject keycodes that do not fit in a keymap table cell. */
	if (dev->keycodesize < sizeof(keycode) &&
			(keycode >> (dev->keycodesize * 8)))
		return -EINVAL;

	switch (dev->keycodesize) {
		case 1: {
			u8 *k = (u8 *)dev->keycode;
			old_keycode = k[scancode];
			k[scancode] = keycode;
			break;
		}
		case 2: {
			u16 *k = (u16 *)dev->keycode;
			old_keycode = k[scancode];
			k[scancode] = keycode;
			break;
		}
		default: {
			u32 *k = (u32 *)dev->keycode;
			old_keycode = k[scancode];
			k[scancode] = keycode;
			break;
		}
	}

	clear_bit(old_keycode,
dev->keybit); set_bit(keycode, dev->keybit); for (i = 0; i < dev->keycodemax; i++) { if (input_fetch_keycode(dev, i) == old_keycode) { set_bit(old_keycode, dev->keybit); break; /* Setting the bit twice is useless, so break */ } } return 0; } /** * input_get_keycode - retrieve keycode currently mapped to a given scancode * @dev: input device which keymap is being queried * @scancode: scancode (or its equivalent for device in question) for which * keycode is needed * @keycode: result * * This function should be called by anyone interested in retrieving current * keymap. Presently keyboard and evdev handlers use it. */ int input_get_keycode(struct input_dev *dev, int scancode, int *keycode) { if (scancode < 0) return -EINVAL; return dev->getkeycode(dev, scancode, keycode); } EXPORT_SYMBOL(input_get_keycode); /** * input_get_keycode - assign new keycode to a given scancode * @dev: input device which keymap is being updated * @scancode: scancode (or its equivalent for device in question) * @keycode: new keycode to be assigned to the scancode * * This function should be called by anyone needing to update current * keymap. Presently keyboard and evdev handlers use it. 
*/ int input_set_keycode(struct input_dev *dev, int scancode, int keycode) { unsigned long flags; int old_keycode; int retval; if (scancode < 0) return -EINVAL; if (keycode < 0 || keycode > KEY_MAX) return -EINVAL; spin_lock_irqsave(&dev->event_lock, flags); retval = dev->getkeycode(dev, scancode, &old_keycode); if (retval) goto out; retval = dev->setkeycode(dev, scancode, keycode); if (retval) goto out; /* * Simulate keyup event if keycode is not present * in the keymap anymore */ if (test_bit(EV_KEY, dev->evbit) && !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && __test_and_clear_bit(old_keycode, dev->key)) { input_pass_event(dev, EV_KEY, old_keycode, 0); if (dev->sync) input_pass_event(dev, EV_SYN, SYN_REPORT, 1); } out: spin_unlock_irqrestore(&dev->event_lock, flags); return retval; } EXPORT_SYMBOL(input_set_keycode); #define MATCH_BIT(bit, max) \ for (i = 0; i < BITS_TO_LONGS(max); i++) \ if ((id->bit[i] & dev->bit[i]) != id->bit[i]) \ break; \ if (i != BITS_TO_LONGS(max)) \ continue; static const struct input_device_id *input_match_device(const struct input_device_id *id, struct input_dev *dev) { int i; for (; id->flags || id->driver_info; id++) { if (id->flags & INPUT_DEVICE_ID_MATCH_BUS) if (id->bustype != dev->id.bustype) continue; if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR) if (id->vendor != dev->id.vendor) continue; if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT) if (id->product != dev->id.product) continue; if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION) if (id->version != dev->id.version) continue; MATCH_BIT(evbit, EV_MAX); MATCH_BIT(keybit, KEY_MAX); MATCH_BIT(relbit, REL_MAX); MATCH_BIT(absbit, ABS_MAX); MATCH_BIT(mscbit, MSC_MAX); MATCH_BIT(ledbit, LED_MAX); MATCH_BIT(sndbit, SND_MAX); MATCH_BIT(ffbit, FF_MAX); MATCH_BIT(swbit, SW_MAX); return id; } return NULL; } static int input_attach_handler(struct input_dev *dev, struct input_handler *handler) { const struct input_device_id *id; int error; if (handler->blacklist && 
input_match_device(handler->blacklist, dev)) return -ENODEV; id = input_match_device(handler->id_table, dev); if (!id) return -ENODEV; error = handler->connect(handler, dev, id); if (error && error != -ENODEV) printk(KERN_ERR "input: failed to attach handler %s to device %s, " "error: %d\n", handler->name, kobject_name(&dev->dev.kobj), error); return error; } #ifdef CONFIG_COMPAT static int input_bits_to_string(char *buf, int buf_size, unsigned long bits, bool skip_empty) { int len = 0; if (INPUT_COMPAT_TEST) { u32 dword = bits >> 32; if (dword || !skip_empty) len += snprintf(buf, buf_size, "%x ", dword); dword = bits & 0xffffffffUL; if (dword || !skip_empty || len) len += snprintf(buf + len, max(buf_size - len, 0), "%x", dword); } else { if (bits || !skip_empty) len += snprintf(buf, buf_size, "%lx", bits); } return len; } #else /* !CONFIG_COMPAT */ static int input_bits_to_string(char *buf, int buf_size, unsigned long bits, bool skip_empty) { return bits || !skip_empty ? snprintf(buf, buf_size, "%lx", bits) : 0; } #endif #ifdef CONFIG_PROC_FS static struct proc_dir_entry *proc_bus_input_dir; static DECLARE_WAIT_QUEUE_HEAD(input_devices_poll_wait); static int input_devices_state; static inline void input_wakeup_procfs_readers(void) { input_devices_state++; wake_up(&input_devices_poll_wait); } static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait) { poll_wait(file, &input_devices_poll_wait, wait); if (file->f_version != input_devices_state) { file->f_version = input_devices_state; return POLLIN | POLLRDNORM; } return 0; } union input_seq_state { struct { unsigned short pos; bool mutex_acquired; }; void *p; }; static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos) { union input_seq_state *state = (union input_seq_state *)&seq->private; int error; /* We need to fit into seq->private pointer */ BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private)); error = mutex_lock_interruptible(&input_mutex); if (error) { 
state->mutex_acquired = false; return ERR_PTR(error); } state->mutex_acquired = true; return seq_list_start(&input_dev_list, *pos); } static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos) { return seq_list_next(v, &input_dev_list, pos); } static void input_seq_stop(struct seq_file *seq, void *v) { union input_seq_state *state = (union input_seq_state *)&seq->private; if (state->mutex_acquired) mutex_unlock(&input_mutex); } static void input_seq_print_bitmap(struct seq_file *seq, const char *name, unsigned long *bitmap, int max) { int i; bool skip_empty = true; char buf[18]; seq_printf(seq, "B: %s=", name); for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) { if (input_bits_to_string(buf, sizeof(buf), bitmap[i], skip_empty)) { skip_empty = false; seq_printf(seq, "%s%s", buf, i > 0 ? " " : ""); } } /* * If no output was produced print a single 0. */ if (skip_empty) seq_puts(seq, "0"); seq_putc(seq, '\n'); } static int input_devices_seq_show(struct seq_file *seq, void *v) { struct input_dev *dev = container_of(v, struct input_dev, node); const char *path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL); struct input_handle *handle; seq_printf(seq, "I: Bus=%04x Vendor=%04x Product=%04x Version=%04x\n", dev->id.bustype, dev->id.vendor, dev->id.product, dev->id.version); seq_printf(seq, "N: Name=\"%s\"\n", dev->name ? dev->name : ""); seq_printf(seq, "P: Phys=%s\n", dev->phys ? dev->phys : ""); seq_printf(seq, "S: Sysfs=%s\n", path ? path : ""); seq_printf(seq, "U: Uniq=%s\n", dev->uniq ? 
dev->uniq : ""); seq_printf(seq, "H: Handlers="); list_for_each_entry(handle, &dev->h_list, d_node) seq_printf(seq, "%s ", handle->name); seq_putc(seq, '\n'); input_seq_print_bitmap(seq, "EV", dev->evbit, EV_MAX); if (test_bit(EV_KEY, dev->evbit)) input_seq_print_bitmap(seq, "KEY", dev->keybit, KEY_MAX); if (test_bit(EV_REL, dev->evbit)) input_seq_print_bitmap(seq, "REL", dev->relbit, REL_MAX); if (test_bit(EV_ABS, dev->evbit)) input_seq_print_bitmap(seq, "ABS", dev->absbit, ABS_MAX); if (test_bit(EV_MSC, dev->evbit)) input_seq_print_bitmap(seq, "MSC", dev->mscbit, MSC_MAX); if (test_bit(EV_LED, dev->evbit)) input_seq_print_bitmap(seq, "LED", dev->ledbit, LED_MAX); if (test_bit(EV_SND, dev->evbit)) input_seq_print_bitmap(seq, "SND", dev->sndbit, SND_MAX); if (test_bit(EV_FF, dev->evbit)) input_seq_print_bitmap(seq, "FF", dev->ffbit, FF_MAX); if (test_bit(EV_SW, dev->evbit)) input_seq_print_bitmap(seq, "SW", dev->swbit, SW_MAX); seq_putc(seq, '\n'); kfree(path); return 0; } static const struct seq_operations input_devices_seq_ops = { .start = input_devices_seq_start, .next = input_devices_seq_next, .stop = input_seq_stop, .show = input_devices_seq_show, }; static int input_proc_devices_open(struct inode *inode, struct file *file) { return seq_open(file, &input_devices_seq_ops); } static const struct file_operations input_devices_fileops = { .owner = THIS_MODULE, .open = input_proc_devices_open, .poll = input_proc_devices_poll, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos) { union input_seq_state *state = (union input_seq_state *)&seq->private; int error; /* We need to fit into seq->private pointer */ BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private)); error = mutex_lock_interruptible(&input_mutex); if (error) { state->mutex_acquired = false; return ERR_PTR(error); } state->mutex_acquired = true; state->pos = *pos; return seq_list_start(&input_handler_list, 
*pos); } static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos) { union input_seq_state *state = (union input_seq_state *)&seq->private; state->pos = *pos + 1; return seq_list_next(v, &input_handler_list, pos); } static int input_handlers_seq_show(struct seq_file *seq, void *v) { struct input_handler *handler = container_of(v, struct input_handler, node); union input_seq_state *state = (union input_seq_state *)&seq->private; seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name); if (handler->fops) seq_printf(seq, " Minor=%d", handler->minor); seq_putc(seq, '\n'); return 0; } static const struct seq_operations input_handlers_seq_ops = { .start = input_handlers_seq_start, .next = input_handlers_seq_next, .stop = input_seq_stop, .show = input_handlers_seq_show, }; static int input_proc_handlers_open(struct inode *inode, struct file *file) { return seq_open(file, &input_handlers_seq_ops); } static const struct file_operations input_handlers_fileops = { .owner = THIS_MODULE, .open = input_proc_handlers_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int __init input_proc_init(void) { struct proc_dir_entry *entry; proc_bus_input_dir = proc_mkdir("bus/input", NULL); if (!proc_bus_input_dir) return -ENOMEM; entry = proc_create("devices", 0, proc_bus_input_dir, &input_devices_fileops); if (!entry) goto fail1; entry = proc_create("handlers", 0, proc_bus_input_dir, &input_handlers_fileops); if (!entry) goto fail2; return 0; fail2: remove_proc_entry("devices", proc_bus_input_dir); fail1: remove_proc_entry("bus/input", NULL); return -ENOMEM; } static void input_proc_exit(void) { remove_proc_entry("devices", proc_bus_input_dir); remove_proc_entry("handlers", proc_bus_input_dir); remove_proc_entry("bus/input", NULL); } #else /* !CONFIG_PROC_FS */ static inline void input_wakeup_procfs_readers(void) { } static inline int input_proc_init(void) { return 0; } static inline void input_proc_exit(void) { } #endif 
#define INPUT_DEV_STRING_ATTR_SHOW(name) \ static ssize_t input_dev_show_##name(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct input_dev *input_dev = to_input_dev(dev); \ \ return scnprintf(buf, PAGE_SIZE, "%s\n", \ input_dev->name ? input_dev->name : ""); \ } \ static DEVICE_ATTR(name, S_IRUGO, input_dev_show_##name, NULL) INPUT_DEV_STRING_ATTR_SHOW(name); INPUT_DEV_STRING_ATTR_SHOW(phys); INPUT_DEV_STRING_ATTR_SHOW(uniq); static int input_print_modalias_bits(char *buf, int size, char name, unsigned long *bm, unsigned int min_bit, unsigned int max_bit) { int len = 0, i; len += snprintf(buf, max(size, 0), "%c", name); for (i = min_bit; i < max_bit; i++) if (bm[BIT_WORD(i)] & BIT_MASK(i)) len += snprintf(buf + len, max(size - len, 0), "%X,", i); return len; } static int input_print_modalias(char *buf, int size, struct input_dev *id, int add_cr) { int len; len = snprintf(buf, max(size, 0), "input:b%04Xv%04Xp%04Xe%04X-", id->id.bustype, id->id.vendor, id->id.product, id->id.version); len += input_print_modalias_bits(buf + len, size - len, 'e', id->evbit, 0, EV_MAX); len += input_print_modalias_bits(buf + len, size - len, 'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX); len += input_print_modalias_bits(buf + len, size - len, 'r', id->relbit, 0, REL_MAX); len += input_print_modalias_bits(buf + len, size - len, 'a', id->absbit, 0, ABS_MAX); len += input_print_modalias_bits(buf + len, size - len, 'm', id->mscbit, 0, MSC_MAX); len += input_print_modalias_bits(buf + len, size - len, 'l', id->ledbit, 0, LED_MAX); len += input_print_modalias_bits(buf + len, size - len, 's', id->sndbit, 0, SND_MAX); len += input_print_modalias_bits(buf + len, size - len, 'f', id->ffbit, 0, FF_MAX); len += input_print_modalias_bits(buf + len, size - len, 'w', id->swbit, 0, SW_MAX); if (add_cr) len += snprintf(buf + len, max(size - len, 0), "\n"); return len; } static ssize_t input_dev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf) { 
struct input_dev *id = to_input_dev(dev); ssize_t len; len = input_print_modalias(buf, PAGE_SIZE, id, 1); return min_t(int, len, PAGE_SIZE); } static DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL); static struct attribute *input_dev_attrs[] = { &dev_attr_name.attr, &dev_attr_phys.attr, &dev_attr_uniq.attr, &dev_attr_modalias.attr, NULL }; static struct attribute_group input_dev_attr_group = { .attrs = input_dev_attrs, }; #define INPUT_DEV_ID_ATTR(name) \ static ssize_t input_dev_show_id_##name(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct input_dev *input_dev = to_input_dev(dev); \ return scnprintf(buf, PAGE_SIZE, "%04x\n", input_dev->id.name); \ } \ static DEVICE_ATTR(name, S_IRUGO, input_dev_show_id_##name, NULL) INPUT_DEV_ID_ATTR(bustype); INPUT_DEV_ID_ATTR(vendor); INPUT_DEV_ID_ATTR(product); INPUT_DEV_ID_ATTR(version); static struct attribute *input_dev_id_attrs[] = { &dev_attr_bustype.attr, &dev_attr_vendor.attr, &dev_attr_product.attr, &dev_attr_version.attr, NULL }; static struct attribute_group input_dev_id_attr_group = { .name = "id", .attrs = input_dev_id_attrs, }; static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap, int max, int add_cr) { int i; int len = 0; bool skip_empty = true; for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) { len += input_bits_to_string(buf + len, max(buf_size - len, 0), bitmap[i], skip_empty); if (len) { skip_empty = false; if (i > 0) len += snprintf(buf + len, max(buf_size - len, 0), " "); } } /* * If no output was produced print a single 0. 
*/ if (len == 0) len = snprintf(buf, buf_size, "%d", 0); if (add_cr) len += snprintf(buf + len, max(buf_size - len, 0), "\n"); return len; } #define INPUT_DEV_CAP_ATTR(ev, bm) \ static ssize_t input_dev_show_cap_##bm(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct input_dev *input_dev = to_input_dev(dev); \ int len = input_print_bitmap(buf, PAGE_SIZE, \ input_dev->bm##bit, ev##_MAX, \ true); \ return min_t(int, len, PAGE_SIZE); \ } \ static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL) INPUT_DEV_CAP_ATTR(EV, ev); INPUT_DEV_CAP_ATTR(KEY, key); INPUT_DEV_CAP_ATTR(REL, rel); INPUT_DEV_CAP_ATTR(ABS, abs); INPUT_DEV_CAP_ATTR(MSC, msc); INPUT_DEV_CAP_ATTR(LED, led); INPUT_DEV_CAP_ATTR(SND, snd); INPUT_DEV_CAP_ATTR(FF, ff); INPUT_DEV_CAP_ATTR(SW, sw); static struct attribute *input_dev_caps_attrs[] = { &dev_attr_ev.attr, &dev_attr_key.attr, &dev_attr_rel.attr, &dev_attr_abs.attr, &dev_attr_msc.attr, &dev_attr_led.attr, &dev_attr_snd.attr, &dev_attr_ff.attr, &dev_attr_sw.attr, NULL }; static struct attribute_group input_dev_caps_attr_group = { .name = "capabilities", .attrs = input_dev_caps_attrs, }; static const struct attribute_group *input_dev_attr_groups[] = { &input_dev_attr_group, &input_dev_id_attr_group, &input_dev_caps_attr_group, NULL }; static void input_dev_release(struct device *device) { struct input_dev *dev = to_input_dev(device); input_ff_destroy(dev); kfree(dev); module_put(THIS_MODULE); } /* * Input uevent interface - loading event handlers based on * device bitfields. 
*/ static int input_add_uevent_bm_var(struct kobj_uevent_env *env, const char *name, unsigned long *bitmap, int max) { int len; if (add_uevent_var(env, "%s=", name)) return -ENOMEM; len = input_print_bitmap(&env->buf[env->buflen - 1], sizeof(env->buf) - env->buflen, bitmap, max, false); if (len >= (sizeof(env->buf) - env->buflen)) return -ENOMEM; env->buflen += len; return 0; } static int input_add_uevent_modalias_var(struct kobj_uevent_env *env, struct input_dev *dev) { int len; if (add_uevent_var(env, "MODALIAS=")) return -ENOMEM; len = input_print_modalias(&env->buf[env->buflen - 1], sizeof(env->buf) - env->buflen, dev, 0); if (len >= (sizeof(env->buf) - env->buflen)) return -ENOMEM; env->buflen += len; return 0; } #define INPUT_ADD_HOTPLUG_VAR(fmt, val...) \ do { \ int err = add_uevent_var(env, fmt, val); \ if (err) \ return err; \ } while (0) #define INPUT_ADD_HOTPLUG_BM_VAR(name, bm, max) \ do { \ int err = input_add_uevent_bm_var(env, name, bm, max); \ if (err) \ return err; \ } while (0) #define INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev) \ do { \ int err = input_add_uevent_modalias_var(env, dev); \ if (err) \ return err; \ } while (0) static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env) { struct input_dev *dev = to_input_dev(device); INPUT_ADD_HOTPLUG_VAR("PRODUCT=%x/%x/%x/%x", dev->id.bustype, dev->id.vendor, dev->id.product, dev->id.version); if (dev->name) INPUT_ADD_HOTPLUG_VAR("NAME=\"%s\"", dev->name); if (dev->phys) INPUT_ADD_HOTPLUG_VAR("PHYS=\"%s\"", dev->phys); if (dev->uniq) INPUT_ADD_HOTPLUG_VAR("UNIQ=\"%s\"", dev->uniq); INPUT_ADD_HOTPLUG_BM_VAR("EV=", dev->evbit, EV_MAX); if (test_bit(EV_KEY, dev->evbit)) INPUT_ADD_HOTPLUG_BM_VAR("KEY=", dev->keybit, KEY_MAX); if (test_bit(EV_REL, dev->evbit)) INPUT_ADD_HOTPLUG_BM_VAR("REL=", dev->relbit, REL_MAX); if (test_bit(EV_ABS, dev->evbit)) INPUT_ADD_HOTPLUG_BM_VAR("ABS=", dev->absbit, ABS_MAX); if (test_bit(EV_MSC, dev->evbit)) INPUT_ADD_HOTPLUG_BM_VAR("MSC=", dev->mscbit, MSC_MAX); 
if (test_bit(EV_LED, dev->evbit)) INPUT_ADD_HOTPLUG_BM_VAR("LED=", dev->ledbit, LED_MAX); if (test_bit(EV_SND, dev->evbit)) INPUT_ADD_HOTPLUG_BM_VAR("SND=", dev->sndbit, SND_MAX); if (test_bit(EV_FF, dev->evbit)) INPUT_ADD_HOTPLUG_BM_VAR("FF=", dev->ffbit, FF_MAX); if (test_bit(EV_SW, dev->evbit)) INPUT_ADD_HOTPLUG_BM_VAR("SW=", dev->swbit, SW_MAX); INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev); return 0; } #define INPUT_DO_TOGGLE(dev, type, bits, on) \ do { \ int i; \ bool active; \ \ if (!test_bit(EV_##type, dev->evbit)) \ break; \ \ for (i = 0; i < type##_MAX; i++) { \ if (!test_bit(i, dev->bits##bit)) \ continue; \ \ active = test_bit(i, dev->bits); \ if (!active && !on) \ continue; \ \ dev->event(dev, EV_##type, i, on ? active : 0); \ } \ } while (0) #ifdef CONFIG_PM static void input_dev_reset(struct input_dev *dev, bool activate) { if (!dev->event) return; INPUT_DO_TOGGLE(dev, LED, led, activate); INPUT_DO_TOGGLE(dev, SND, snd, activate); if (activate && test_bit(EV_REP, dev->evbit)) { dev->event(dev, EV_REP, REP_PERIOD, dev->rep[REP_PERIOD]); dev->event(dev, EV_REP, REP_DELAY, dev->rep[REP_DELAY]); } } static int input_dev_suspend(struct device *dev) { struct input_dev *input_dev = to_input_dev(dev); mutex_lock(&input_dev->mutex); input_dev_reset(input_dev, false); mutex_unlock(&input_dev->mutex); return 0; } static int input_dev_resume(struct device *dev) { struct input_dev *input_dev = to_input_dev(dev); mutex_lock(&input_dev->mutex); input_dev_reset(input_dev, true); mutex_unlock(&input_dev->mutex); return 0; } static const struct dev_pm_ops input_dev_pm_ops = { .suspend = input_dev_suspend, .resume = input_dev_resume, .poweroff = input_dev_suspend, .restore = input_dev_resume, }; #endif /* CONFIG_PM */ static struct device_type input_dev_type = { .groups = input_dev_attr_groups, .release = input_dev_release, .uevent = input_dev_uevent, #ifdef CONFIG_PM .pm = &input_dev_pm_ops, #endif }; static char *input_devnode(struct device *dev, mode_t *mode) { return 
kasprintf(GFP_KERNEL, "input/%s", dev_name(dev)); } struct class input_class = { .name = "input", .devnode = input_devnode, }; EXPORT_SYMBOL_GPL(input_class); /** * input_allocate_device - allocate memory for new input device * * Returns prepared struct input_dev or NULL. * * NOTE: Use input_free_device() to free devices that have not been * registered; input_unregister_device() should be used for already * registered devices. */ struct input_dev *input_allocate_device(void) { struct input_dev *dev; dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL); if (dev) { dev->dev.type = &input_dev_type; dev->dev.class = &input_class; device_initialize(&dev->dev); mutex_init(&dev->mutex); spin_lock_init(&dev->event_lock); INIT_LIST_HEAD(&dev->h_list); INIT_LIST_HEAD(&dev->node); __module_get(THIS_MODULE); } return dev; } EXPORT_SYMBOL(input_allocate_device); /** * input_free_device - free memory occupied by input_dev structure * @dev: input device to free * * This function should only be used if input_register_device() * was not called yet or if it failed. Once device was registered * use input_unregister_device() and memory will be freed once last * reference to the device is dropped. * * Device should be allocated by input_allocate_device(). * * NOTE: If there are references to the input device then memory * will not be freed until last reference is dropped. */ void input_free_device(struct input_dev *dev) { if (dev) input_put_device(dev); } EXPORT_SYMBOL(input_free_device); /** * input_set_capability - mark device as capable of a certain event * @dev: device that is capable of emitting or accepting event * @type: type of the event (EV_KEY, EV_REL, etc...) * @code: event code * * In addition to setting up corresponding bit in appropriate capability * bitmap the function also adjusts dev->evbit. 
*/
void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
{
	switch (type) {
	case EV_KEY:
		__set_bit(code, dev->keybit);
		break;

	case EV_REL:
		__set_bit(code, dev->relbit);
		break;

	case EV_ABS:
		__set_bit(code, dev->absbit);
		break;

	case EV_MSC:
		__set_bit(code, dev->mscbit);
		break;

	case EV_SW:
		__set_bit(code, dev->swbit);
		break;

	case EV_LED:
		__set_bit(code, dev->ledbit);
		break;

	case EV_SND:
		__set_bit(code, dev->sndbit);
		break;

	case EV_FF:
		__set_bit(code, dev->ffbit);
		break;

	case EV_PWR:
		/* do nothing */
		break;

	default:
		printk(KERN_ERR "input_set_capability: unknown type %u (code %u)\n",
			type, code);
		dump_stack();
		return;
	}

	/* Mark the event type itself as supported as well. */
	__set_bit(type, dev->evbit);
}
EXPORT_SYMBOL(input_set_capability);

/**
 * input_register_device - register device with input core
 * @dev: device to be registered
 *
 * This function registers device with input core. The device must be
 * allocated with input_allocate_device() and all its capabilities
 * set up before registering.
 * If function fails the device must be freed with input_free_device().
 * Once device has been successfully registered it can be unregistered
 * with input_unregister_device(); input_free_device() should not be
 * called in this case.
 */
int input_register_device(struct input_dev *dev)
{
	static atomic_t input_no = ATOMIC_INIT(0);
	struct input_handler *handler;
	const char *path;
	int error;

	/* Every event device must be able to emit EV_SYN/SYN_REPORT. */
	__set_bit(EV_SYN, dev->evbit);

	/*
	 * If delay and period are pre-set by the driver, then autorepeating
	 * is handled by the driver itself and we don't do it in input.c.
*/ init_timer(&dev->timer); if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD]) { dev->timer.data = (long) dev; dev->timer.function = input_repeat_key; dev->rep[REP_DELAY] = 250; dev->rep[REP_PERIOD] = 33; } if (!dev->getkeycode) dev->getkeycode = input_default_getkeycode; if (!dev->setkeycode) dev->setkeycode = input_default_setkeycode; dev_set_name(&dev->dev, "input%ld", (unsigned long) atomic_inc_return(&input_no) - 1); error = device_add(&dev->dev); if (error) return error; path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL); printk(KERN_INFO "input: %s as %s\n", dev->name ? dev->name : "Unspecified device", path ? path : "N/A"); kfree(path); error = mutex_lock_interruptible(&input_mutex); if (error) { device_del(&dev->dev); return error; } list_add_tail(&dev->node, &input_dev_list); list_for_each_entry(handler, &input_handler_list, node) input_attach_handler(dev, handler); input_wakeup_procfs_readers(); mutex_unlock(&input_mutex); return 0; } EXPORT_SYMBOL(input_register_device); /** * input_unregister_device - unregister previously registered device * @dev: device to be unregistered * * This function unregisters an input device. Once device is unregistered * the caller should not try to access it as it may get freed at any moment. */ void input_unregister_device(struct input_dev *dev) { struct input_handle *handle, *next; input_disconnect_device(dev); mutex_lock(&input_mutex); list_for_each_entry_safe(handle, next, &dev->h_list, d_node) handle->handler->disconnect(handle); WARN_ON(!list_empty(&dev->h_list)); del_timer_sync(&dev->timer); list_del_init(&dev->node); input_wakeup_procfs_readers(); mutex_unlock(&input_mutex); device_unregister(&dev->dev); } EXPORT_SYMBOL(input_unregister_device); /** * input_register_handler - register a new input handler * @handler: handler to be registered * * This function registers a new input handler (interface) for input * devices in the system and attaches it to all input devices that * are compatible with the handler. 
*/ int input_register_handler(struct input_handler *handler) { struct input_dev *dev; int retval; retval = mutex_lock_interruptible(&input_mutex); if (retval) return retval; INIT_LIST_HEAD(&handler->h_list); if (handler->fops != NULL) { if (input_table[handler->minor >> 5]) { retval = -EBUSY; goto out; } input_table[handler->minor >> 5] = handler; } list_add_tail(&handler->node, &input_handler_list); list_for_each_entry(dev, &input_dev_list, node) input_attach_handler(dev, handler); input_wakeup_procfs_readers(); out: mutex_unlock(&input_mutex); return retval; } EXPORT_SYMBOL(input_register_handler); /** * input_unregister_handler - unregisters an input handler * @handler: handler to be unregistered * * This function disconnects a handler from its input devices and * removes it from lists of known handlers. */ void input_unregister_handler(struct input_handler *handler) { struct input_handle *handle, *next; mutex_lock(&input_mutex); list_for_each_entry_safe(handle, next, &handler->h_list, h_node) handler->disconnect(handle); WARN_ON(!list_empty(&handler->h_list)); list_del_init(&handler->node); if (handler->fops != NULL) input_table[handler->minor >> 5] = NULL; input_wakeup_procfs_readers(); mutex_unlock(&input_mutex); } EXPORT_SYMBOL(input_unregister_handler); /** * input_register_handle - register a new input handle * @handle: handle to register * * This function puts a new input handle onto device's * and handler's lists so that events can flow through * it once it is opened using input_open_device(). * * This function is supposed to be called from handler's * connect() method. */ int input_register_handle(struct input_handle *handle) { struct input_handler *handler = handle->handler; struct input_dev *dev = handle->dev; int error; /* * We take dev->mutex here to prevent race with * input_release_device(). 
*/ error = mutex_lock_interruptible(&dev->mutex); if (error) return error; list_add_tail_rcu(&handle->d_node, &dev->h_list); mutex_unlock(&dev->mutex); /* * Since we are supposed to be called from ->connect() * which is mutually exclusive with ->disconnect() * we can't be racing with input_unregister_handle() * and so separate lock is not needed here. */ list_add_tail(&handle->h_node, &handler->h_list); if (handler->start) handler->start(handle); return 0; } EXPORT_SYMBOL(input_register_handle); /** * input_unregister_handle - unregister an input handle * @handle: handle to unregister * * This function removes input handle from device's * and handler's lists. * * This function is supposed to be called from handler's * disconnect() method. */ void input_unregister_handle(struct input_handle *handle) { struct input_dev *dev = handle->dev; list_del_init(&handle->h_node); /* * Take dev->mutex to prevent race with input_release_device(). */ mutex_lock(&dev->mutex); list_del_rcu(&handle->d_node); mutex_unlock(&dev->mutex); synchronize_rcu(); } EXPORT_SYMBOL(input_unregister_handle); static int input_open_file(struct inode *inode, struct file *file) { struct input_handler *handler; const struct file_operations *old_fops, *new_fops = NULL; int err; lock_kernel(); /* No load-on-demand here? */ handler = input_table[iminor(inode) >> 5]; if (!handler || !(new_fops = fops_get(handler->fops))) { err = -ENODEV; goto out; } /* * That's _really_ odd. Usually NULL ->open means "nothing special", * not "no device". Oh, well... 
*/ if (!new_fops->open) { fops_put(new_fops); err = -ENODEV; goto out; } old_fops = file->f_op; file->f_op = new_fops; err = new_fops->open(inode, file); if (err) { fops_put(file->f_op); file->f_op = fops_get(old_fops); } fops_put(old_fops); out: unlock_kernel(); return err; } static const struct file_operations input_fops = { .owner = THIS_MODULE, .open = input_open_file, }; static void __init input_init_abs_bypass(void) { const unsigned int *p; for (p = input_abs_bypass_init_data; *p; p++) input_abs_bypass[BIT_WORD(*p)] |= BIT_MASK(*p); } static int __init input_init(void) { int err; input_init_abs_bypass(); err = class_register(&input_class); if (err) { printk(KERN_ERR "input: unable to register input_dev class\n"); return err; } err = input_proc_init(); if (err) goto fail1; err = register_chrdev(INPUT_MAJOR, "input", &input_fops); if (err) { printk(KERN_ERR "input: unable to register char major %d", INPUT_MAJOR); goto fail2; } return 0; fail2: input_proc_exit(); fail1: class_unregister(&input_class); return err; } static void __exit input_exit(void) { input_proc_exit(); unregister_chrdev(INPUT_MAJOR, "input"); class_unregister(&input_class); } subsys_initcall(input_init); module_exit(input_exit);
gpl-2.0
ulrikdb/linux
sound/pci/ice1712/quartet.c
328
30692
/* * ALSA driver for ICEnsemble VT1724 (Envy24HT) * * Lowlevel functions for Infrasonic Quartet * * Copyright (c) 2009 Pavel Hofman <pavel.hofman@ivitera.com> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/tlv.h> #include <sound/info.h> #include "ice1712.h" #include "envy24ht.h" #include <sound/ak4113.h> #include "quartet.h" struct qtet_spec { struct ak4113 *ak4113; unsigned int scr; /* system control register */ unsigned int mcr; /* monitoring control register */ unsigned int cpld; /* cpld register */ }; struct qtet_kcontrol_private { unsigned int bit; void (*set_register)(struct snd_ice1712 *ice, unsigned int val); unsigned int (*get_register)(struct snd_ice1712 *ice); unsigned char * const texts[2]; }; enum { IN12_SEL = 0, IN34_SEL, AIN34_SEL, COAX_OUT, IN12_MON12, IN12_MON34, IN34_MON12, IN34_MON34, OUT12_MON34, OUT34_MON12, }; static const char * const ext_clock_names[3] = {"IEC958 In", "Word Clock 1xFS", "Word Clock 256xFS"}; /* chip address on I2C bus */ #define AK4113_ADDR 0x26 /* S/PDIF receiver */ /* chip address on SPI bus */ #define AK4620_ADDR 0x02 /* ADC/DAC */ /* * GPIO pins */ /* GPIO0 - O - DATA0, def. 
0 */ #define GPIO_D0 (1<<0) /* GPIO1 - I/O - DATA1, Jack Detect Input0 (0:present, 1:missing), def. 1 */ #define GPIO_D1_JACKDTC0 (1<<1) /* GPIO2 - I/O - DATA2, Jack Detect Input1 (0:present, 1:missing), def. 1 */ #define GPIO_D2_JACKDTC1 (1<<2) /* GPIO3 - I/O - DATA3, def. 1 */ #define GPIO_D3 (1<<3) /* GPIO4 - I/O - DATA4, SPI CDTO, def. 1 */ #define GPIO_D4_SPI_CDTO (1<<4) /* GPIO5 - I/O - DATA5, SPI CCLK, def. 1 */ #define GPIO_D5_SPI_CCLK (1<<5) /* GPIO6 - I/O - DATA6, Cable Detect Input (0:detected, 1:not detected */ #define GPIO_D6_CD (1<<6) /* GPIO7 - I/O - DATA7, Device Detect Input (0:detected, 1:not detected */ #define GPIO_D7_DD (1<<7) /* GPIO8 - O - CPLD Chip Select, def. 1 */ #define GPIO_CPLD_CSN (1<<8) /* GPIO9 - O - CPLD register read/write (0:write, 1:read), def. 0 */ #define GPIO_CPLD_RW (1<<9) /* GPIO10 - O - SPI Chip Select for CODEC#0, def. 1 */ #define GPIO_SPI_CSN0 (1<<10) /* GPIO11 - O - SPI Chip Select for CODEC#1, def. 1 */ #define GPIO_SPI_CSN1 (1<<11) /* GPIO12 - O - Ex. Register Output Enable (0:enable, 1:disable), def. 1, * init 0 */ #define GPIO_EX_GPIOE (1<<12) /* GPIO13 - O - Ex. Register0 Chip Select for System Control Register, * def. 1 */ #define GPIO_SCR (1<<13) /* GPIO14 - O - Ex. Register1 Chip Select for Monitor Control Register, * def. 
1 */ #define GPIO_MCR (1<<14) #define GPIO_SPI_ALL (GPIO_D4_SPI_CDTO | GPIO_D5_SPI_CCLK |\ GPIO_SPI_CSN0 | GPIO_SPI_CSN1) #define GPIO_DATA_MASK (GPIO_D0 | GPIO_D1_JACKDTC0 | \ GPIO_D2_JACKDTC1 | GPIO_D3 | \ GPIO_D4_SPI_CDTO | GPIO_D5_SPI_CCLK | \ GPIO_D6_CD | GPIO_D7_DD) /* System Control Register GPIO_SCR data bits */ /* Mic/Line select relay (0:line, 1:mic) */ #define SCR_RELAY GPIO_D0 /* Phantom power drive control (0:5V, 1:48V) */ #define SCR_PHP_V GPIO_D1_JACKDTC0 /* H/W mute control (0:Normal, 1:Mute) */ #define SCR_MUTE GPIO_D2_JACKDTC1 /* Phantom power control (0:Phantom on, 1:off) */ #define SCR_PHP GPIO_D3 /* Analog input 1/2 Source Select */ #define SCR_AIN12_SEL0 GPIO_D4_SPI_CDTO #define SCR_AIN12_SEL1 GPIO_D5_SPI_CCLK /* Analog input 3/4 Source Select (0:line, 1:hi-z) */ #define SCR_AIN34_SEL GPIO_D6_CD /* Codec Power Down (0:power down, 1:normal) */ #define SCR_CODEC_PDN GPIO_D7_DD #define SCR_AIN12_LINE (0) #define SCR_AIN12_MIC (SCR_AIN12_SEL0) #define SCR_AIN12_LOWCUT (SCR_AIN12_SEL1 | SCR_AIN12_SEL0) /* Monitor Control Register GPIO_MCR data bits */ /* Input 1/2 to Monitor 1/2 (0:off, 1:on) */ #define MCR_IN12_MON12 GPIO_D0 /* Input 1/2 to Monitor 3/4 (0:off, 1:on) */ #define MCR_IN12_MON34 GPIO_D1_JACKDTC0 /* Input 3/4 to Monitor 1/2 (0:off, 1:on) */ #define MCR_IN34_MON12 GPIO_D2_JACKDTC1 /* Input 3/4 to Monitor 3/4 (0:off, 1:on) */ #define MCR_IN34_MON34 GPIO_D3 /* Output to Monitor 1/2 (0:off, 1:on) */ #define MCR_OUT34_MON12 GPIO_D4_SPI_CDTO /* Output to Monitor 3/4 (0:off, 1:on) */ #define MCR_OUT12_MON34 GPIO_D5_SPI_CCLK /* CPLD Register DATA bits */ /* Clock Rate Select */ #define CPLD_CKS0 GPIO_D0 #define CPLD_CKS1 GPIO_D1_JACKDTC0 #define CPLD_CKS2 GPIO_D2_JACKDTC1 /* Sync Source Select (0:Internal, 1:External) */ #define CPLD_SYNC_SEL GPIO_D3 /* Word Clock FS Select (0:FS, 1:256FS) */ #define CPLD_WORD_SEL GPIO_D4_SPI_CDTO /* Coaxial Output Source (IS-Link) (0:SPDIF, 1:I2S) */ #define CPLD_COAX_OUT GPIO_D5_SPI_CCLK /* Input 1/2 Source 
Select (0:Analog12, 1:An34) */ #define CPLD_IN12_SEL GPIO_D6_CD /* Input 3/4 Source Select (0:Analog34, 1:Digital In) */ #define CPLD_IN34_SEL GPIO_D7_DD /* internal clock (CPLD_SYNC_SEL = 0) options */ #define CPLD_CKS_44100HZ (0) #define CPLD_CKS_48000HZ (CPLD_CKS0) #define CPLD_CKS_88200HZ (CPLD_CKS1) #define CPLD_CKS_96000HZ (CPLD_CKS1 | CPLD_CKS0) #define CPLD_CKS_176400HZ (CPLD_CKS2) #define CPLD_CKS_192000HZ (CPLD_CKS2 | CPLD_CKS0) #define CPLD_CKS_MASK (CPLD_CKS0 | CPLD_CKS1 | CPLD_CKS2) /* external clock (CPLD_SYNC_SEL = 1) options */ /* external clock - SPDIF */ #define CPLD_EXT_SPDIF (0 | CPLD_SYNC_SEL) /* external clock - WordClock 1xfs */ #define CPLD_EXT_WORDCLOCK_1FS (CPLD_CKS1 | CPLD_SYNC_SEL) /* external clock - WordClock 256xfs */ #define CPLD_EXT_WORDCLOCK_256FS (CPLD_CKS1 | CPLD_WORD_SEL |\ CPLD_SYNC_SEL) #define EXT_SPDIF_TYPE 0 #define EXT_WORDCLOCK_1FS_TYPE 1 #define EXT_WORDCLOCK_256FS_TYPE 2 #define AK4620_DFS0 (1<<0) #define AK4620_DFS1 (1<<1) #define AK4620_CKS0 (1<<2) #define AK4620_CKS1 (1<<3) /* Clock and Format Control register */ #define AK4620_DFS_REG 0x02 /* Deem and Volume Control register */ #define AK4620_DEEMVOL_REG 0x03 #define AK4620_SMUTE (1<<7) #ifdef CONFIG_PROC_FS /* * Conversion from int value to its binary form. Used for debugging. * The output buffer must be allocated prior to calling the function. 
*/ static char *get_binary(char *buffer, int value) { int i, j, pos; pos = 0; for (i = 0; i < 4; ++i) { for (j = 0; j < 8; ++j) { if (value & (1 << (31-(i*8 + j)))) buffer[pos] = '1'; else buffer[pos] = '0'; pos++; } if (i < 3) { buffer[pos] = ' '; pos++; } } buffer[pos] = '\0'; return buffer; } #endif /* CONFIG_PROC_FS */ /* * Initial setup of the conversion array GPIO <-> rate */ static unsigned int qtet_rates[] = { 44100, 48000, 88200, 96000, 176400, 192000, }; static unsigned int cks_vals[] = { CPLD_CKS_44100HZ, CPLD_CKS_48000HZ, CPLD_CKS_88200HZ, CPLD_CKS_96000HZ, CPLD_CKS_176400HZ, CPLD_CKS_192000HZ, }; static struct snd_pcm_hw_constraint_list qtet_rates_info = { .count = ARRAY_SIZE(qtet_rates), .list = qtet_rates, .mask = 0, }; static void qtet_ak4113_write(void *private_data, unsigned char reg, unsigned char val) { snd_vt1724_write_i2c((struct snd_ice1712 *)private_data, AK4113_ADDR, reg, val); } static unsigned char qtet_ak4113_read(void *private_data, unsigned char reg) { return snd_vt1724_read_i2c((struct snd_ice1712 *)private_data, AK4113_ADDR, reg); } /* * AK4620 section */ /* * Write data to addr register of ak4620 */ static void qtet_akm_write(struct snd_akm4xxx *ak, int chip, unsigned char addr, unsigned char data) { unsigned int tmp, orig_dir; int idx; unsigned int addrdata; struct snd_ice1712 *ice = ak->private_data[0]; if (snd_BUG_ON(chip < 0 || chip >= 4)) return; /*printk(KERN_DEBUG "Writing to AK4620: chip=%d, addr=0x%x, data=0x%x\n", chip, addr, data);*/ orig_dir = ice->gpio.get_dir(ice); ice->gpio.set_dir(ice, orig_dir | GPIO_SPI_ALL); /* set mask - only SPI bits */ ice->gpio.set_mask(ice, ~GPIO_SPI_ALL); tmp = ice->gpio.get_data(ice); /* high all */ tmp |= GPIO_SPI_ALL; ice->gpio.set_data(ice, tmp); udelay(100); /* drop chip select */ if (chip) /* CODEC 1 */ tmp &= ~GPIO_SPI_CSN1; else tmp &= ~GPIO_SPI_CSN0; ice->gpio.set_data(ice, tmp); udelay(100); /* build I2C address + data byte */ addrdata = (AK4620_ADDR << 6) | 0x20 | (addr & 0x1f); 
addrdata = (addrdata << 8) | data; for (idx = 15; idx >= 0; idx--) { /* drop clock */ tmp &= ~GPIO_D5_SPI_CCLK; ice->gpio.set_data(ice, tmp); udelay(100); /* set data */ if (addrdata & (1 << idx)) tmp |= GPIO_D4_SPI_CDTO; else tmp &= ~GPIO_D4_SPI_CDTO; ice->gpio.set_data(ice, tmp); udelay(100); /* raise clock */ tmp |= GPIO_D5_SPI_CCLK; ice->gpio.set_data(ice, tmp); udelay(100); } /* all back to 1 */ tmp |= GPIO_SPI_ALL; ice->gpio.set_data(ice, tmp); udelay(100); /* return all gpios to non-writable */ ice->gpio.set_mask(ice, 0xffffff); /* restore GPIOs direction */ ice->gpio.set_dir(ice, orig_dir); } static void qtet_akm_set_regs(struct snd_akm4xxx *ak, unsigned char addr, unsigned char mask, unsigned char value) { unsigned char tmp; int chip; for (chip = 0; chip < ak->num_chips; chip++) { tmp = snd_akm4xxx_get(ak, chip, addr); /* clear the bits */ tmp &= ~mask; /* set the new bits */ tmp |= value; snd_akm4xxx_write(ak, chip, addr, tmp); } } /* * change the rate of AK4620 */ static void qtet_akm_set_rate_val(struct snd_akm4xxx *ak, unsigned int rate) { unsigned char ak4620_dfs; if (rate == 0) /* no hint - S/PDIF input is master or the new spdif input rate undetected, simply return */ return; /* adjust DFS on codecs - see datasheet */ if (rate > 108000) ak4620_dfs = AK4620_DFS1 | AK4620_CKS1; else if (rate > 54000) ak4620_dfs = AK4620_DFS0 | AK4620_CKS0; else ak4620_dfs = 0; /* set new value */ qtet_akm_set_regs(ak, AK4620_DFS_REG, AK4620_DFS0 | AK4620_DFS1 | AK4620_CKS0 | AK4620_CKS1, ak4620_dfs); } #define AK_CONTROL(xname, xch) { .name = xname, .num_channels = xch } #define PCM_12_PLAYBACK_VOLUME "PCM 1/2 Playback Volume" #define PCM_34_PLAYBACK_VOLUME "PCM 3/4 Playback Volume" #define PCM_12_CAPTURE_VOLUME "PCM 1/2 Capture Volume" #define PCM_34_CAPTURE_VOLUME "PCM 3/4 Capture Volume" static const struct snd_akm4xxx_dac_channel qtet_dac[] = { AK_CONTROL(PCM_12_PLAYBACK_VOLUME, 2), AK_CONTROL(PCM_34_PLAYBACK_VOLUME, 2), }; static const struct 
snd_akm4xxx_adc_channel qtet_adc[] = { AK_CONTROL(PCM_12_CAPTURE_VOLUME, 2), AK_CONTROL(PCM_34_CAPTURE_VOLUME, 2), }; static struct snd_akm4xxx akm_qtet_dac = { .type = SND_AK4620, .num_dacs = 4, /* DAC1 - Output 12 */ .num_adcs = 4, /* ADC1 - Input 12 */ .ops = { .write = qtet_akm_write, .set_rate_val = qtet_akm_set_rate_val, }, .dac_info = qtet_dac, .adc_info = qtet_adc, }; /* Communication routines with the CPLD */ /* Writes data to external register reg, both reg and data are * GPIO representations */ static void reg_write(struct snd_ice1712 *ice, unsigned int reg, unsigned int data) { unsigned int tmp; mutex_lock(&ice->gpio_mutex); /* set direction of used GPIOs*/ /* all outputs */ tmp = 0x00ffff; ice->gpio.set_dir(ice, tmp); /* mask - writable bits */ ice->gpio.set_mask(ice, ~(tmp)); /* write the data */ tmp = ice->gpio.get_data(ice); tmp &= ~GPIO_DATA_MASK; tmp |= data; ice->gpio.set_data(ice, tmp); udelay(100); /* drop output enable */ tmp &= ~GPIO_EX_GPIOE; ice->gpio.set_data(ice, tmp); udelay(100); /* drop the register gpio */ tmp &= ~reg; ice->gpio.set_data(ice, tmp); udelay(100); /* raise the register GPIO */ tmp |= reg; ice->gpio.set_data(ice, tmp); udelay(100); /* raise all data gpios */ tmp |= GPIO_DATA_MASK; ice->gpio.set_data(ice, tmp); /* mask - immutable bits */ ice->gpio.set_mask(ice, 0xffffff); /* outputs only 8-15 */ ice->gpio.set_dir(ice, 0x00ff00); mutex_unlock(&ice->gpio_mutex); } static unsigned int get_scr(struct snd_ice1712 *ice) { struct qtet_spec *spec = ice->spec; return spec->scr; } static unsigned int get_mcr(struct snd_ice1712 *ice) { struct qtet_spec *spec = ice->spec; return spec->mcr; } static unsigned int get_cpld(struct snd_ice1712 *ice) { struct qtet_spec *spec = ice->spec; return spec->cpld; } static void set_scr(struct snd_ice1712 *ice, unsigned int val) { struct qtet_spec *spec = ice->spec; reg_write(ice, GPIO_SCR, val); spec->scr = val; } static void set_mcr(struct snd_ice1712 *ice, unsigned int val) { struct qtet_spec 
*spec = ice->spec; reg_write(ice, GPIO_MCR, val); spec->mcr = val; } static void set_cpld(struct snd_ice1712 *ice, unsigned int val) { struct qtet_spec *spec = ice->spec; reg_write(ice, GPIO_CPLD_CSN, val); spec->cpld = val; } #ifdef CONFIG_PROC_FS static void proc_regs_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_ice1712 *ice = entry->private_data; char bin_buffer[36]; snd_iprintf(buffer, "SCR: %s\n", get_binary(bin_buffer, get_scr(ice))); snd_iprintf(buffer, "MCR: %s\n", get_binary(bin_buffer, get_mcr(ice))); snd_iprintf(buffer, "CPLD: %s\n", get_binary(bin_buffer, get_cpld(ice))); } static void proc_init(struct snd_ice1712 *ice) { struct snd_info_entry *entry; if (!snd_card_proc_new(ice->card, "quartet", &entry)) snd_info_set_text_ops(entry, ice, proc_regs_read); } #else /* !CONFIG_PROC_FS */ static void proc_init(struct snd_ice1712 *ice) {} #endif static int qtet_mute_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned int val; val = get_scr(ice) & SCR_MUTE; ucontrol->value.integer.value[0] = (val) ? 
0 : 1; return 0; } static int qtet_mute_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned int old, new, smute; old = get_scr(ice) & SCR_MUTE; if (ucontrol->value.integer.value[0]) { /* unmute */ new = 0; /* un-smuting DAC */ smute = 0; } else { /* mute */ new = SCR_MUTE; /* smuting DAC */ smute = AK4620_SMUTE; } if (old != new) { struct snd_akm4xxx *ak = ice->akm; set_scr(ice, (get_scr(ice) & ~SCR_MUTE) | new); /* set smute */ qtet_akm_set_regs(ak, AK4620_DEEMVOL_REG, AK4620_SMUTE, smute); return 1; } /* no change */ return 0; } static int qtet_ain12_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { static const char * const texts[3] = {"Line In 1/2", "Mic", "Mic + Low-cut"}; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = ARRAY_SIZE(texts); if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items) uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1; strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); return 0; } static int qtet_ain12_sw_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned int val, result; val = get_scr(ice) & (SCR_AIN12_SEL1 | SCR_AIN12_SEL0); switch (val) { case SCR_AIN12_LINE: result = 0; break; case SCR_AIN12_MIC: result = 1; break; case SCR_AIN12_LOWCUT: result = 2; break; default: /* BUG - no other combinations allowed */ snd_BUG(); result = 0; } ucontrol->value.integer.value[0] = result; return 0; } static int qtet_ain12_sw_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned int old, new, tmp, masked_old; old = new = get_scr(ice); masked_old = old & (SCR_AIN12_SEL1 | SCR_AIN12_SEL0); tmp = ucontrol->value.integer.value[0]; if (tmp == 2) tmp = 3; /* binary 10 is not 
supported */ tmp <<= 4; /* shifting to SCR_AIN12_SEL0 */ if (tmp != masked_old) { /* change requested */ switch (tmp) { case SCR_AIN12_LINE: new = old & ~(SCR_AIN12_SEL1 | SCR_AIN12_SEL0); set_scr(ice, new); /* turn off relay */ new &= ~SCR_RELAY; set_scr(ice, new); break; case SCR_AIN12_MIC: /* turn on relay */ new = old | SCR_RELAY; set_scr(ice, new); new = (new & ~SCR_AIN12_SEL1) | SCR_AIN12_SEL0; set_scr(ice, new); break; case SCR_AIN12_LOWCUT: /* turn on relay */ new = old | SCR_RELAY; set_scr(ice, new); new |= SCR_AIN12_SEL1 | SCR_AIN12_SEL0; set_scr(ice, new); break; default: snd_BUG(); } return 1; } /* no change */ return 0; } static int qtet_php_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned int val; /* if phantom voltage =48V, phantom on */ val = get_scr(ice) & SCR_PHP_V; ucontrol->value.integer.value[0] = val ? 1 : 0; return 0; } static int qtet_php_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned int old, new; old = new = get_scr(ice); if (ucontrol->value.integer.value[0] /* phantom on requested */ && (~old & SCR_PHP_V)) /* 0 = voltage 5V */ { /* is off, turn on */ /* turn voltage on first, = 1 */ new = old | SCR_PHP_V; set_scr(ice, new); /* turn phantom on, = 0 */ new &= ~SCR_PHP; set_scr(ice, new); } else if (!ucontrol->value.integer.value[0] && (old & SCR_PHP_V)) { /* phantom off requested and 1 = voltage 48V */ /* is on, turn off */ /* turn voltage off first, = 0 */ new = old & ~SCR_PHP_V; set_scr(ice, new); /* turn phantom off, = 1 */ new |= SCR_PHP; set_scr(ice, new); } if (old != new) return 1; /* no change */ return 0; } #define PRIV_SW(xid, xbit, xreg) [xid] = {.bit = xbit,\ .set_register = set_##xreg,\ .get_register = get_##xreg, } #define PRIV_ENUM2(xid, xbit, xreg, xtext1, xtext2) [xid] = {.bit = xbit,\ .set_register = set_##xreg,\ .get_register = get_##xreg,\ 
.texts = {xtext1, xtext2} } static struct qtet_kcontrol_private qtet_privates[] = { PRIV_ENUM2(IN12_SEL, CPLD_IN12_SEL, cpld, "An In 1/2", "An In 3/4"), PRIV_ENUM2(IN34_SEL, CPLD_IN34_SEL, cpld, "An In 3/4", "IEC958 In"), PRIV_ENUM2(AIN34_SEL, SCR_AIN34_SEL, scr, "Line In 3/4", "Hi-Z"), PRIV_ENUM2(COAX_OUT, CPLD_COAX_OUT, cpld, "IEC958", "I2S"), PRIV_SW(IN12_MON12, MCR_IN12_MON12, mcr), PRIV_SW(IN12_MON34, MCR_IN12_MON34, mcr), PRIV_SW(IN34_MON12, MCR_IN34_MON12, mcr), PRIV_SW(IN34_MON34, MCR_IN34_MON34, mcr), PRIV_SW(OUT12_MON34, MCR_OUT12_MON34, mcr), PRIV_SW(OUT34_MON12, MCR_OUT34_MON12, mcr), }; static int qtet_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct qtet_kcontrol_private private = qtet_privates[kcontrol->private_value]; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = ARRAY_SIZE(private.texts); if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items) uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1; strcpy(uinfo->value.enumerated.name, private.texts[uinfo->value.enumerated.item]); return 0; } static int qtet_sw_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct qtet_kcontrol_private private = qtet_privates[kcontrol->private_value]; struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = (private.get_register(ice) & private.bit) ? 
1 : 0; return 0; } static int qtet_sw_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct qtet_kcontrol_private private = qtet_privates[kcontrol->private_value]; struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned int old, new; old = private.get_register(ice); if (ucontrol->value.integer.value[0]) new = old | private.bit; else new = old & ~private.bit; if (old != new) { private.set_register(ice, new); return 1; } /* no change */ return 0; } #define qtet_sw_info snd_ctl_boolean_mono_info #define QTET_CONTROL(xname, xtype, xpriv) \ {.iface = SNDRV_CTL_ELEM_IFACE_MIXER,\ .name = xname,\ .info = qtet_##xtype##_info,\ .get = qtet_sw_get,\ .put = qtet_sw_put,\ .private_value = xpriv } static struct snd_kcontrol_new qtet_controls[] = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Master Playback Switch", .info = qtet_sw_info, .get = qtet_mute_get, .put = qtet_mute_put, .private_value = 0 }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Phantom Power", .info = qtet_sw_info, .get = qtet_php_get, .put = qtet_php_put, .private_value = 0 }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Analog In 1/2 Capture Switch", .info = qtet_ain12_enum_info, .get = qtet_ain12_sw_get, .put = qtet_ain12_sw_put, .private_value = 0 }, QTET_CONTROL("Analog In 3/4 Capture Switch", enum, AIN34_SEL), QTET_CONTROL("PCM In 1/2 Capture Switch", enum, IN12_SEL), QTET_CONTROL("PCM In 3/4 Capture Switch", enum, IN34_SEL), QTET_CONTROL("Coax Output Source", enum, COAX_OUT), QTET_CONTROL("Analog In 1/2 to Monitor 1/2", sw, IN12_MON12), QTET_CONTROL("Analog In 1/2 to Monitor 3/4", sw, IN12_MON34), QTET_CONTROL("Analog In 3/4 to Monitor 1/2", sw, IN34_MON12), QTET_CONTROL("Analog In 3/4 to Monitor 3/4", sw, IN34_MON34), QTET_CONTROL("Output 1/2 to Monitor 3/4", sw, OUT12_MON34), QTET_CONTROL("Output 3/4 to Monitor 1/2", sw, OUT34_MON12), }; static char *slave_vols[] = { PCM_12_PLAYBACK_VOLUME, PCM_34_PLAYBACK_VOLUME, NULL }; static 
DECLARE_TLV_DB_SCALE(qtet_master_db_scale, -6350, 50, 1); static struct snd_kcontrol *ctl_find(struct snd_card *card, const char *name) { struct snd_ctl_elem_id sid; memset(&sid, 0, sizeof(sid)); /* FIXME: strcpy is bad. */ strcpy(sid.name, name); sid.iface = SNDRV_CTL_ELEM_IFACE_MIXER; return snd_ctl_find_id(card, &sid); } static void add_slaves(struct snd_card *card, struct snd_kcontrol *master, char * const *list) { for (; *list; list++) { struct snd_kcontrol *slave = ctl_find(card, *list); if (slave) snd_ctl_add_slave(master, slave); } } static int qtet_add_controls(struct snd_ice1712 *ice) { struct qtet_spec *spec = ice->spec; int err, i; struct snd_kcontrol *vmaster; err = snd_ice1712_akm4xxx_build_controls(ice); if (err < 0) return err; for (i = 0; i < ARRAY_SIZE(qtet_controls); i++) { err = snd_ctl_add(ice->card, snd_ctl_new1(&qtet_controls[i], ice)); if (err < 0) return err; } /* Create virtual master control */ vmaster = snd_ctl_make_virtual_master("Master Playback Volume", qtet_master_db_scale); if (!vmaster) return -ENOMEM; add_slaves(ice->card, vmaster, slave_vols); err = snd_ctl_add(ice->card, vmaster); if (err < 0) return err; /* only capture SPDIF over AK4113 */ err = snd_ak4113_build(spec->ak4113, ice->pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream); if (err < 0) return err; return 0; } static inline int qtet_is_spdif_master(struct snd_ice1712 *ice) { /* CPLD_SYNC_SEL: 0 = internal, 1 = external (i.e. spdif master) */ return (get_cpld(ice) & CPLD_SYNC_SEL) ? 
1 : 0; } static unsigned int qtet_get_rate(struct snd_ice1712 *ice) { int i; unsigned char result; result = get_cpld(ice) & CPLD_CKS_MASK; for (i = 0; i < ARRAY_SIZE(cks_vals); i++) if (cks_vals[i] == result) return qtet_rates[i]; return 0; } static int get_cks_val(int rate) { int i; for (i = 0; i < ARRAY_SIZE(qtet_rates); i++) if (qtet_rates[i] == rate) return cks_vals[i]; return 0; } /* setting new rate */ static void qtet_set_rate(struct snd_ice1712 *ice, unsigned int rate) { unsigned int new; unsigned char val; /* switching ice1724 to external clock - supplied by ext. circuits */ val = inb(ICEMT1724(ice, RATE)); outb(val | VT1724_SPDIF_MASTER, ICEMT1724(ice, RATE)); new = (get_cpld(ice) & ~CPLD_CKS_MASK) | get_cks_val(rate); /* switch to internal clock, drop CPLD_SYNC_SEL */ new &= ~CPLD_SYNC_SEL; /* printk(KERN_DEBUG "QT - set_rate: old %x, new %x\n", get_cpld(ice), new); */ set_cpld(ice, new); } static inline unsigned char qtet_set_mclk(struct snd_ice1712 *ice, unsigned int rate) { /* no change in master clock */ return 0; } /* setting clock to external - SPDIF */ static int qtet_set_spdif_clock(struct snd_ice1712 *ice, int type) { unsigned int old, new; old = new = get_cpld(ice); new &= ~(CPLD_CKS_MASK | CPLD_WORD_SEL); switch (type) { case EXT_SPDIF_TYPE: new |= CPLD_EXT_SPDIF; break; case EXT_WORDCLOCK_1FS_TYPE: new |= CPLD_EXT_WORDCLOCK_1FS; break; case EXT_WORDCLOCK_256FS_TYPE: new |= CPLD_EXT_WORDCLOCK_256FS; break; default: snd_BUG(); } if (old != new) { set_cpld(ice, new); /* changed */ return 1; } return 0; } static int qtet_get_spdif_master_type(struct snd_ice1712 *ice) { unsigned int val; int result; val = get_cpld(ice); /* checking only rate/clock-related bits */ val &= (CPLD_CKS_MASK | CPLD_WORD_SEL | CPLD_SYNC_SEL); if (!(val & CPLD_SYNC_SEL)) { /* switched to internal clock, is not any external type */ result = -1; } else { switch (val) { case (CPLD_EXT_SPDIF): result = EXT_SPDIF_TYPE; break; case (CPLD_EXT_WORDCLOCK_1FS): result = 
EXT_WORDCLOCK_1FS_TYPE; break; case (CPLD_EXT_WORDCLOCK_256FS): result = EXT_WORDCLOCK_256FS_TYPE; break; default: /* undefined combination of external clock setup */ snd_BUG(); result = 0; } } return result; } /* Called when ak4113 detects change in the input SPDIF stream */ static void qtet_ak4113_change(struct ak4113 *ak4113, unsigned char c0, unsigned char c1) { struct snd_ice1712 *ice = ak4113->change_callback_private; int rate; if ((qtet_get_spdif_master_type(ice) == EXT_SPDIF_TYPE) && c1) { /* only for SPDIF master mode, rate was changed */ rate = snd_ak4113_external_rate(ak4113); /* printk(KERN_DEBUG "ak4113 - input rate changed to %d\n", rate); */ qtet_akm_set_rate_val(ice->akm, rate); } } /* * If clock slaved to SPDIF-IN, setting runtime rate * to the detected external rate */ static void qtet_spdif_in_open(struct snd_ice1712 *ice, struct snd_pcm_substream *substream) { struct qtet_spec *spec = ice->spec; struct snd_pcm_runtime *runtime = substream->runtime; int rate; if (qtet_get_spdif_master_type(ice) != EXT_SPDIF_TYPE) /* not external SPDIF, no rate limitation */ return; /* only external SPDIF can detect incoming sample rate */ rate = snd_ak4113_external_rate(spec->ak4113); if (rate >= runtime->hw.rate_min && rate <= runtime->hw.rate_max) { runtime->hw.rate_min = rate; runtime->hw.rate_max = rate; } } /* * initialize the chip */ static int qtet_init(struct snd_ice1712 *ice) { static const unsigned char ak4113_init_vals[] = { /* AK4113_REG_PWRDN */ AK4113_RST | AK4113_PWN | AK4113_OCKS0 | AK4113_OCKS1, /* AK4113_REQ_FORMAT */ AK4113_DIF_I24I2S | AK4113_VTX | AK4113_DEM_OFF | AK4113_DEAU, /* AK4113_REG_IO0 */ AK4113_OPS2 | AK4113_TXE | AK4113_XTL_24_576M, /* AK4113_REG_IO1 */ AK4113_EFH_1024LRCLK | AK4113_IPS(0), /* AK4113_REG_INT0_MASK */ 0, /* AK4113_REG_INT1_MASK */ 0, /* AK4113_REG_DATDTS */ 0, }; int err; struct qtet_spec *spec; struct snd_akm4xxx *ak; unsigned char val; /* switching ice1724 to external clock - supplied by ext. 
circuits */ val = inb(ICEMT1724(ice, RATE)); outb(val | VT1724_SPDIF_MASTER, ICEMT1724(ice, RATE)); spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return -ENOMEM; /* qtet is clocked by Xilinx array */ ice->hw_rates = &qtet_rates_info; ice->is_spdif_master = qtet_is_spdif_master; ice->get_rate = qtet_get_rate; ice->set_rate = qtet_set_rate; ice->set_mclk = qtet_set_mclk; ice->set_spdif_clock = qtet_set_spdif_clock; ice->get_spdif_master_type = qtet_get_spdif_master_type; ice->ext_clock_names = ext_clock_names; ice->ext_clock_count = ARRAY_SIZE(ext_clock_names); /* since Qtet can detect correct SPDIF-in rate, all streams can be * limited to this specific rate */ ice->spdif.ops.open = ice->pro_open = qtet_spdif_in_open; ice->spec = spec; /* Mute Off */ /* SCR Initialize*/ /* keep codec power down first */ set_scr(ice, SCR_PHP); udelay(1); /* codec power up */ set_scr(ice, SCR_PHP | SCR_CODEC_PDN); /* MCR Initialize */ set_mcr(ice, 0); /* CPLD Initialize */ set_cpld(ice, 0); ice->num_total_dacs = 2; ice->num_total_adcs = 2; ice->akm = kcalloc(2, sizeof(struct snd_akm4xxx), GFP_KERNEL); ak = ice->akm; if (!ak) return -ENOMEM; /* only one codec with two chips */ ice->akm_codecs = 1; err = snd_ice1712_akm4xxx_init(ak, &akm_qtet_dac, NULL, ice); if (err < 0) return err; err = snd_ak4113_create(ice->card, qtet_ak4113_read, qtet_ak4113_write, ak4113_init_vals, ice, &spec->ak4113); if (err < 0) return err; /* callback for codecs rate setting */ spec->ak4113->change_callback = qtet_ak4113_change; spec->ak4113->change_callback_private = ice; /* AK41143 in Quartet can detect external rate correctly * (i.e. 
check_flags = 0) */ spec->ak4113->check_flags = 0; proc_init(ice); qtet_set_rate(ice, 44100); return 0; } static unsigned char qtet_eeprom[] = { [ICE_EEP2_SYSCONF] = 0x28, /* clock 256(24MHz), mpu401, 1xADC, 1xDACs, SPDIF in */ [ICE_EEP2_ACLINK] = 0x80, /* I2S */ [ICE_EEP2_I2S] = 0x78, /* 96k, 24bit, 192k */ [ICE_EEP2_SPDIF] = 0xc3, /* out-en, out-int, in, out-ext */ [ICE_EEP2_GPIO_DIR] = 0x00, /* 0-7 inputs, switched to output only during output operations */ [ICE_EEP2_GPIO_DIR1] = 0xff, /* 8-15 outputs */ [ICE_EEP2_GPIO_DIR2] = 0x00, [ICE_EEP2_GPIO_MASK] = 0xff, /* changed only for OUT operations */ [ICE_EEP2_GPIO_MASK1] = 0x00, [ICE_EEP2_GPIO_MASK2] = 0xff, [ICE_EEP2_GPIO_STATE] = 0x00, /* inputs */ [ICE_EEP2_GPIO_STATE1] = 0x7d, /* all 1, but GPIO_CPLD_RW and GPIO15 always zero */ [ICE_EEP2_GPIO_STATE2] = 0x00, /* inputs */ }; /* entry point */ struct snd_ice1712_card_info snd_vt1724_qtet_cards[] = { { .subvendor = VT1724_SUBDEVICE_QTET, .name = "Infrasonic Quartet", .model = "quartet", .chip_init = qtet_init, .build_controls = qtet_add_controls, .eeprom_size = sizeof(qtet_eeprom), .eeprom_data = qtet_eeprom, }, { } /* terminator */ };
gpl-2.0
atgreen/busybox-moxie
e2fsprogs/old_e2fsprogs/ext2fs/brel_ma.c
328
4189
/* vi: set sw=4 ts=4: */ /* * brel_ma.c * * Copyright (C) 1996, 1997 Theodore Ts'o. * * TODO: rewrite to not use a direct array!!! (Fortunately this * module isn't really used yet.) * * %Begin-Header% * This file may be redistributed under the terms of the GNU Public * License. * %End-Header% */ #include <fcntl.h> #include <stdio.h> #include <string.h> #if HAVE_UNISTD_H #include <unistd.h> #endif #if HAVE_ERRNO_H #include <errno.h> #endif #include "ext2_fs.h" #include "ext2fs.h" #include "brel.h" static errcode_t bma_put(ext2_brel brel, blk_t old, struct ext2_block_relocate_entry *ent); static errcode_t bma_get(ext2_brel brel, blk_t old, struct ext2_block_relocate_entry *ent); static errcode_t bma_start_iter(ext2_brel brel); static errcode_t bma_next(ext2_brel brel, blk_t *old, struct ext2_block_relocate_entry *ent); static errcode_t bma_move(ext2_brel brel, blk_t old, blk_t new); static errcode_t bma_delete(ext2_brel brel, blk_t old); static errcode_t bma_free(ext2_brel brel); struct brel_ma { __u32 magic; blk_t max_block; struct ext2_block_relocate_entry *entries; }; errcode_t ext2fs_brel_memarray_create(char *name, blk_t max_block, ext2_brel *new_brel) { ext2_brel brel = 0; errcode_t retval; struct brel_ma *ma = 0; size_t size; *new_brel = 0; /* * Allocate memory structures */ retval = ext2fs_get_mem(sizeof(struct ext2_block_relocation_table), &brel); if (retval) goto errout; memset(brel, 0, sizeof(struct ext2_block_relocation_table)); retval = ext2fs_get_mem(strlen(name)+1, &brel->name); if (retval) goto errout; strcpy(brel->name, name); retval = ext2fs_get_mem(sizeof(struct brel_ma), &ma); if (retval) goto errout; memset(ma, 0, sizeof(struct brel_ma)); brel->priv_data = ma; size = (size_t) (sizeof(struct ext2_block_relocate_entry) * (max_block+1)); retval = ext2fs_get_mem(size, &ma->entries); if (retval) goto errout; memset(ma->entries, 0, size); ma->max_block = max_block; /* * Fill in the brel data structure */ brel->put = bma_put; brel->get = bma_get; 
brel->start_iter = bma_start_iter; brel->next = bma_next; brel->move = bma_move; brel->delete = bma_delete; brel->free = bma_free; *new_brel = brel; return 0; errout: bma_free(brel); return retval; } static errcode_t bma_put(ext2_brel brel, blk_t old, struct ext2_block_relocate_entry *ent) { struct brel_ma *ma; ma = brel->priv_data; if (old > ma->max_block) return EXT2_ET_INVALID_ARGUMENT; ma->entries[(unsigned)old] = *ent; return 0; } static errcode_t bma_get(ext2_brel brel, blk_t old, struct ext2_block_relocate_entry *ent) { struct brel_ma *ma; ma = brel->priv_data; if (old > ma->max_block) return EXT2_ET_INVALID_ARGUMENT; if (ma->entries[(unsigned)old].new == 0) return ENOENT; *ent = ma->entries[old]; return 0; } static errcode_t bma_start_iter(ext2_brel brel) { brel->current = 0; return 0; } static errcode_t bma_next(ext2_brel brel, blk_t *old, struct ext2_block_relocate_entry *ent) { struct brel_ma *ma; ma = brel->priv_data; while (++brel->current < ma->max_block) { if (ma->entries[(unsigned)brel->current].new == 0) continue; *old = brel->current; *ent = ma->entries[(unsigned)brel->current]; return 0; } *old = 0; return 0; } static errcode_t bma_move(ext2_brel brel, blk_t old, blk_t new) { struct brel_ma *ma; ma = brel->priv_data; if ((old > ma->max_block) || (new > ma->max_block)) return EXT2_ET_INVALID_ARGUMENT; if (ma->entries[(unsigned)old].new == 0) return ENOENT; ma->entries[(unsigned)new] = ma->entries[old]; ma->entries[(unsigned)old].new = 0; return 0; } static errcode_t bma_delete(ext2_brel brel, blk_t old) { struct brel_ma *ma; ma = brel->priv_data; if (old > ma->max_block) return EXT2_ET_INVALID_ARGUMENT; if (ma->entries[(unsigned)old].new == 0) return ENOENT; ma->entries[(unsigned)old].new = 0; return 0; } static errcode_t bma_free(ext2_brel brel) { struct brel_ma *ma; if (!brel) return 0; ma = brel->priv_data; if (ma) { ext2fs_free_mem(&ma->entries); ext2fs_free_mem(&ma); } ext2fs_free_mem(&brel->name); ext2fs_free_mem(&brel); return 0; }
gpl-2.0
binkybear/furnace-bacon
drivers/gpu/drm/radeon/radeon_pm.c
328
27573
/* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Rafał Miłecki <zajec5@gmail.com> * Alex Deucher <alexdeucher@gmail.com> */ #include "drmP.h" #include "radeon.h" #include "avivod.h" #include "atom.h" #ifdef CONFIG_ACPI #include <linux/acpi.h> #endif #include <linux/power_supply.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #define RADEON_IDLE_LOOP_MS 100 #define RADEON_RECLOCK_DELAY_MS 200 #define RADEON_WAIT_VBLANK_TIMEOUT 200 #define RADEON_WAIT_IDLE_TIMEOUT 200 static const char *radeon_pm_state_type_name[5] = { "Default", "Powersave", "Battery", "Balanced", "Performance", }; static void radeon_dynpm_idle_work_handler(struct work_struct *work); static int radeon_debugfs_pm_init(struct radeon_device *rdev); static bool radeon_pm_in_vbl(struct radeon_device *rdev); static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish); static void radeon_pm_update_profile(struct radeon_device *rdev); static void radeon_pm_set_clocks(struct radeon_device *rdev); #define ACPI_AC_CLASS "ac_adapter" int radeon_pm_get_type_index(struct radeon_device *rdev, enum radeon_pm_state_type ps_type, int instance) { int i; int found_instance = -1; for (i = 0; i < rdev->pm.num_power_states; i++) { if (rdev->pm.power_state[i].type == ps_type) { found_instance++; if (found_instance == instance) return i; } } /* return default if no match */ return rdev->pm.default_power_state_index; } #ifdef CONFIG_ACPI static int radeon_acpi_event(struct notifier_block *nb, unsigned long val, void *data) { struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb); struct acpi_bus_event *entry = (struct acpi_bus_event *)data; if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) { if (power_supply_is_system_supplied() > 0) DRM_DEBUG_DRIVER("pm: AC\n"); else DRM_DEBUG_DRIVER("pm: DC\n"); if (rdev->pm.pm_method == PM_METHOD_PROFILE) { if (rdev->pm.profile == PM_PROFILE_AUTO) { mutex_lock(&rdev->pm.mutex); radeon_pm_update_profile(rdev); radeon_pm_set_clocks(rdev); mutex_unlock(&rdev->pm.mutex); 
} } } return NOTIFY_OK; } #endif static void radeon_pm_update_profile(struct radeon_device *rdev) { switch (rdev->pm.profile) { case PM_PROFILE_DEFAULT: rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX; break; case PM_PROFILE_AUTO: if (power_supply_is_system_supplied() > 0) { if (rdev->pm.active_crtc_count > 1) rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX; else rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX; } else { if (rdev->pm.active_crtc_count > 1) rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX; else rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX; } break; case PM_PROFILE_LOW: if (rdev->pm.active_crtc_count > 1) rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX; else rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX; break; case PM_PROFILE_MID: if (rdev->pm.active_crtc_count > 1) rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX; else rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX; break; case PM_PROFILE_HIGH: if (rdev->pm.active_crtc_count > 1) rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX; else rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX; break; } if (rdev->pm.active_crtc_count == 0) { rdev->pm.requested_power_state_index = rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx; rdev->pm.requested_clock_mode_index = rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx; } else { rdev->pm.requested_power_state_index = rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx; rdev->pm.requested_clock_mode_index = rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx; } } static void radeon_unmap_vram_bos(struct radeon_device *rdev) { struct radeon_bo *bo, *n; if (list_empty(&rdev->gem.objects)) return; list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { if (bo->tbo.mem.mem_type == TTM_PL_VRAM) ttm_bo_unmap_virtual(&bo->tbo); } } static void radeon_sync_with_vblank(struct radeon_device *rdev) { if (rdev->pm.active_crtcs) { rdev->pm.vblank_sync = false; wait_event_timeout( rdev->irq.vblank_queue, rdev->pm.vblank_sync, 
msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT)); } } static void radeon_set_power_state(struct radeon_device *rdev) { u32 sclk, mclk; bool misc_after = false; if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) return; if (radeon_gui_idle(rdev)) { sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. clock_info[rdev->pm.requested_clock_mode_index].sclk; if (sclk > rdev->pm.default_sclk) sclk = rdev->pm.default_sclk; mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. clock_info[rdev->pm.requested_clock_mode_index].mclk; if (mclk > rdev->pm.default_mclk) mclk = rdev->pm.default_mclk; /* upvolt before raising clocks, downvolt after lowering clocks */ if (sclk < rdev->pm.current_sclk) misc_after = true; radeon_sync_with_vblank(rdev); if (rdev->pm.pm_method == PM_METHOD_DYNPM) { if (!radeon_pm_in_vbl(rdev)) return; } radeon_pm_prepare(rdev); if (!misc_after) /* voltage, pcie lanes, etc.*/ radeon_pm_misc(rdev); /* set engine clock */ if (sclk != rdev->pm.current_sclk) { radeon_pm_debug_check_in_vbl(rdev, false); radeon_set_engine_clock(rdev, sclk); radeon_pm_debug_check_in_vbl(rdev, true); rdev->pm.current_sclk = sclk; DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk); } /* set memory clock */ if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) { radeon_pm_debug_check_in_vbl(rdev, false); radeon_set_memory_clock(rdev, mclk); radeon_pm_debug_check_in_vbl(rdev, true); rdev->pm.current_mclk = mclk; DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk); } if (misc_after) /* voltage, pcie lanes, etc.*/ radeon_pm_misc(rdev); radeon_pm_finish(rdev); rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index; rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index; } else DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n"); } static void radeon_pm_set_clocks(struct radeon_device *rdev) { int i; /* no need to take locks, 
etc. if nothing's going to change */ if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) return; mutex_lock(&rdev->ddev->struct_mutex); mutex_lock(&rdev->vram_mutex); for (i = 0; i < RADEON_NUM_RINGS; ++i) { if (rdev->ring[i].ring_obj) mutex_lock(&rdev->ring[i].mutex); } /* gui idle int has issues on older chips it seems */ if (rdev->family >= CHIP_R600) { if (rdev->irq.installed) { /* wait for GPU idle */ rdev->pm.gui_idle = false; rdev->irq.gui_idle = true; radeon_irq_set(rdev); wait_event_interruptible_timeout( rdev->irq.idle_queue, rdev->pm.gui_idle, msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT)); rdev->irq.gui_idle = false; radeon_irq_set(rdev); } } else { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; if (ring->ready) { struct radeon_fence *fence; radeon_ring_alloc(rdev, ring, 64); radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, ring)); radeon_fence_emit(rdev, fence); radeon_ring_commit(rdev, ring); radeon_fence_wait(fence, false); radeon_fence_unref(&fence); } } radeon_unmap_vram_bos(rdev); if (rdev->irq.installed) { for (i = 0; i < rdev->num_crtc; i++) { if (rdev->pm.active_crtcs & (1 << i)) { rdev->pm.req_vblank |= (1 << i); drm_vblank_get(rdev->ddev, i); } } } radeon_set_power_state(rdev); if (rdev->irq.installed) { for (i = 0; i < rdev->num_crtc; i++) { if (rdev->pm.req_vblank & (1 << i)) { rdev->pm.req_vblank &= ~(1 << i); drm_vblank_put(rdev->ddev, i); } } } /* update display watermarks based on new power state */ radeon_update_bandwidth_info(rdev); if (rdev->pm.active_crtc_count) radeon_bandwidth_update(rdev); rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; for (i = 0; i < RADEON_NUM_RINGS; ++i) { if (rdev->ring[i].ring_obj) mutex_unlock(&rdev->ring[i].mutex); } mutex_unlock(&rdev->vram_mutex); mutex_unlock(&rdev->ddev->struct_mutex); } static void radeon_pm_print_states(struct radeon_device *rdev) { int i, j; 
struct radeon_power_state *power_state; struct radeon_pm_clock_info *clock_info; DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states); for (i = 0; i < rdev->pm.num_power_states; i++) { power_state = &rdev->pm.power_state[i]; DRM_DEBUG_DRIVER("State %d: %s\n", i, radeon_pm_state_type_name[power_state->type]); if (i == rdev->pm.default_power_state_index) DRM_DEBUG_DRIVER("\tDefault"); if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP)) DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes); if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) DRM_DEBUG_DRIVER("\tSingle display only\n"); DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes); for (j = 0; j < power_state->num_clock_modes; j++) { clock_info = &(power_state->clock_info[j]); if (rdev->flags & RADEON_IS_IGP) DRM_DEBUG_DRIVER("\t\t%d e: %d%s\n", j, clock_info->sclk * 10, clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : ""); else DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d%s\n", j, clock_info->sclk * 10, clock_info->mclk * 10, clock_info->voltage.voltage, clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : ""); } } } static ssize_t radeon_get_pm_profile(struct device *dev, struct device_attribute *attr, char *buf) { struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); struct radeon_device *rdev = ddev->dev_private; int cp = rdev->pm.profile; return snprintf(buf, PAGE_SIZE, "%s\n", (cp == PM_PROFILE_AUTO) ? "auto" : (cp == PM_PROFILE_LOW) ? "low" : (cp == PM_PROFILE_MID) ? "mid" : (cp == PM_PROFILE_HIGH) ? 
"high" : "default"); } static ssize_t radeon_set_pm_profile(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); struct radeon_device *rdev = ddev->dev_private; mutex_lock(&rdev->pm.mutex); if (rdev->pm.pm_method == PM_METHOD_PROFILE) { if (strncmp("default", buf, strlen("default")) == 0) rdev->pm.profile = PM_PROFILE_DEFAULT; else if (strncmp("auto", buf, strlen("auto")) == 0) rdev->pm.profile = PM_PROFILE_AUTO; else if (strncmp("low", buf, strlen("low")) == 0) rdev->pm.profile = PM_PROFILE_LOW; else if (strncmp("mid", buf, strlen("mid")) == 0) rdev->pm.profile = PM_PROFILE_MID; else if (strncmp("high", buf, strlen("high")) == 0) rdev->pm.profile = PM_PROFILE_HIGH; else { count = -EINVAL; goto fail; } radeon_pm_update_profile(rdev); radeon_pm_set_clocks(rdev); } else count = -EINVAL; fail: mutex_unlock(&rdev->pm.mutex); return count; } static ssize_t radeon_get_pm_method(struct device *dev, struct device_attribute *attr, char *buf) { struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); struct radeon_device *rdev = ddev->dev_private; int pm = rdev->pm.pm_method; return snprintf(buf, PAGE_SIZE, "%s\n", (pm == PM_METHOD_DYNPM) ? 
"dynpm" : "profile"); } static ssize_t radeon_set_pm_method(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); struct radeon_device *rdev = ddev->dev_private; if (strncmp("dynpm", buf, strlen("dynpm")) == 0) { mutex_lock(&rdev->pm.mutex); rdev->pm.pm_method = PM_METHOD_DYNPM; rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; mutex_unlock(&rdev->pm.mutex); } else if (strncmp("profile", buf, strlen("profile")) == 0) { mutex_lock(&rdev->pm.mutex); /* disable dynpm */ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; rdev->pm.pm_method = PM_METHOD_PROFILE; mutex_unlock(&rdev->pm.mutex); cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); } else { count = -EINVAL; goto fail; } radeon_pm_compute_clocks(rdev); fail: return count; } static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile); static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method); static ssize_t radeon_hwmon_show_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); struct radeon_device *rdev = ddev->dev_private; int temp; switch (rdev->pm.int_thermal_type) { case THERMAL_TYPE_RV6XX: temp = rv6xx_get_temp(rdev); break; case THERMAL_TYPE_RV770: temp = rv770_get_temp(rdev); break; case THERMAL_TYPE_EVERGREEN: case THERMAL_TYPE_NI: temp = evergreen_get_temp(rdev); break; case THERMAL_TYPE_SUMO: temp = sumo_get_temp(rdev); break; case THERMAL_TYPE_SI: temp = si_get_temp(rdev); break; default: temp = 0; break; } return snprintf(buf, PAGE_SIZE, "%d\n", temp); } static ssize_t radeon_hwmon_show_name(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "radeon\n"); } static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); 
static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0); static struct attribute *hwmon_attributes[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_name.dev_attr.attr, NULL }; static const struct attribute_group hwmon_attrgroup = { .attrs = hwmon_attributes, }; static int radeon_hwmon_init(struct radeon_device *rdev) { int err = 0; rdev->pm.int_hwmon_dev = NULL; switch (rdev->pm.int_thermal_type) { case THERMAL_TYPE_RV6XX: case THERMAL_TYPE_RV770: case THERMAL_TYPE_EVERGREEN: case THERMAL_TYPE_NI: case THERMAL_TYPE_SUMO: case THERMAL_TYPE_SI: /* No support for TN yet */ if (rdev->family == CHIP_ARUBA) return err; rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); if (IS_ERR(rdev->pm.int_hwmon_dev)) { err = PTR_ERR(rdev->pm.int_hwmon_dev); dev_err(rdev->dev, "Unable to register hwmon device: %d\n", err); break; } dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev); err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup); if (err) { dev_err(rdev->dev, "Unable to create hwmon sysfs file: %d\n", err); hwmon_device_unregister(rdev->dev); } break; default: break; } return err; } static void radeon_hwmon_fini(struct radeon_device *rdev) { if (rdev->pm.int_hwmon_dev) { sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup); hwmon_device_unregister(rdev->pm.int_hwmon_dev); } } void radeon_pm_suspend(struct radeon_device *rdev) { mutex_lock(&rdev->pm.mutex); if (rdev->pm.pm_method == PM_METHOD_DYNPM) { if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED; } mutex_unlock(&rdev->pm.mutex); cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); } void radeon_pm_resume(struct radeon_device *rdev) { /* set up the default clocks if the MC ucode is loaded */ if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN) && rdev->mc_fw) { if (rdev->pm.default_vddc) radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, SET_VOLTAGE_TYPE_ASIC_VDDC); if 
(rdev->pm.default_vddci) radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI); if (rdev->pm.default_sclk) radeon_set_engine_clock(rdev, rdev->pm.default_sclk); if (rdev->pm.default_mclk) radeon_set_memory_clock(rdev, rdev->pm.default_mclk); } /* asic init will reset the default power state */ mutex_lock(&rdev->pm.mutex); rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; rdev->pm.current_clock_mode_index = 0; rdev->pm.current_sclk = rdev->pm.default_sclk; rdev->pm.current_mclk = rdev->pm.default_mclk; rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci; if (rdev->pm.pm_method == PM_METHOD_DYNPM && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; schedule_delayed_work(&rdev->pm.dynpm_idle_work, msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); } mutex_unlock(&rdev->pm.mutex); radeon_pm_compute_clocks(rdev); } int radeon_pm_init(struct radeon_device *rdev) { int ret; /* default to profile method */ rdev->pm.pm_method = PM_METHOD_PROFILE; rdev->pm.profile = PM_PROFILE_DEFAULT; rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; rdev->pm.dynpm_can_upclock = true; rdev->pm.dynpm_can_downclock = true; rdev->pm.default_sclk = rdev->clock.default_sclk; rdev->pm.default_mclk = rdev->clock.default_mclk; rdev->pm.current_sclk = rdev->clock.default_sclk; rdev->pm.current_mclk = rdev->clock.default_mclk; rdev->pm.int_thermal_type = THERMAL_TYPE_NONE; if (rdev->bios) { if (rdev->is_atom_bios) radeon_atombios_get_power_modes(rdev); else radeon_combios_get_power_modes(rdev); radeon_pm_print_states(rdev); radeon_pm_init_profile(rdev); /* set up the default clocks if the MC ucode is loaded */ if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN) && rdev->mc_fw) { if 
(rdev->pm.default_vddc) radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, SET_VOLTAGE_TYPE_ASIC_VDDC); if (rdev->pm.default_vddci) radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI); if (rdev->pm.default_sclk) radeon_set_engine_clock(rdev, rdev->pm.default_sclk); if (rdev->pm.default_mclk) radeon_set_memory_clock(rdev, rdev->pm.default_mclk); } } /* set up the internal thermal sensor if applicable */ ret = radeon_hwmon_init(rdev); if (ret) return ret; INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); if (rdev->pm.num_power_states > 1) { /* where's the best place to put these? */ ret = device_create_file(rdev->dev, &dev_attr_power_profile); if (ret) DRM_ERROR("failed to create device file for power profile\n"); ret = device_create_file(rdev->dev, &dev_attr_power_method); if (ret) DRM_ERROR("failed to create device file for power method\n"); #ifdef CONFIG_ACPI rdev->acpi_nb.notifier_call = radeon_acpi_event; register_acpi_notifier(&rdev->acpi_nb); #endif if (radeon_debugfs_pm_init(rdev)) { DRM_ERROR("Failed to register debugfs file for PM!\n"); } DRM_INFO("radeon: power management initialized\n"); } return 0; } void radeon_pm_fini(struct radeon_device *rdev) { if (rdev->pm.num_power_states > 1) { mutex_lock(&rdev->pm.mutex); if (rdev->pm.pm_method == PM_METHOD_PROFILE) { rdev->pm.profile = PM_PROFILE_DEFAULT; radeon_pm_update_profile(rdev); radeon_pm_set_clocks(rdev); } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { /* reset default clocks */ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; radeon_pm_set_clocks(rdev); } mutex_unlock(&rdev->pm.mutex); cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); device_remove_file(rdev->dev, &dev_attr_power_profile); device_remove_file(rdev->dev, &dev_attr_power_method); #ifdef CONFIG_ACPI unregister_acpi_notifier(&rdev->acpi_nb); #endif } if (rdev->pm.power_state) kfree(rdev->pm.power_state); 
radeon_hwmon_fini(rdev); } void radeon_pm_compute_clocks(struct radeon_device *rdev) { struct drm_device *ddev = rdev->ddev; struct drm_crtc *crtc; struct radeon_crtc *radeon_crtc; if (rdev->pm.num_power_states < 2) return; mutex_lock(&rdev->pm.mutex); rdev->pm.active_crtcs = 0; rdev->pm.active_crtc_count = 0; list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { radeon_crtc = to_radeon_crtc(crtc); if (radeon_crtc->enabled) { rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); rdev->pm.active_crtc_count++; } } if (rdev->pm.pm_method == PM_METHOD_PROFILE) { radeon_pm_update_profile(rdev); radeon_pm_set_clocks(rdev); } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) { if (rdev->pm.active_crtc_count > 1) { if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { cancel_delayed_work(&rdev->pm.dynpm_idle_work); rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; radeon_pm_get_dynpm_state(rdev); radeon_pm_set_clocks(rdev); DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n"); } } else if (rdev->pm.active_crtc_count == 1) { /* TODO: Increase clocks if needed for current mode */ if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) { rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK; radeon_pm_get_dynpm_state(rdev); radeon_pm_set_clocks(rdev); schedule_delayed_work(&rdev->pm.dynpm_idle_work, msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) { rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; schedule_delayed_work(&rdev->pm.dynpm_idle_work, msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n"); } } else { /* count == 0 */ if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) { cancel_delayed_work(&rdev->pm.dynpm_idle_work); rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM; rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM; 
radeon_pm_get_dynpm_state(rdev); radeon_pm_set_clocks(rdev); } } } } mutex_unlock(&rdev->pm.mutex); } static bool radeon_pm_in_vbl(struct radeon_device *rdev) { int crtc, vpos, hpos, vbl_status; bool in_vbl = true; /* Iterate over all active crtc's. All crtc's must be in vblank, * otherwise return in_vbl == false. */ for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { if (rdev->pm.active_crtcs & (1 << crtc)) { vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos); if ((vbl_status & DRM_SCANOUTPOS_VALID) && !(vbl_status & DRM_SCANOUTPOS_INVBL)) in_vbl = false; } } return in_vbl; } static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish) { u32 stat_crtc = 0; bool in_vbl = radeon_pm_in_vbl(rdev); if (in_vbl == false) DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc, finish ? "exit" : "entry"); return in_vbl; } static void radeon_dynpm_idle_work_handler(struct work_struct *work) { struct radeon_device *rdev; int resched; rdev = container_of(work, struct radeon_device, pm.dynpm_idle_work.work); resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); mutex_lock(&rdev->pm.mutex); if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { int not_processed = 0; int i; for (i = 0; i < RADEON_NUM_RINGS; ++i) { not_processed += radeon_fence_count_emitted(rdev, i); if (not_processed >= 3) break; } if (not_processed >= 3) { /* should upclock */ if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) { rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && rdev->pm.dynpm_can_upclock) { rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK; rdev->pm.dynpm_action_timeout = jiffies + msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); } } else if (not_processed == 0) { /* should downclock */ if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) { rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE 
&& rdev->pm.dynpm_can_downclock) { rdev->pm.dynpm_planned_action = DYNPM_ACTION_DOWNCLOCK; rdev->pm.dynpm_action_timeout = jiffies + msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); } } /* Note, radeon_pm_set_clocks is called with static_switch set * to false since we want to wait for vbl to avoid flicker. */ if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE && jiffies > rdev->pm.dynpm_action_timeout) { radeon_pm_get_dynpm_state(rdev); radeon_pm_set_clocks(rdev); } schedule_delayed_work(&rdev->pm.dynpm_idle_work, msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); } mutex_unlock(&rdev->pm.mutex); ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); } /* * Debugfs info */ #if defined(CONFIG_DEBUG_FS) static int radeon_debugfs_pm_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct radeon_device *rdev = dev->dev_private; seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk); /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */ if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP)) seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk); else seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk); if (rdev->asic->pm.get_memory_clock) seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); if (rdev->pm.current_vddc) seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc); if (rdev->asic->pm.get_pcie_lanes) seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev)); return 0; } static struct drm_info_list radeon_pm_info_list[] = { {"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL}, }; #endif static int radeon_debugfs_pm_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) return radeon_debugfs_add_files(rdev, radeon_pm_info_list, 
ARRAY_SIZE(radeon_pm_info_list)); #else return 0; #endif }
gpl-2.0
finch0219/linux
fs/ocfs2/dlm/dlmast.c
1352
14600
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * dlmast.c * * AST and BAST functionality for local and remote nodes * * Copyright (C) 2004 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. * */ #include <linux/module.h> #include <linux/fs.h> #include <linux/types.h> #include <linux/highmem.h> #include <linux/init.h> #include <linux/sysctl.h> #include <linux/random.h> #include <linux/blkdev.h> #include <linux/socket.h> #include <linux/inet.h> #include <linux/spinlock.h> #include "cluster/heartbeat.h" #include "cluster/nodemanager.h" #include "cluster/tcp.h" #include "dlmapi.h" #include "dlmcommon.h" #define MLOG_MASK_PREFIX ML_DLM #include "cluster/masklog.h" static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock); static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock); /* Should be called as an ast gets queued to see if the new * lock level will obsolete a pending bast. * For example, if dlm_thread queued a bast for an EX lock that * was blocking another EX, but before sending the bast the * lock owner downconverted to NL, the bast is now obsolete. * Only the ast should be sent. 
* This is needed because the lock and convert paths can queue * asts out-of-band (not waiting for dlm_thread) in order to * allow for LKM_NOQUEUE to get immediate responses. */ static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) { assert_spin_locked(&dlm->ast_lock); assert_spin_locked(&lock->spinlock); if (lock->ml.highest_blocked == LKM_IVMODE) return 0; BUG_ON(lock->ml.highest_blocked == LKM_NLMODE); if (lock->bast_pending && list_empty(&lock->bast_list)) /* old bast already sent, ok */ return 0; if (lock->ml.type == LKM_EXMODE) /* EX blocks anything left, any bast still valid */ return 0; else if (lock->ml.type == LKM_NLMODE) /* NL blocks nothing, no reason to send any bast, cancel it */ return 1; else if (lock->ml.highest_blocked != LKM_EXMODE) /* PR only blocks EX */ return 1; return 0; } void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) { struct dlm_lock_resource *res; BUG_ON(!dlm); BUG_ON(!lock); res = lock->lockres; assert_spin_locked(&dlm->ast_lock); if (!list_empty(&lock->ast_list)) { mlog(ML_ERROR, "%s: res %.*s, lock %u:%llu, " "AST list not empty, pending %d, newlevel %d\n", dlm->name, res->lockname.len, res->lockname.name, dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), lock->ast_pending, lock->ml.type); BUG(); } if (lock->ast_pending) mlog(0, "%s: res %.*s, lock %u:%llu, AST getting flushed\n", dlm->name, res->lockname.len, res->lockname.name, dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); /* putting lock on list, add a ref */ dlm_lock_get(lock); spin_lock(&lock->spinlock); /* check to see if this ast obsoletes the bast */ if (dlm_should_cancel_bast(dlm, lock)) { mlog(0, "%s: res %.*s, lock %u:%llu, Cancelling BAST\n", dlm->name, res->lockname.len, res->lockname.name, dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); 
lock->bast_pending = 0; list_del_init(&lock->bast_list); lock->ml.highest_blocked = LKM_IVMODE; /* removing lock from list, remove a ref. guaranteed * this won't be the last ref because of the get above, * so res->spinlock will not be taken here */ dlm_lock_put(lock); /* free up the reserved bast that we are cancelling. * guaranteed that this will not be the last reserved * ast because *both* an ast and a bast were reserved * to get to this point. the res->spinlock will not be * taken here */ dlm_lockres_release_ast(dlm, res); } list_add_tail(&lock->ast_list, &dlm->pending_asts); lock->ast_pending = 1; spin_unlock(&lock->spinlock); } void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) { BUG_ON(!dlm); BUG_ON(!lock); spin_lock(&dlm->ast_lock); __dlm_queue_ast(dlm, lock); spin_unlock(&dlm->ast_lock); } void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) { struct dlm_lock_resource *res; BUG_ON(!dlm); BUG_ON(!lock); assert_spin_locked(&dlm->ast_lock); res = lock->lockres; BUG_ON(!list_empty(&lock->bast_list)); if (lock->bast_pending) mlog(0, "%s: res %.*s, lock %u:%llu, BAST getting flushed\n", dlm->name, res->lockname.len, res->lockname.name, dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); /* putting lock on list, add a ref */ dlm_lock_get(lock); spin_lock(&lock->spinlock); list_add_tail(&lock->bast_list, &dlm->pending_basts); lock->bast_pending = 1; spin_unlock(&lock->spinlock); } void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) { BUG_ON(!dlm); BUG_ON(!lock); spin_lock(&dlm->ast_lock); __dlm_queue_bast(dlm, lock); spin_unlock(&dlm->ast_lock); } static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock) { struct dlm_lockstatus *lksb = lock->lksb; BUG_ON(!lksb); /* only updates if this node masters the lockres */ spin_lock(&res->spinlock); if (res->owner == dlm->node_num) { /* check the lksb flags for the direction */ if 
(lksb->flags & DLM_LKSB_GET_LVB) { mlog(0, "getting lvb from lockres for %s node\n", lock->ml.node == dlm->node_num ? "master" : "remote"); memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN); } /* Do nothing for lvb put requests - they should be done in * place when the lock is downconverted - otherwise we risk * racing gets and puts which could result in old lvb data * being propagated. We leave the put flag set and clear it * here. In the future we might want to clear it at the time * the put is actually done. */ } spin_unlock(&res->spinlock); /* reset any lvb flags on the lksb */ lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB); } void dlm_do_local_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock) { dlm_astlockfunc_t *fn; struct dlm_lockstatus *lksb; mlog(0, "%s: res %.*s, lock %u:%llu, Local AST\n", dlm->name, res->lockname.len, res->lockname.name, dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); lksb = lock->lksb; fn = lock->ast; BUG_ON(lock->ml.node != dlm->node_num); dlm_update_lvb(dlm, res, lock); (*fn)(lock->astdata); } int dlm_do_remote_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock) { int ret; struct dlm_lockstatus *lksb; int lksbflags; mlog(0, "%s: res %.*s, lock %u:%llu, Remote AST\n", dlm->name, res->lockname.len, res->lockname.name, dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); lksb = lock->lksb; BUG_ON(lock->ml.node == dlm->node_num); lksbflags = lksb->flags; dlm_update_lvb(dlm, res, lock); /* lock request came from another node * go do the ast over there */ ret = dlm_send_proxy_ast(dlm, res, lock, lksbflags); return ret; } void dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int blocked_type) { dlm_bastlockfunc_t *fn = lock->bast; BUG_ON(lock->ml.node != dlm->node_num); mlog(0, "%s: res %.*s, lock %u:%llu, Local BAST, 
blocked %d\n", dlm->name, res->lockname.len, res->lockname.name, dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), blocked_type); (*fn)(lock->astdata, blocked_type); } int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data) { int ret; unsigned int locklen; struct dlm_ctxt *dlm = data; struct dlm_lock_resource *res = NULL; struct dlm_lock *lock = NULL; struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf; char *name; struct list_head *head = NULL; __be64 cookie; u32 flags; u8 node; if (!dlm_grab(dlm)) { dlm_error(DLM_REJECTED); return DLM_REJECTED; } mlog_bug_on_msg(!dlm_domain_fully_joined(dlm), "Domain %s not fully joined!\n", dlm->name); name = past->name; locklen = past->namelen; cookie = past->cookie; flags = be32_to_cpu(past->flags); node = past->node_idx; if (locklen > DLM_LOCKID_NAME_MAX) { ret = DLM_IVBUFLEN; mlog(ML_ERROR, "Invalid name length (%d) in proxy ast " "handler!\n", locklen); goto leave; } if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) == (LKM_PUT_LVB|LKM_GET_LVB)) { mlog(ML_ERROR, "Both PUT and GET lvb specified, (0x%x)\n", flags); ret = DLM_BADARGS; goto leave; } mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : (flags & LKM_GET_LVB ? "get lvb" : "none")); mlog(0, "type=%d, blocked_type=%d\n", past->type, past->blocked_type); if (past->type != DLM_AST && past->type != DLM_BAST) { mlog(ML_ERROR, "Unknown ast type! %d, cookie=%u:%llu" "name=%.*s, node=%u\n", past->type, dlm_get_lock_cookie_node(be64_to_cpu(cookie)), dlm_get_lock_cookie_seq(be64_to_cpu(cookie)), locklen, name, node); ret = DLM_IVLOCKID; goto leave; } res = dlm_lookup_lockres(dlm, name, locklen); if (!res) { mlog(0, "Got %sast for unknown lockres! cookie=%u:%llu, " "name=%.*s, node=%u\n", (past->type == DLM_AST ? 
"" : "b"), dlm_get_lock_cookie_node(be64_to_cpu(cookie)), dlm_get_lock_cookie_seq(be64_to_cpu(cookie)), locklen, name, node); ret = DLM_IVLOCKID; goto leave; } /* cannot get a proxy ast message if this node owns it */ BUG_ON(res->owner == dlm->node_num); mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len, res->lockname.name); spin_lock(&res->spinlock); if (res->state & DLM_LOCK_RES_RECOVERING) { mlog(0, "Responding with DLM_RECOVERING!\n"); ret = DLM_RECOVERING; goto unlock_out; } if (res->state & DLM_LOCK_RES_MIGRATING) { mlog(0, "Responding with DLM_MIGRATING!\n"); ret = DLM_MIGRATING; goto unlock_out; } /* try convert queue for both ast/bast */ head = &res->converting; lock = NULL; list_for_each_entry(lock, head, list) { if (lock->ml.cookie == cookie) goto do_ast; } /* if not on convert, try blocked for ast, granted for bast */ if (past->type == DLM_AST) head = &res->blocked; else head = &res->granted; list_for_each_entry(lock, head, list) { /* if lock is found but unlock is pending ignore the bast */ if (lock->ml.cookie == cookie) { if (lock->unlock_pending) break; goto do_ast; } } mlog(0, "Got %sast for unknown lock! cookie=%u:%llu, name=%.*s, " "node=%u\n", past->type == DLM_AST ? "" : "b", dlm_get_lock_cookie_node(be64_to_cpu(cookie)), dlm_get_lock_cookie_seq(be64_to_cpu(cookie)), locklen, name, node); ret = DLM_NORMAL; unlock_out: spin_unlock(&res->spinlock); goto leave; do_ast: ret = DLM_NORMAL; if (past->type == DLM_AST) { /* do not alter lock refcount. switching lists. */ list_move_tail(&lock->list, &res->granted); mlog(0, "%s: res %.*s, lock %u:%llu, Granted type %d => %d\n", dlm->name, res->lockname.len, res->lockname.name, dlm_get_lock_cookie_node(be64_to_cpu(cookie)), dlm_get_lock_cookie_seq(be64_to_cpu(cookie)), lock->ml.type, lock->ml.convert_type); if (lock->ml.convert_type != LKM_IVMODE) { lock->ml.type = lock->ml.convert_type; lock->ml.convert_type = LKM_IVMODE; } else { // should already be there.... 
} lock->lksb->status = DLM_NORMAL; /* if we requested the lvb, fetch it into our lksb now */ if (flags & LKM_GET_LVB) { BUG_ON(!(lock->lksb->flags & DLM_LKSB_GET_LVB)); memcpy(lock->lksb->lvb, past->lvb, DLM_LVB_LEN); } } spin_unlock(&res->spinlock); if (past->type == DLM_AST) dlm_do_local_ast(dlm, res, lock); else dlm_do_local_bast(dlm, res, lock, past->blocked_type); leave: if (res) dlm_lockres_put(res); dlm_put(dlm); return ret; } int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int msg_type, int blocked_type, int flags) { int ret = 0; struct dlm_proxy_ast past; struct kvec vec[2]; size_t veclen = 1; int status; mlog(0, "%s: res %.*s, to %u, type %d, blocked_type %d\n", dlm->name, res->lockname.len, res->lockname.name, lock->ml.node, msg_type, blocked_type); memset(&past, 0, sizeof(struct dlm_proxy_ast)); past.node_idx = dlm->node_num; past.type = msg_type; past.blocked_type = blocked_type; past.namelen = res->lockname.len; memcpy(past.name, res->lockname.name, past.namelen); past.cookie = lock->ml.cookie; vec[0].iov_len = sizeof(struct dlm_proxy_ast); vec[0].iov_base = &past; if (flags & DLM_LKSB_GET_LVB) { be32_add_cpu(&past.flags, LKM_GET_LVB); vec[1].iov_len = DLM_LVB_LEN; vec[1].iov_base = lock->lksb->lvb; veclen++; } ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen, lock->ml.node, &status); if (ret < 0) mlog(ML_ERROR, "%s: res %.*s, error %d send AST to node %u\n", dlm->name, res->lockname.len, res->lockname.name, ret, lock->ml.node); else { if (status == DLM_RECOVERING) { mlog(ML_ERROR, "sent AST to node %u, it thinks this " "node is dead!\n", lock->ml.node); BUG(); } else if (status == DLM_MIGRATING) { mlog(ML_ERROR, "sent AST to node %u, it returned " "DLM_MIGRATING!\n", lock->ml.node); BUG(); } else if (status != DLM_NORMAL && status != DLM_IVLOCKID) { mlog(ML_ERROR, "AST to node %u returned %d!\n", lock->ml.node, status); /* ignore it */ } ret = 0; } return ret; }
gpl-2.0
tux-mind/tf201-kernel
drivers/acpi/acpica/excreate.c
1352
15390
/****************************************************************************** * * Module Name: excreate - Named object creation * *****************************************************************************/ /* * Copyright (C) 2000 - 2011, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acinterp.h" #include "amlcode.h" #include "acnamesp.h" #define _COMPONENT ACPI_EXECUTER ACPI_MODULE_NAME("excreate") #ifndef ACPI_NO_METHOD_EXECUTION /******************************************************************************* * * FUNCTION: acpi_ex_create_alias * * PARAMETERS: walk_state - Current state, contains operands * * RETURN: Status * * DESCRIPTION: Create a new named alias * ******************************************************************************/ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state) { struct acpi_namespace_node *target_node; struct acpi_namespace_node *alias_node; acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(ex_create_alias); /* Get the source/alias operands (both namespace nodes) */ alias_node = (struct acpi_namespace_node *)walk_state->operands[0]; target_node = (struct acpi_namespace_node *)walk_state->operands[1]; if ((target_node->type == ACPI_TYPE_LOCAL_ALIAS) || (target_node->type == ACPI_TYPE_LOCAL_METHOD_ALIAS)) { /* * Dereference an existing alias so that we don't create a chain * of aliases. With this code, we guarantee that an alias is * always exactly one level of indirection away from the * actual aliased name. */ target_node = ACPI_CAST_PTR(struct acpi_namespace_node, target_node->object); } /* * For objects that can never change (i.e., the NS node will * permanently point to the same object), we can simply attach * the object to the new NS node. 
For other objects (such as * Integers, buffers, etc.), we have to point the Alias node * to the original Node. */ switch (target_node->type) { /* For these types, the sub-object can change dynamically via a Store */ case ACPI_TYPE_INTEGER: case ACPI_TYPE_STRING: case ACPI_TYPE_BUFFER: case ACPI_TYPE_PACKAGE: case ACPI_TYPE_BUFFER_FIELD: /* * These types open a new scope, so we need the NS node in order to access * any children. */ case ACPI_TYPE_DEVICE: case ACPI_TYPE_POWER: case ACPI_TYPE_PROCESSOR: case ACPI_TYPE_THERMAL: case ACPI_TYPE_LOCAL_SCOPE: /* * The new alias has the type ALIAS and points to the original * NS node, not the object itself. */ alias_node->type = ACPI_TYPE_LOCAL_ALIAS; alias_node->object = ACPI_CAST_PTR(union acpi_operand_object, target_node); break; case ACPI_TYPE_METHOD: /* * Control method aliases need to be differentiated */ alias_node->type = ACPI_TYPE_LOCAL_METHOD_ALIAS; alias_node->object = ACPI_CAST_PTR(union acpi_operand_object, target_node); break; default: /* Attach the original source object to the new Alias Node */ /* * The new alias assumes the type of the target, and it points * to the same object. 
The reference count of the object has an * additional reference to prevent deletion out from under either the * target node or the alias Node */ status = acpi_ns_attach_object(alias_node, acpi_ns_get_attached_object (target_node), target_node->type); break; } /* Since both operands are Nodes, we don't need to delete them */ return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_create_event * * PARAMETERS: walk_state - Current state * * RETURN: Status * * DESCRIPTION: Create a new event object * ******************************************************************************/ acpi_status acpi_ex_create_event(struct acpi_walk_state *walk_state) { acpi_status status; union acpi_operand_object *obj_desc; ACPI_FUNCTION_TRACE(ex_create_event); obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_EVENT); if (!obj_desc) { status = AE_NO_MEMORY; goto cleanup; } /* * Create the actual OS semaphore, with zero initial units -- meaning * that the event is created in an unsignalled state */ status = acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0, &obj_desc->event.os_semaphore); if (ACPI_FAILURE(status)) { goto cleanup; } /* Attach object to the Node */ status = acpi_ns_attach_object((struct acpi_namespace_node *)walk_state-> operands[0], obj_desc, ACPI_TYPE_EVENT); cleanup: /* * Remove local reference to the object (on error, will cause deletion * of both object and semaphore if present.) 
*/ acpi_ut_remove_reference(obj_desc); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_create_mutex * * PARAMETERS: walk_state - Current state * * RETURN: Status * * DESCRIPTION: Create a new mutex object * * Mutex (Name[0], sync_level[1]) * ******************************************************************************/ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state) { acpi_status status = AE_OK; union acpi_operand_object *obj_desc; ACPI_FUNCTION_TRACE_PTR(ex_create_mutex, ACPI_WALK_OPERANDS); /* Create the new mutex object */ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX); if (!obj_desc) { status = AE_NO_MEMORY; goto cleanup; } /* Create the actual OS Mutex */ status = acpi_os_create_mutex(&obj_desc->mutex.os_mutex); if (ACPI_FAILURE(status)) { goto cleanup; } /* Init object and attach to NS node */ obj_desc->mutex.sync_level = (u8) walk_state->operands[1]->integer.value; obj_desc->mutex.node = (struct acpi_namespace_node *)walk_state->operands[0]; status = acpi_ns_attach_object(obj_desc->mutex.node, obj_desc, ACPI_TYPE_MUTEX); cleanup: /* * Remove local reference to the object (on error, will cause deletion * of both object and semaphore if present.) 
*/ acpi_ut_remove_reference(obj_desc); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_create_region * * PARAMETERS: aml_start - Pointer to the region declaration AML * aml_length - Max length of the declaration AML * region_space - space_iD for the region * walk_state - Current state * * RETURN: Status * * DESCRIPTION: Create a new operation region object * ******************************************************************************/ acpi_status acpi_ex_create_region(u8 * aml_start, u32 aml_length, u8 region_space, struct acpi_walk_state *walk_state) { acpi_status status; union acpi_operand_object *obj_desc; struct acpi_namespace_node *node; union acpi_operand_object *region_obj2; ACPI_FUNCTION_TRACE(ex_create_region); /* Get the Namespace Node */ node = walk_state->op->common.node; /* * If the region object is already attached to this node, * just return */ if (acpi_ns_get_attached_object(node)) { return_ACPI_STATUS(AE_OK); } /* * Space ID must be one of the predefined IDs, or in the user-defined * range */ if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) && (region_space < ACPI_USER_REGION_BEGIN) && (region_space != ACPI_ADR_SPACE_DATA_TABLE)) { ACPI_ERROR((AE_INFO, "Invalid AddressSpace type 0x%X", region_space)); return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID); } ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (0x%X)\n", acpi_ut_get_region_name(region_space), region_space)); /* Create the region descriptor */ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_REGION); if (!obj_desc) { status = AE_NO_MEMORY; goto cleanup; } /* * Remember location in AML stream of address & length * operands since they need to be evaluated at run time. 
*/ region_obj2 = obj_desc->common.next_object; region_obj2->extra.aml_start = aml_start; region_obj2->extra.aml_length = aml_length; /* Init the region from the operands */ obj_desc->region.space_id = region_space; obj_desc->region.address = 0; obj_desc->region.length = 0; obj_desc->region.node = node; /* Install the new region object in the parent Node */ status = acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_REGION); cleanup: /* Remove local reference to the object */ acpi_ut_remove_reference(obj_desc); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_create_processor * * PARAMETERS: walk_state - Current state * * RETURN: Status * * DESCRIPTION: Create a new processor object and populate the fields * * Processor (Name[0], cpu_iD[1], pblock_addr[2], pblock_length[3]) * ******************************************************************************/ acpi_status acpi_ex_create_processor(struct acpi_walk_state *walk_state) { union acpi_operand_object **operand = &walk_state->operands[0]; union acpi_operand_object *obj_desc; acpi_status status; ACPI_FUNCTION_TRACE_PTR(ex_create_processor, walk_state); /* Create the processor object */ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PROCESSOR); if (!obj_desc) { return_ACPI_STATUS(AE_NO_MEMORY); } /* Initialize the processor object from the operands */ obj_desc->processor.proc_id = (u8) operand[1]->integer.value; obj_desc->processor.length = (u8) operand[3]->integer.value; obj_desc->processor.address = (acpi_io_address) operand[2]->integer.value; /* Install the processor object in the parent Node */ status = acpi_ns_attach_object((struct acpi_namespace_node *)operand[0], obj_desc, ACPI_TYPE_PROCESSOR); /* Remove local reference to the object */ acpi_ut_remove_reference(obj_desc); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_create_power_resource 
* * PARAMETERS: walk_state - Current state * * RETURN: Status * * DESCRIPTION: Create a new power_resource object and populate the fields * * power_resource (Name[0], system_level[1], resource_order[2]) * ******************************************************************************/ acpi_status acpi_ex_create_power_resource(struct acpi_walk_state *walk_state) { union acpi_operand_object **operand = &walk_state->operands[0]; acpi_status status; union acpi_operand_object *obj_desc; ACPI_FUNCTION_TRACE_PTR(ex_create_power_resource, walk_state); /* Create the power resource object */ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_POWER); if (!obj_desc) { return_ACPI_STATUS(AE_NO_MEMORY); } /* Initialize the power object from the operands */ obj_desc->power_resource.system_level = (u8) operand[1]->integer.value; obj_desc->power_resource.resource_order = (u16) operand[2]->integer.value; /* Install the power resource object in the parent Node */ status = acpi_ns_attach_object((struct acpi_namespace_node *)operand[0], obj_desc, ACPI_TYPE_POWER); /* Remove local reference to the object */ acpi_ut_remove_reference(obj_desc); return_ACPI_STATUS(status); } #endif /******************************************************************************* * * FUNCTION: acpi_ex_create_method * * PARAMETERS: aml_start - First byte of the method's AML * aml_length - AML byte count for this method * walk_state - Current state * * RETURN: Status * * DESCRIPTION: Create a new method object * ******************************************************************************/ acpi_status acpi_ex_create_method(u8 * aml_start, u32 aml_length, struct acpi_walk_state *walk_state) { union acpi_operand_object **operand = &walk_state->operands[0]; union acpi_operand_object *obj_desc; acpi_status status; u8 method_flags; ACPI_FUNCTION_TRACE_PTR(ex_create_method, walk_state); /* Create a new method object */ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_METHOD); if (!obj_desc) { status = 
AE_NO_MEMORY; goto exit; } /* Save the method's AML pointer and length */ obj_desc->method.aml_start = aml_start; obj_desc->method.aml_length = aml_length; /* * Disassemble the method flags. Split off the arg_count, Serialized * flag, and sync_level for efficiency. */ method_flags = (u8) operand[1]->integer.value; obj_desc->method.param_count = (u8) (method_flags & AML_METHOD_ARG_COUNT); /* * Get the sync_level. If method is serialized, a mutex will be * created for this method when it is parsed. */ if (method_flags & AML_METHOD_SERIALIZED) { obj_desc->method.info_flags = ACPI_METHOD_SERIALIZED; /* * ACPI 1.0: sync_level = 0 * ACPI 2.0: sync_level = sync_level in method declaration */ obj_desc->method.sync_level = (u8) ((method_flags & AML_METHOD_SYNC_LEVEL) >> 4); } /* Attach the new object to the method Node */ status = acpi_ns_attach_object((struct acpi_namespace_node *)operand[0], obj_desc, ACPI_TYPE_METHOD); /* Remove local reference to the object */ acpi_ut_remove_reference(obj_desc); exit: /* Remove a reference to the operand */ acpi_ut_remove_reference(operand[1]); return_ACPI_STATUS(status); }
gpl-2.0
dwengen/linux
drivers/ata/pata_marvell.c
1608
4327
/* * Marvell PATA driver. * * For the moment we drive the PATA port in legacy mode. That * isn't making full use of the device functionality but it is * easy to get working. * * (c) 2006 Red Hat */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/device.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #include <linux/ata.h> #define DRV_NAME "pata_marvell" #define DRV_VERSION "0.1.6" /** * marvell_pata_active - check if PATA is active * @pdev: PCI device * * Returns 1 if the PATA port may be active. We know how to check this * for the 6145 but not the other devices */ static int marvell_pata_active(struct pci_dev *pdev) { int i; u32 devices; void __iomem *barp; /* We don't yet know how to do this for other devices */ if (pdev->device != 0x6145) return 1; barp = pci_iomap(pdev, 5, 0x10); if (barp == NULL) return -ENOMEM; printk("BAR5:"); for(i = 0; i <= 0x0F; i++) printk("%02X:%02X ", i, ioread8(barp + i)); printk("\n"); devices = ioread32(barp + 0x0C); pci_iounmap(pdev, barp); if (devices & 0x10) return 1; return 0; } /** * marvell_pre_reset - probe begin * @link: link * @deadline: deadline jiffies for the operation * * Perform the PATA port setup we need. */ static int marvell_pre_reset(struct ata_link *link, unsigned long deadline) { struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); if (pdev->device == 0x6145 && ap->port_no == 0 && !marvell_pata_active(pdev)) /* PATA enable ? 
*/ return -ENOENT; return ata_sff_prereset(link, deadline); } static int marvell_cable_detect(struct ata_port *ap) { /* Cable type */ switch(ap->port_no) { case 0: if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1) return ATA_CBL_PATA40; return ATA_CBL_PATA80; case 1: /* Legacy SATA port */ return ATA_CBL_SATA; } BUG(); return 0; /* Our BUG macro needs the right markup */ } /* No PIO or DMA methods needed for this device */ static struct scsi_host_template marvell_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations marvell_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = marvell_cable_detect, .prereset = marvell_pre_reset, }; /** * marvell_init_one - Register Marvell ATA PCI device with kernel services * @pdev: PCI device to register * @ent: Entry in marvell_pci_tbl matching with @pdev * * Called from kernel PCI layer. * * LOCKING: * Inherited from PCI layer (may sleep). * * RETURNS: * Zero on success, or -ERRNO value. */ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *id) { static const struct ata_port_info info = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, .port_ops = &marvell_ops, }; static const struct ata_port_info info_sata = { /* Slave possible as its magically mapped not real */ .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &marvell_ops, }; const struct ata_port_info *ppi[] = { &info, &info_sata }; if (pdev->device == 0x6101) ppi[1] = &ata_dummy_port_info; #if defined(CONFIG_SATA_AHCI) || defined(CONFIG_SATA_AHCI_MODULE) if (!marvell_pata_active(pdev)) { printk(KERN_INFO DRV_NAME ": PATA port not active, deferring to AHCI driver.\n"); return -ENODEV; } #endif return ata_pci_bmdma_init_one(pdev, ppi, &marvell_sht, NULL, 0); } static const struct pci_device_id marvell_pci_tbl[] = { { PCI_DEVICE(0x11AB, 0x6101), }, { PCI_DEVICE(0x11AB, 0x6121), }, { PCI_DEVICE(0x11AB, 0x6123), }, { 
PCI_DEVICE(0x11AB, 0x6145), }, { PCI_DEVICE(0x1B4B, 0x91A0), }, { PCI_DEVICE(0x1B4B, 0x91A4), }, { } /* terminate list */ }; static struct pci_driver marvell_pci_driver = { .name = DRV_NAME, .id_table = marvell_pci_tbl, .probe = marvell_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM_SLEEP .suspend = ata_pci_device_suspend, .resume = ata_pci_device_resume, #endif }; module_pci_driver(marvell_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("SCSI low-level driver for Marvell ATA in legacy mode"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, marvell_pci_tbl); MODULE_VERSION(DRV_VERSION);
gpl-2.0
varunchitre15/android_kernel_mediatek_sprout
net/mac80211/vht.c
2120
12207
/* * VHT handling * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/ieee80211.h> #include <linux/export.h> #include <net/mac80211.h> #include "ieee80211_i.h" #include "rate.h" static void __check_vhtcap_disable(struct ieee80211_sub_if_data *sdata, struct ieee80211_sta_vht_cap *vht_cap, u32 flag) { __le32 le_flag = cpu_to_le32(flag); if (sdata->u.mgd.vht_capa_mask.vht_cap_info & le_flag && !(sdata->u.mgd.vht_capa.vht_cap_info & le_flag)) vht_cap->cap &= ~flag; } void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata, struct ieee80211_sta_vht_cap *vht_cap) { int i; u16 rxmcs_mask, rxmcs_cap, rxmcs_n, txmcs_mask, txmcs_cap, txmcs_n; if (!vht_cap->vht_supported) return; if (sdata->vif.type != NL80211_IFTYPE_STATION) return; __check_vhtcap_disable(sdata, vht_cap, IEEE80211_VHT_CAP_RXLDPC); __check_vhtcap_disable(sdata, vht_cap, IEEE80211_VHT_CAP_SHORT_GI_80); __check_vhtcap_disable(sdata, vht_cap, IEEE80211_VHT_CAP_SHORT_GI_160); __check_vhtcap_disable(sdata, vht_cap, IEEE80211_VHT_CAP_TXSTBC); __check_vhtcap_disable(sdata, vht_cap, IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE); __check_vhtcap_disable(sdata, vht_cap, IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE); __check_vhtcap_disable(sdata, vht_cap, IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN); __check_vhtcap_disable(sdata, vht_cap, IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN); /* Allow user to decrease AMPDU length exponent */ if (sdata->u.mgd.vht_capa_mask.vht_cap_info & cpu_to_le32(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK)) { u32 cap, n; n = le32_to_cpu(sdata->u.mgd.vht_capa.vht_cap_info) & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; n >>= IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; cap = vht_cap->cap & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; cap >>= IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; if (n < cap) { vht_cap->cap &= 
~IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; vht_cap->cap |= n << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; } } /* Allow the user to decrease MCSes */ rxmcs_mask = le16_to_cpu(sdata->u.mgd.vht_capa_mask.supp_mcs.rx_mcs_map); rxmcs_n = le16_to_cpu(sdata->u.mgd.vht_capa.supp_mcs.rx_mcs_map); rxmcs_n &= rxmcs_mask; rxmcs_cap = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); txmcs_mask = le16_to_cpu(sdata->u.mgd.vht_capa_mask.supp_mcs.tx_mcs_map); txmcs_n = le16_to_cpu(sdata->u.mgd.vht_capa.supp_mcs.tx_mcs_map); txmcs_n &= txmcs_mask; txmcs_cap = le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map); for (i = 0; i < 8; i++) { u8 m, n, c; m = (rxmcs_mask >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED; n = (rxmcs_n >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED; c = (rxmcs_cap >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED; if (m && ((c != IEEE80211_VHT_MCS_NOT_SUPPORTED && n < c) || n == IEEE80211_VHT_MCS_NOT_SUPPORTED)) { rxmcs_cap &= ~(3 << 2*i); rxmcs_cap |= (rxmcs_n & (3 << 2*i)); } m = (txmcs_mask >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED; n = (txmcs_n >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED; c = (txmcs_cap >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED; if (m && ((c != IEEE80211_VHT_MCS_NOT_SUPPORTED && n < c) || n == IEEE80211_VHT_MCS_NOT_SUPPORTED)) { txmcs_cap &= ~(3 << 2*i); txmcs_cap |= (txmcs_n & (3 << 2*i)); } } vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_cap); vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_cap); } void ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband, const struct ieee80211_vht_cap *vht_cap_ie, struct sta_info *sta) { struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap; struct ieee80211_sta_vht_cap own_cap; u32 cap_info, i; memset(vht_cap, 0, sizeof(*vht_cap)); if (!sta->sta.ht_cap.ht_supported) return; if (!vht_cap_ie || !sband->vht_cap.vht_supported) return; /* A VHT STA must support 40 MHz */ if (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) return; 
vht_cap->vht_supported = true; own_cap = sband->vht_cap; /* * If user has specified capability overrides, take care * of that if the station we're setting up is the AP that * we advertised a restricted capability set to. Override * our own capabilities and then use those below. */ if (sdata->vif.type == NL80211_IFTYPE_STATION && !test_sta_flag(sta, WLAN_STA_TDLS_PEER)) ieee80211_apply_vhtcap_overrides(sdata, &own_cap); /* take some capabilities as-is */ cap_info = le32_to_cpu(vht_cap_ie->vht_cap_info); vht_cap->cap = cap_info; vht_cap->cap &= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 | IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 | IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | IEEE80211_VHT_CAP_RXLDPC | IEEE80211_VHT_CAP_VHT_TXOP_PS | IEEE80211_VHT_CAP_HTC_VHT | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK | IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_UNSOL_MFB | IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB | IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN | IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN; /* and some based on our own capabilities */ switch (own_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: vht_cap->cap |= cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; break; case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: vht_cap->cap |= cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; break; default: /* nothing */ break; } /* symmetric capabilities */ vht_cap->cap |= cap_info & own_cap.cap & (IEEE80211_VHT_CAP_SHORT_GI_80 | IEEE80211_VHT_CAP_SHORT_GI_160); /* remaining ones */ if (own_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) { vht_cap->cap |= cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | IEEE80211_VHT_CAP_BEAMFORMER_ANTENNAS_MAX | IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MAX); } if (own_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) vht_cap->cap |= cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE; if (own_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) vht_cap->cap |= cap_info & 
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; if (own_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) vht_cap->cap |= cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE; if (own_cap.cap & IEEE80211_VHT_CAP_TXSTBC) vht_cap->cap |= cap_info & IEEE80211_VHT_CAP_RXSTBC_MASK; if (own_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK) vht_cap->cap |= cap_info & IEEE80211_VHT_CAP_TXSTBC; /* Copy peer MCS info, the driver might need them. */ memcpy(&vht_cap->vht_mcs, &vht_cap_ie->supp_mcs, sizeof(struct ieee80211_vht_mcs_info)); /* but also restrict MCSes */ for (i = 0; i < 8; i++) { u16 own_rx, own_tx, peer_rx, peer_tx; own_rx = le16_to_cpu(own_cap.vht_mcs.rx_mcs_map); own_rx = (own_rx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED; own_tx = le16_to_cpu(own_cap.vht_mcs.tx_mcs_map); own_tx = (own_tx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED; peer_rx = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); peer_rx = (peer_rx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED; peer_tx = le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map); peer_tx = (peer_tx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED; if (peer_tx != IEEE80211_VHT_MCS_NOT_SUPPORTED) { if (own_rx == IEEE80211_VHT_MCS_NOT_SUPPORTED) peer_tx = IEEE80211_VHT_MCS_NOT_SUPPORTED; else if (own_rx < peer_tx) peer_tx = own_rx; } if (peer_rx != IEEE80211_VHT_MCS_NOT_SUPPORTED) { if (own_tx == IEEE80211_VHT_MCS_NOT_SUPPORTED) peer_rx = IEEE80211_VHT_MCS_NOT_SUPPORTED; else if (own_tx < peer_rx) peer_rx = own_tx; } vht_cap->vht_mcs.rx_mcs_map &= ~cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << i * 2); vht_cap->vht_mcs.rx_mcs_map |= cpu_to_le16(peer_rx << i * 2); vht_cap->vht_mcs.tx_mcs_map &= ~cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << i * 2); vht_cap->vht_mcs.tx_mcs_map |= cpu_to_le16(peer_tx << i * 2); } /* finally set up the bandwidth */ switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160; break; 
default: sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80; } sta->sta.bandwidth = ieee80211_sta_cur_vht_bw(sta); } enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; u32 cap = sta->sta.vht_cap.cap; enum ieee80211_sta_rx_bandwidth bw; if (!sta->sta.vht_cap.vht_supported) { bw = sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ? IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20; goto check_max; } switch (sdata->vif.bss_conf.chandef.width) { default: WARN_ON_ONCE(1); /* fall through */ case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: case NL80211_CHAN_WIDTH_40: bw = sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ? IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20; break; case NL80211_CHAN_WIDTH_160: if ((cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ) { bw = IEEE80211_STA_RX_BW_160; break; } /* fall through */ case NL80211_CHAN_WIDTH_80P80: if ((cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) { bw = IEEE80211_STA_RX_BW_160; break; } /* fall through */ case NL80211_CHAN_WIDTH_80: bw = IEEE80211_STA_RX_BW_80; } check_max: if (bw > sta->cur_max_bandwidth) bw = sta->cur_max_bandwidth; return bw; } void ieee80211_sta_set_rx_nss(struct sta_info *sta) { u8 ht_rx_nss = 0, vht_rx_nss = 0; /* if we received a notification already don't overwrite it */ if (sta->sta.rx_nss) return; if (sta->sta.ht_cap.ht_supported) { if (sta->sta.ht_cap.mcs.rx_mask[0]) ht_rx_nss++; if (sta->sta.ht_cap.mcs.rx_mask[1]) ht_rx_nss++; if (sta->sta.ht_cap.mcs.rx_mask[2]) ht_rx_nss++; if (sta->sta.ht_cap.mcs.rx_mask[3]) ht_rx_nss++; /* FIXME: consider rx_highest? 
*/ } if (sta->sta.vht_cap.vht_supported) { int i; u16 rx_mcs_map; rx_mcs_map = le16_to_cpu(sta->sta.vht_cap.vht_mcs.rx_mcs_map); for (i = 7; i >= 0; i--) { u8 mcs = (rx_mcs_map >> (2 * i)) & 3; if (mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) { vht_rx_nss = i + 1; break; } } /* FIXME: consider rx_highest? */ } ht_rx_nss = max(ht_rx_nss, vht_rx_nss); sta->sta.rx_nss = max_t(u8, 1, ht_rx_nss); } void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, u8 opmode, enum ieee80211_band band, bool nss_only) { struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; enum ieee80211_sta_rx_bandwidth new_bw; u32 changed = 0; u8 nss; sband = local->hw.wiphy->bands[band]; /* ignore - no support for BF yet */ if (opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF) return; nss = opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK; nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT; nss += 1; if (sta->sta.rx_nss != nss) { sta->sta.rx_nss = nss; changed |= IEEE80211_RC_NSS_CHANGED; } if (nss_only) goto change; switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) { case IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_20; break; case IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ: sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_40; break; case IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ: sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80; break; case IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ: sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160; break; } new_bw = ieee80211_sta_cur_vht_bw(sta); if (new_bw != sta->sta.bandwidth) { sta->sta.bandwidth = new_bw; changed |= IEEE80211_RC_NSS_CHANGED; } change: if (changed) rate_control_rate_update(local, sband, sta, changed); }
gpl-2.0
WildfireDEV/android_kernel_samsung_s6
net/sched/cls_tcindex.c
2376
12154
/* * net/sched/cls_tcindex.c Packet classifier for skb->tc_index * * Written 1998,1999 by Werner Almesberger, EPFL ICA */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <linux/slab.h> #include <net/act_api.h> #include <net/netlink.h> #include <net/pkt_cls.h> /* * Passing parameters to the root seems to be done more awkwardly than really * necessary. At least, u32 doesn't seem to use such dirty hacks. To be * verified. FIXME. */ #define PERFECT_HASH_THRESHOLD 64 /* use perfect hash if not bigger */ #define DEFAULT_HASH_SIZE 64 /* optimized for diffserv */ #define PRIV(tp) ((struct tcindex_data *) (tp)->root) struct tcindex_filter_result { struct tcf_exts exts; struct tcf_result res; }; struct tcindex_filter { u16 key; struct tcindex_filter_result result; struct tcindex_filter *next; }; struct tcindex_data { struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */ struct tcindex_filter **h; /* imperfect hash; only used if !perfect; NULL if unused */ u16 mask; /* AND key with mask */ int shift; /* shift ANDed key to the right */ int hash; /* hash table size; 0 if undefined */ int alloc_hash; /* allocated size */ int fall_through; /* 0: only classify if explicit match */ }; static const struct tcf_ext_map tcindex_ext_map = { .police = TCA_TCINDEX_POLICE, .action = TCA_TCINDEX_ACT }; static inline int tcindex_filter_is_set(struct tcindex_filter_result *r) { return tcf_exts_is_predicative(&r->exts) || r->res.classid; } static struct tcindex_filter_result * tcindex_lookup(struct tcindex_data *p, u16 key) { struct tcindex_filter *f; if (p->perfect) return tcindex_filter_is_set(p->perfect + key) ? 
p->perfect + key : NULL; else if (p->h) { for (f = p->h[key % p->hash]; f; f = f->next) if (f->key == key) return &f->result; } return NULL; } static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { struct tcindex_data *p = PRIV(tp); struct tcindex_filter_result *f; int key = (skb->tc_index & p->mask) >> p->shift; pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n", skb, tp, res, p); f = tcindex_lookup(p, key); if (!f) { if (!p->fall_through) return -1; res->classid = TC_H_MAKE(TC_H_MAJ(tp->q->handle), key); res->class = 0; pr_debug("alg 0x%x\n", res->classid); return 0; } *res = f->res; pr_debug("map 0x%x\n", res->classid); return tcf_exts_exec(skb, &f->exts, res); } static unsigned long tcindex_get(struct tcf_proto *tp, u32 handle) { struct tcindex_data *p = PRIV(tp); struct tcindex_filter_result *r; pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle); if (p->perfect && handle >= p->alloc_hash) return 0; r = tcindex_lookup(p, handle); return r && tcindex_filter_is_set(r) ? 
(unsigned long) r : 0UL; } static void tcindex_put(struct tcf_proto *tp, unsigned long f) { pr_debug("tcindex_put(tp %p,f 0x%lx)\n", tp, f); } static int tcindex_init(struct tcf_proto *tp) { struct tcindex_data *p; pr_debug("tcindex_init(tp %p)\n", tp); p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL); if (!p) return -ENOMEM; p->mask = 0xffff; p->hash = DEFAULT_HASH_SIZE; p->fall_through = 1; tp->root = p; return 0; } static int __tcindex_delete(struct tcf_proto *tp, unsigned long arg, int lock) { struct tcindex_data *p = PRIV(tp); struct tcindex_filter_result *r = (struct tcindex_filter_result *) arg; struct tcindex_filter *f = NULL; pr_debug("tcindex_delete(tp %p,arg 0x%lx),p %p,f %p\n", tp, arg, p, f); if (p->perfect) { if (!r->res.class) return -ENOENT; } else { int i; struct tcindex_filter **walk = NULL; for (i = 0; i < p->hash; i++) for (walk = p->h+i; *walk; walk = &(*walk)->next) if (&(*walk)->result == r) goto found; return -ENOENT; found: f = *walk; if (lock) tcf_tree_lock(tp); *walk = f->next; if (lock) tcf_tree_unlock(tp); } tcf_unbind_filter(tp, &r->res); tcf_exts_destroy(tp, &r->exts); kfree(f); return 0; } static int tcindex_delete(struct tcf_proto *tp, unsigned long arg) { return __tcindex_delete(tp, arg, 1); } static inline int valid_perfect_hash(struct tcindex_data *p) { return p->hash > (p->mask >> p->shift); } static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = { [TCA_TCINDEX_HASH] = { .type = NLA_U32 }, [TCA_TCINDEX_MASK] = { .type = NLA_U16 }, [TCA_TCINDEX_SHIFT] = { .type = NLA_U32 }, [TCA_TCINDEX_FALL_THROUGH] = { .type = NLA_U32 }, [TCA_TCINDEX_CLASSID] = { .type = NLA_U32 }, }; static int tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, u32 handle, struct tcindex_data *p, struct tcindex_filter_result *r, struct nlattr **tb, struct nlattr *est) { int err, balloc = 0; struct tcindex_filter_result new_filter_result, *old_r = r; struct tcindex_filter_result cr; struct tcindex_data cp; struct 
tcindex_filter *f = NULL; /* make gcc behave */ struct tcf_exts e; err = tcf_exts_validate(net, tp, tb, est, &e, &tcindex_ext_map); if (err < 0) return err; memcpy(&cp, p, sizeof(cp)); memset(&new_filter_result, 0, sizeof(new_filter_result)); if (old_r) memcpy(&cr, r, sizeof(cr)); else memset(&cr, 0, sizeof(cr)); if (tb[TCA_TCINDEX_HASH]) cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); if (tb[TCA_TCINDEX_MASK]) cp.mask = nla_get_u16(tb[TCA_TCINDEX_MASK]); if (tb[TCA_TCINDEX_SHIFT]) cp.shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]); err = -EBUSY; /* Hash already allocated, make sure that we still meet the * requirements for the allocated hash. */ if (cp.perfect) { if (!valid_perfect_hash(&cp) || cp.hash > cp.alloc_hash) goto errout; } else if (cp.h && cp.hash != cp.alloc_hash) goto errout; err = -EINVAL; if (tb[TCA_TCINDEX_FALL_THROUGH]) cp.fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]); if (!cp.hash) { /* Hash not specified, use perfect hash if the upper limit * of the hashing index is below the threshold. */ if ((cp.mask >> cp.shift) < PERFECT_HASH_THRESHOLD) cp.hash = (cp.mask >> cp.shift) + 1; else cp.hash = DEFAULT_HASH_SIZE; } if (!cp.perfect && !cp.h) cp.alloc_hash = cp.hash; /* Note: this could be as restrictive as if (handle & ~(mask >> shift)) * but then, we'd fail handles that may become valid after some future * mask change. While this is extremely unlikely to ever matter, * the check below is safer (and also more backwards-compatible). */ if (cp.perfect || valid_perfect_hash(&cp)) if (handle >= cp.alloc_hash) goto errout; err = -ENOMEM; if (!cp.perfect && !cp.h) { if (valid_perfect_hash(&cp)) { cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL); if (!cp.perfect) goto errout; balloc = 1; } else { cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL); if (!cp.h) goto errout; balloc = 2; } } if (cp.perfect) r = cp.perfect + handle; else r = tcindex_lookup(&cp, handle) ? 
: &new_filter_result; if (r == &new_filter_result) { f = kzalloc(sizeof(*f), GFP_KERNEL); if (!f) goto errout_alloc; } if (tb[TCA_TCINDEX_CLASSID]) { cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]); tcf_bind_filter(tp, &cr.res, base); } tcf_exts_change(tp, &cr.exts, &e); tcf_tree_lock(tp); if (old_r && old_r != r) memset(old_r, 0, sizeof(*old_r)); memcpy(p, &cp, sizeof(cp)); memcpy(r, &cr, sizeof(cr)); if (r == &new_filter_result) { struct tcindex_filter **fp; f->key = handle; f->result = new_filter_result; f->next = NULL; for (fp = p->h+(handle % p->hash); *fp; fp = &(*fp)->next) /* nothing */; *fp = f; } tcf_tree_unlock(tp); return 0; errout_alloc: if (balloc == 1) kfree(cp.perfect); else if (balloc == 2) kfree(cp.h); errout: tcf_exts_destroy(tp, &e); return err; } static int tcindex_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, u32 handle, struct nlattr **tca, unsigned long *arg) { struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_TCINDEX_MAX + 1]; struct tcindex_data *p = PRIV(tp); struct tcindex_filter_result *r = (struct tcindex_filter_result *) *arg; int err; pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p," "p %p,r %p,*arg 0x%lx\n", tp, handle, tca, arg, opt, p, r, arg ? 
*arg : 0L); if (!opt) return 0; err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, tcindex_policy); if (err < 0) return err; return tcindex_set_parms(net, tp, base, handle, p, r, tb, tca[TCA_RATE]); } static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker) { struct tcindex_data *p = PRIV(tp); struct tcindex_filter *f, *next; int i; pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p); if (p->perfect) { for (i = 0; i < p->hash; i++) { if (!p->perfect[i].res.class) continue; if (walker->count >= walker->skip) { if (walker->fn(tp, (unsigned long) (p->perfect+i), walker) < 0) { walker->stop = 1; return; } } walker->count++; } } if (!p->h) return; for (i = 0; i < p->hash; i++) { for (f = p->h[i]; f; f = next) { next = f->next; if (walker->count >= walker->skip) { if (walker->fn(tp, (unsigned long) &f->result, walker) < 0) { walker->stop = 1; return; } } walker->count++; } } } static int tcindex_destroy_element(struct tcf_proto *tp, unsigned long arg, struct tcf_walker *walker) { return __tcindex_delete(tp, arg, 0); } static void tcindex_destroy(struct tcf_proto *tp) { struct tcindex_data *p = PRIV(tp); struct tcf_walker walker; pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p); walker.count = 0; walker.skip = 0; walker.fn = &tcindex_destroy_element; tcindex_walk(tp, &walker); kfree(p->perfect); kfree(p->h); kfree(p); tp->root = NULL; } static int tcindex_dump(struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *t) { struct tcindex_data *p = PRIV(tp); struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh; unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; pr_debug("tcindex_dump(tp %p,fh 0x%lx,skb %p,t %p),p %p,r %p,b %p\n", tp, fh, skb, t, p, r, b); pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h); nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; if (!fh) { t->tcm_handle = ~0; /* whatever ... 
*/ if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) || nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) || nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) || nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through)) goto nla_put_failure; nla_nest_end(skb, nest); } else { if (p->perfect) { t->tcm_handle = r-p->perfect; } else { struct tcindex_filter *f; int i; t->tcm_handle = 0; for (i = 0; !t->tcm_handle && i < p->hash; i++) { for (f = p->h[i]; !t->tcm_handle && f; f = f->next) { if (&f->result == r) t->tcm_handle = f->key; } } } pr_debug("handle = %d\n", t->tcm_handle); if (r->res.class && nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid)) goto nla_put_failure; if (tcf_exts_dump(skb, &r->exts, &tcindex_ext_map) < 0) goto nla_put_failure; nla_nest_end(skb, nest); if (tcf_exts_dump_stats(skb, &r->exts, &tcindex_ext_map) < 0) goto nla_put_failure; } return skb->len; nla_put_failure: nlmsg_trim(skb, b); return -1; } static struct tcf_proto_ops cls_tcindex_ops __read_mostly = { .kind = "tcindex", .classify = tcindex_classify, .init = tcindex_init, .destroy = tcindex_destroy, .get = tcindex_get, .put = tcindex_put, .change = tcindex_change, .delete = tcindex_delete, .walk = tcindex_walk, .dump = tcindex_dump, .owner = THIS_MODULE, }; static int __init init_tcindex(void) { return register_tcf_proto_ops(&cls_tcindex_ops); } static void __exit exit_tcindex(void) { unregister_tcf_proto_ops(&cls_tcindex_ops); } module_init(init_tcindex) module_exit(exit_tcindex) MODULE_LICENSE("GPL");
gpl-2.0
noob4598/N910T-EPG2-SOURCE
fs/jffs2/fs.c
2376
20253
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/capability.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/list.h> #include <linux/mtd/mtd.h> #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/vfs.h> #include <linux/crc32.h> #include "nodelist.h" static int jffs2_flash_setup(struct jffs2_sb_info *c); int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) { struct jffs2_full_dnode *old_metadata, *new_metadata; struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); struct jffs2_raw_inode *ri; union jffs2_device_node dev; unsigned char *mdata = NULL; int mdatalen = 0; unsigned int ivalid; uint32_t alloclen; int ret; int alloc_type = ALLOC_NORMAL; jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino); /* Special cases - we don't want more than one data node for these types on the medium at any time. So setattr must read the original data associated with the node (i.e. 
the device numbers or the target name) and write it out again with the appropriate data attached */ if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) { /* For these, we don't actually need to read the old node */ mdatalen = jffs2_encode_dev(&dev, inode->i_rdev); mdata = (char *)&dev; jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n", __func__, mdatalen); } else if (S_ISLNK(inode->i_mode)) { mutex_lock(&f->sem); mdatalen = f->metadata->size; mdata = kmalloc(f->metadata->size, GFP_USER); if (!mdata) { mutex_unlock(&f->sem); return -ENOMEM; } ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen); if (ret) { mutex_unlock(&f->sem); kfree(mdata); return ret; } mutex_unlock(&f->sem); jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n", __func__, mdatalen); } ri = jffs2_alloc_raw_inode(); if (!ri) { if (S_ISLNK(inode->i_mode)) kfree(mdata); return -ENOMEM; } ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); if (ret) { jffs2_free_raw_inode(ri); if (S_ISLNK(inode->i_mode)) kfree(mdata); return ret; } mutex_lock(&f->sem); ivalid = iattr->ia_valid; ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE); ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen); ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)); ri->ino = cpu_to_je32(inode->i_ino); ri->version = cpu_to_je32(++f->highest_version); ri->uid = cpu_to_je16((ivalid & ATTR_UID)? from_kuid(&init_user_ns, iattr->ia_uid):i_uid_read(inode)); ri->gid = cpu_to_je16((ivalid & ATTR_GID)? 
from_kgid(&init_user_ns, iattr->ia_gid):i_gid_read(inode)); if (ivalid & ATTR_MODE) ri->mode = cpu_to_jemode(iattr->ia_mode); else ri->mode = cpu_to_jemode(inode->i_mode); ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size); ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime)); ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime)); ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime)); ri->offset = cpu_to_je32(0); ri->csize = ri->dsize = cpu_to_je32(mdatalen); ri->compr = JFFS2_COMPR_NONE; if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) { /* It's an extension. Make it a hole node */ ri->compr = JFFS2_COMPR_ZERO; ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size); ri->offset = cpu_to_je32(inode->i_size); } else if (ivalid & ATTR_SIZE && !iattr->ia_size) { /* For truncate-to-zero, treat it as deletion because it'll always be obsoleting all previous nodes */ alloc_type = ALLOC_DELETION; } ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); if (mdatalen) ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen)); else ri->data_crc = cpu_to_je32(0); new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type); if (S_ISLNK(inode->i_mode)) kfree(mdata); if (IS_ERR(new_metadata)) { jffs2_complete_reservation(c); jffs2_free_raw_inode(ri); mutex_unlock(&f->sem); return PTR_ERR(new_metadata); } /* It worked. 
Update the inode */ inode->i_atime = ITIME(je32_to_cpu(ri->atime)); inode->i_ctime = ITIME(je32_to_cpu(ri->ctime)); inode->i_mtime = ITIME(je32_to_cpu(ri->mtime)); inode->i_mode = jemode_to_cpu(ri->mode); i_uid_write(inode, je16_to_cpu(ri->uid)); i_gid_write(inode, je16_to_cpu(ri->gid)); old_metadata = f->metadata; if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size); if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) { jffs2_add_full_dnode_to_inode(c, f, new_metadata); inode->i_size = iattr->ia_size; inode->i_blocks = (inode->i_size + 511) >> 9; f->metadata = NULL; } else { f->metadata = new_metadata; } if (old_metadata) { jffs2_mark_node_obsolete(c, old_metadata->raw); jffs2_free_full_dnode(old_metadata); } jffs2_free_raw_inode(ri); mutex_unlock(&f->sem); jffs2_complete_reservation(c); /* We have to do the truncate_setsize() without f->sem held, since some pages may be locked and waiting for it in readpage(). We are protected from a simultaneous write() extending i_size back past iattr->ia_size, because do_truncate() holds the generic inode semaphore. 
*/ if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) { truncate_setsize(inode, iattr->ia_size); inode->i_blocks = (inode->i_size + 511) >> 9; } return 0; } int jffs2_setattr(struct dentry *dentry, struct iattr *iattr) { int rc; rc = inode_change_ok(dentry->d_inode, iattr); if (rc) return rc; rc = jffs2_do_setattr(dentry->d_inode, iattr); if (!rc && (iattr->ia_valid & ATTR_MODE)) rc = jffs2_acl_chmod(dentry->d_inode); return rc; } int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf) { struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb); unsigned long avail; buf->f_type = JFFS2_SUPER_MAGIC; buf->f_bsize = 1 << PAGE_SHIFT; buf->f_blocks = c->flash_size >> PAGE_SHIFT; buf->f_files = 0; buf->f_ffree = 0; buf->f_namelen = JFFS2_MAX_NAME_LEN; buf->f_fsid.val[0] = JFFS2_SUPER_MAGIC; buf->f_fsid.val[1] = c->mtd->index; spin_lock(&c->erase_completion_lock); avail = c->dirty_size + c->free_size; if (avail > c->sector_size * c->resv_blocks_write) avail -= c->sector_size * c->resv_blocks_write; else avail = 0; spin_unlock(&c->erase_completion_lock); buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT; return 0; } void jffs2_evict_inode (struct inode *inode) { /* We can forget about this inode for now - drop all * the nodelists associated with it, etc. 
*/ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); jffs2_dbg(1, "%s(): ino #%lu mode %o\n", __func__, inode->i_ino, inode->i_mode); truncate_inode_pages(&inode->i_data, 0); clear_inode(inode); jffs2_do_clear_inode(c, f); } struct inode *jffs2_iget(struct super_block *sb, unsigned long ino) { struct jffs2_inode_info *f; struct jffs2_sb_info *c; struct jffs2_raw_inode latest_node; union jffs2_device_node jdev; struct inode *inode; dev_t rdev = 0; int ret; jffs2_dbg(1, "%s(): ino == %lu\n", __func__, ino); inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; f = JFFS2_INODE_INFO(inode); c = JFFS2_SB_INFO(inode->i_sb); jffs2_init_inode_info(f); mutex_lock(&f->sem); ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node); if (ret) { mutex_unlock(&f->sem); iget_failed(inode); return ERR_PTR(ret); } inode->i_mode = jemode_to_cpu(latest_node.mode); i_uid_write(inode, je16_to_cpu(latest_node.uid)); i_gid_write(inode, je16_to_cpu(latest_node.gid)); inode->i_size = je32_to_cpu(latest_node.isize); inode->i_atime = ITIME(je32_to_cpu(latest_node.atime)); inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime)); inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime)); set_nlink(inode, f->inocache->pino_nlink); inode->i_blocks = (inode->i_size + 511) >> 9; switch (inode->i_mode & S_IFMT) { case S_IFLNK: inode->i_op = &jffs2_symlink_inode_operations; break; case S_IFDIR: { struct jffs2_full_dirent *fd; set_nlink(inode, 2); /* parent and '.' 
*/ for (fd=f->dents; fd; fd = fd->next) { if (fd->type == DT_DIR && fd->ino) inc_nlink(inode); } /* Root dir gets i_nlink 3 for some reason */ if (inode->i_ino == 1) inc_nlink(inode); inode->i_op = &jffs2_dir_inode_operations; inode->i_fop = &jffs2_dir_operations; break; } case S_IFREG: inode->i_op = &jffs2_file_inode_operations; inode->i_fop = &jffs2_file_operations; inode->i_mapping->a_ops = &jffs2_file_address_operations; inode->i_mapping->nrpages = 0; break; case S_IFBLK: case S_IFCHR: /* Read the device numbers from the media */ if (f->metadata->size != sizeof(jdev.old_id) && f->metadata->size != sizeof(jdev.new_id)) { pr_notice("Device node has strange size %d\n", f->metadata->size); goto error_io; } jffs2_dbg(1, "Reading device numbers from flash\n"); ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size); if (ret < 0) { /* Eep */ pr_notice("Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino); goto error; } if (f->metadata->size == sizeof(jdev.old_id)) rdev = old_decode_dev(je16_to_cpu(jdev.old_id)); else rdev = new_decode_dev(je32_to_cpu(jdev.new_id)); case S_IFSOCK: case S_IFIFO: inode->i_op = &jffs2_file_inode_operations; init_special_inode(inode, inode->i_mode, rdev); break; default: pr_warn("%s(): Bogus i_mode %o for ino %lu\n", __func__, inode->i_mode, (unsigned long)inode->i_ino); } mutex_unlock(&f->sem); jffs2_dbg(1, "jffs2_read_inode() returning\n"); unlock_new_inode(inode); return inode; error_io: ret = -EIO; error: mutex_unlock(&f->sem); jffs2_do_clear_inode(c, f); iget_failed(inode); return ERR_PTR(ret); } void jffs2_dirty_inode(struct inode *inode, int flags) { struct iattr iattr; if (!(inode->i_state & I_DIRTY_DATASYNC)) { jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n", __func__, inode->i_ino); return; } jffs2_dbg(1, "%s(): calling setattr() for ino #%lu\n", __func__, inode->i_ino); iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME; iattr.ia_mode = 
inode->i_mode; iattr.ia_uid = inode->i_uid; iattr.ia_gid = inode->i_gid; iattr.ia_atime = inode->i_atime; iattr.ia_mtime = inode->i_mtime; iattr.ia_ctime = inode->i_ctime; jffs2_do_setattr(inode, &iattr); } int jffs2_do_remount_fs(struct super_block *sb, int *flags, char *data) { struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); if (c->flags & JFFS2_SB_FLAG_RO && !(sb->s_flags & MS_RDONLY)) return -EROFS; /* We stop if it was running, then restart if it needs to. This also catches the case where it was stopped and this is just a remount to restart it. Flush the writebuffer, if neccecary, else we loose it */ if (!(sb->s_flags & MS_RDONLY)) { jffs2_stop_garbage_collect_thread(c); mutex_lock(&c->alloc_sem); jffs2_flush_wbuf_pad(c); mutex_unlock(&c->alloc_sem); } if (!(*flags & MS_RDONLY)) jffs2_start_garbage_collect_thread(c); *flags |= MS_NOATIME; return 0; } /* jffs2_new_inode: allocate a new inode and inocache, add it to the hash, fill in the raw_inode while you're at it. */ struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_raw_inode *ri) { struct inode *inode; struct super_block *sb = dir_i->i_sb; struct jffs2_sb_info *c; struct jffs2_inode_info *f; int ret; jffs2_dbg(1, "%s(): dir_i %ld, mode 0x%x\n", __func__, dir_i->i_ino, mode); c = JFFS2_SB_INFO(sb); inode = new_inode(sb); if (!inode) return ERR_PTR(-ENOMEM); f = JFFS2_INODE_INFO(inode); jffs2_init_inode_info(f); mutex_lock(&f->sem); memset(ri, 0, sizeof(*ri)); /* Set OS-specific defaults for new inodes */ ri->uid = cpu_to_je16(from_kuid(&init_user_ns, current_fsuid())); if (dir_i->i_mode & S_ISGID) { ri->gid = cpu_to_je16(i_gid_read(dir_i)); if (S_ISDIR(mode)) mode |= S_ISGID; } else { ri->gid = cpu_to_je16(from_kgid(&init_user_ns, current_fsgid())); } /* POSIX ACLs have to be processed now, at least partly. 
The umask is only applied if there's no default ACL */ ret = jffs2_init_acl_pre(dir_i, inode, &mode); if (ret) { make_bad_inode(inode); iput(inode); return ERR_PTR(ret); } ret = jffs2_do_new_inode (c, f, mode, ri); if (ret) { make_bad_inode(inode); iput(inode); return ERR_PTR(ret); } set_nlink(inode, 1); inode->i_ino = je32_to_cpu(ri->ino); inode->i_mode = jemode_to_cpu(ri->mode); i_gid_write(inode, je16_to_cpu(ri->gid)); i_uid_write(inode, je16_to_cpu(ri->uid)); inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC; ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime)); inode->i_blocks = 0; inode->i_size = 0; if (insert_inode_locked(inode) < 0) { make_bad_inode(inode); iput(inode); return ERR_PTR(-EINVAL); } return inode; } static int calculate_inocache_hashsize(uint32_t flash_size) { /* * Pick a inocache hash size based on the size of the medium. * Count how many megabytes we're dealing with, apply a hashsize twice * that size, but rounding down to the usual big powers of 2. And keep * to sensible bounds. 
*/ int size_mb = flash_size / 1024 / 1024; int hashsize = (size_mb * 2) & ~0x3f; if (hashsize < INOCACHE_HASHSIZE_MIN) return INOCACHE_HASHSIZE_MIN; if (hashsize > INOCACHE_HASHSIZE_MAX) return INOCACHE_HASHSIZE_MAX; return hashsize; } int jffs2_do_fill_super(struct super_block *sb, void *data, int silent) { struct jffs2_sb_info *c; struct inode *root_i; int ret; size_t blocks; c = JFFS2_SB_INFO(sb); #ifndef CONFIG_JFFS2_FS_WRITEBUFFER if (c->mtd->type == MTD_NANDFLASH) { pr_err("Cannot operate on NAND flash unless jffs2 NAND support is compiled in\n"); return -EINVAL; } if (c->mtd->type == MTD_DATAFLASH) { pr_err("Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in\n"); return -EINVAL; } #endif c->flash_size = c->mtd->size; c->sector_size = c->mtd->erasesize; blocks = c->flash_size / c->sector_size; /* * Size alignment check */ if ((c->sector_size * blocks) != c->flash_size) { c->flash_size = c->sector_size * blocks; pr_info("Flash size not aligned to erasesize, reducing to %dKiB\n", c->flash_size / 1024); } if (c->flash_size < 5*c->sector_size) { pr_err("Too few erase blocks (%d)\n", c->flash_size / c->sector_size); return -EINVAL; } c->cleanmarker_size = sizeof(struct jffs2_unknown_node); /* NAND (or other bizarre) flash... 
do setup accordingly */ ret = jffs2_flash_setup(c); if (ret) return ret; c->inocache_hashsize = calculate_inocache_hashsize(c->flash_size); c->inocache_list = kcalloc(c->inocache_hashsize, sizeof(struct jffs2_inode_cache *), GFP_KERNEL); if (!c->inocache_list) { ret = -ENOMEM; goto out_wbuf; } jffs2_init_xattr_subsystem(c); if ((ret = jffs2_do_mount_fs(c))) goto out_inohash; jffs2_dbg(1, "%s(): Getting root inode\n", __func__); root_i = jffs2_iget(sb, 1); if (IS_ERR(root_i)) { jffs2_dbg(1, "get root inode failed\n"); ret = PTR_ERR(root_i); goto out_root; } ret = -ENOMEM; jffs2_dbg(1, "%s(): d_make_root()\n", __func__); sb->s_root = d_make_root(root_i); if (!sb->s_root) goto out_root; sb->s_maxbytes = 0xFFFFFFFF; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; sb->s_magic = JFFS2_SUPER_MAGIC; if (!(sb->s_flags & MS_RDONLY)) jffs2_start_garbage_collect_thread(c); return 0; out_root: jffs2_free_ino_caches(c); jffs2_free_raw_node_refs(c); if (jffs2_blocks_use_vmalloc(c)) vfree(c->blocks); else kfree(c->blocks); out_inohash: jffs2_clear_xattr_subsystem(c); kfree(c->inocache_list); out_wbuf: jffs2_flash_cleanup(c); return ret; } void jffs2_gc_release_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f) { iput(OFNI_EDONI_2SFFJ(f)); } struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c, int inum, int unlinked) { struct inode *inode; struct jffs2_inode_cache *ic; if (unlinked) { /* The inode has zero nlink but its nodes weren't yet marked obsolete. This has to be because we're still waiting for the final (close() and) iput() to happen. There's a possibility that the final iput() could have happened while we were contemplating. In order to ensure that we don't cause a new read_inode() (which would fail) for the inode in question, we use ilookup() in this case instead of iget(). 
The nlink can't _become_ zero at this point because we're holding the alloc_sem, and jffs2_do_unlink() would also need that while decrementing nlink on any inode. */ inode = ilookup(OFNI_BS_2SFFJ(c), inum); if (!inode) { jffs2_dbg(1, "ilookup() failed for ino #%u; inode is probably deleted.\n", inum); spin_lock(&c->inocache_lock); ic = jffs2_get_ino_cache(c, inum); if (!ic) { jffs2_dbg(1, "Inode cache for ino #%u is gone\n", inum); spin_unlock(&c->inocache_lock); return NULL; } if (ic->state != INO_STATE_CHECKEDABSENT) { /* Wait for progress. Don't just loop */ jffs2_dbg(1, "Waiting for ino #%u in state %d\n", ic->ino, ic->state); sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); } else { spin_unlock(&c->inocache_lock); } return NULL; } } else { /* Inode has links to it still; they're not going away because jffs2_do_unlink() would need the alloc_sem and we have it. Just iget() it, and if read_inode() is necessary that's OK. */ inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum); if (IS_ERR(inode)) return ERR_CAST(inode); } if (is_bad_inode(inode)) { pr_notice("Eep. read_inode() failed for ino #%u. unlinked %d\n", inum, unlinked); /* NB. This will happen again. We need to do something appropriate here. */ iput(inode); return ERR_PTR(-EIO); } return JFFS2_INODE_INFO(inode); } unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, struct jffs2_inode_info *f, unsigned long offset, unsigned long *priv) { struct inode *inode = OFNI_EDONI_2SFFJ(f); struct page *pg; pg = read_cache_page_async(inode->i_mapping, offset >> PAGE_CACHE_SHIFT, (void *)jffs2_do_readpage_unlock, inode); if (IS_ERR(pg)) return (void *)pg; *priv = (unsigned long)pg; return kmap(pg); } void jffs2_gc_release_page(struct jffs2_sb_info *c, unsigned char *ptr, unsigned long *priv) { struct page *pg = (void *)*priv; kunmap(pg); page_cache_release(pg); } static int jffs2_flash_setup(struct jffs2_sb_info *c) { int ret = 0; if (jffs2_cleanmarker_oob(c)) { /* NAND flash... 
do setup accordingly */ ret = jffs2_nand_flash_setup(c); if (ret) return ret; } /* and Dataflash */ if (jffs2_dataflash(c)) { ret = jffs2_dataflash_setup(c); if (ret) return ret; } /* and Intel "Sibley" flash */ if (jffs2_nor_wbuf_flash(c)) { ret = jffs2_nor_wbuf_flash_setup(c); if (ret) return ret; } /* and an UBI volume */ if (jffs2_ubivol(c)) { ret = jffs2_ubivol_setup(c); if (ret) return ret; } return ret; } void jffs2_flash_cleanup(struct jffs2_sb_info *c) { if (jffs2_cleanmarker_oob(c)) { jffs2_nand_flash_cleanup(c); } /* and DataFlash */ if (jffs2_dataflash(c)) { jffs2_dataflash_cleanup(c); } /* and Intel "Sibley" flash */ if (jffs2_nor_wbuf_flash(c)) { jffs2_nor_wbuf_flash_cleanup(c); } /* and an UBI volume */ if (jffs2_ubivol(c)) { jffs2_ubivol_cleanup(c); } }
gpl-2.0
nitroglycerine33/Note2_Tmo-Att-Vzw_Kernel
fs/nfs/pagelist.c
2376
11904
/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>

#include "internal.h"
#include "pnfs.h"

/* Slab cache backing all struct nfs_page allocations in this file. */
static struct kmem_cache *nfs_page_cachep;

/*
 * Allocate a zeroed nfs_page from the slab cache and initialise its
 * list head.  Returns NULL on allocation failure.
 */
static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page	*p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

/* Return an nfs_page to the slab cache. */
static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 *
 * Returns the new request, or an ERR_PTR(-ENOMEM) on allocation
 * failure (either of the request itself or its lock context).
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_page		*req;

	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* get lock context early so we can deal with alloc failures */
	req->wb_lock_context = nfs_get_lock_context(ctx);
	if (req->wb_lock_context == NULL) {
		nfs_page_free(req);
		return ERR_PTR(-ENOMEM);
	}

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page    = page;
	atomic_set(&req->wb_complete, 0);
	req->wb_index	= page->index;
	/* Hold a page reference for the lifetime of the request. */
	page_cache_get(page);
	BUG_ON(PagePrivate(page));
	BUG_ON(!PageLocked(page));
	BUG_ON(page->mapping->host != inode);
	req->wb_offset  = offset;
	req->wb_pgbase	= offset;
	req->wb_bytes   = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	return req;
}

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 *
 * Clears PG_BUSY (with memory barriers around the bit op), wakes any
 * waiters sleeping on the bit, and drops the caller's reference.
 * BUGs if the request was not actually locked.
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_clear_bit();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&req->wb_flags, PG_BUSY);
	nfs_release_request(req);
}

/**
 * nfs_set_page_tag_locked - Tag a request as locked
 * @req: request to try to lock
 *
 * Attempts a non-blocking lock of @req; on success, also sets the
 * NFS_PAGE_TAG_LOCKED radix-tree tag if the page is mapped.
 * Returns 1 if the lock was obtained, 0 otherwise.
 */
int nfs_set_page_tag_locked(struct nfs_page *req)
{
	if (!nfs_lock_request_dontget(req))
		return 0;
	if (test_bit(PG_MAPPED, &req->wb_flags))
		radix_tree_tag_set(&NFS_I(req->wb_context->path.dentry->d_inode)->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
	return 1;
}

/**
 * nfs_clear_page_tag_locked - Clear request tag and wake up sleepers
 * @req: request to unlock
 *
 * For mapped pages the tag clear and unlock are done together under
 * the inode's i_lock; otherwise the request is simply unlocked.
 */
void nfs_clear_page_tag_locked(struct nfs_page *req)
{
	if (test_bit(PG_MAPPED, &req->wb_flags)) {
		struct inode *inode = req->wb_context->path.dentry->d_inode;
		struct nfs_inode *nfsi = NFS_I(inode);

		spin_lock(&inode->i_lock);
		radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
		nfs_unlock_request(req);
		spin_unlock(&inode->i_lock);
	} else
		nfs_unlock_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req:
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.  Each pointer is NULLed after its
 * reference is dropped so a second call is a no-op.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_open_context *ctx = req->wb_context;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;

	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
	if (ctx != NULL) {
		put_nfs_open_context(ctx);
		req->wb_context = NULL;
	}
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
/* kref release callback: tear down held resources, then free the request. */
static void nfs_free_request(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_free_request);
}

/* wait_on_bit action: just yield to the I/O scheduler; never interrupted. */
static int nfs_wait_bit_uninterruptible(void *word)
{
	io_schedule();
	return 0;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
int nfs_wait_on_request(struct nfs_page *req)
{
	return wait_on_bit(&req->wb_flags, PG_BUSY,
			nfs_wait_bit_uninterruptible,
			TASK_UNINTERRUPTIBLE);
}

/*
 * Default pg_test: accept @req for coalescing if the accumulated byte
 * count stays within the descriptor's block size.
 */
bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev,
			 struct nfs_page *req)
{
	/*
	 * FIXME: ideally we should be able to coalesce all requests
	 * that are not block boundary aligned, but currently this
	 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
	 * since nfs_flush_multi and nfs_pagein_multi assume you
	 * can have only one struct nfs_page.
	 */
	if (desc->pg_bsize < PAGE_SIZE)
		return 0;

	return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @doio: pointer to io function
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     int (*doio)(struct nfs_pageio_descriptor *),
		     size_t bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_moreio = 0;
	desc->pg_inode = inode;
	desc->pg_doio = doio;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	/* default coalescing test; pnfs_pageio_init may override it */
	desc->pg_test = nfs_generic_pg_test;
	pnfs_pageio_init(desc, inode);
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: descriptor whose pg_test callback gets the final say
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	if (req->wb_context->cred != prev->wb_context->cred)
		return false;
	if (req->wb_lock_context->lockowner != prev->wb_lock_context->lockowner)
		return false;
	if (req->wb_context->state != prev->wb_context->state)
		return false;
	/* pages must be consecutive ... */
	if (req->wb_index != (prev->wb_index + 1))
		return false;
	/* ... and the byte ranges must abut across the page boundary */
	if (req->wb_pgbase != 0)
		return false;
	if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
		return false;
	return pgio->pg_test(pgio, prev, req);
}

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	if (desc->pg_count != 0) {
		struct nfs_page *prev;

		prev = nfs_list_entry(desc->pg_list.prev);
		if (!nfs_can_coalesce_requests(prev, req, desc))
			return 0;
	} else {
		/* first request in this batch fixes the base offset */
		desc->pg_base = req->wb_pgbase;
	}
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count += req->wb_bytes;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 *
 * Fires the queued list via pg_doio and, if the list was consumed,
 * resets the byte count and base for the next batch.
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	if (!list_empty(&desc->pg_list)) {
		int error = desc->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		else
			desc->pg_bytes_written += desc->pg_count;
	}
	if (list_empty(&desc->pg_list)) {
		desc->pg_count = 0;
		desc->pg_base = 0;
	}
}

/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.  If coalescing fails, the pending batch
 * is flushed and the add is retried; returns 0 only on an I/O error.
 */
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	while (!nfs_pageio_do_add_request(desc, req)) {
		desc->pg_moreio = 1;
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0)
			return 0;
		desc->pg_moreio = 0;
	}
	return 1;
}

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	nfs_pageio_doio(desc);
}

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	if (!list_empty(&desc->pg_list)) {
		struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
		if (index != prev->wb_index + 1)
			nfs_pageio_doio(desc);
	}
}

/* Batch size for each radix-tree gang lookup in nfs_scan_list(). */
#define NFS_SCAN_MAXENTRIES 16
/**
 * nfs_scan_list - Scan a list for matching requests
 * @nfsi: NFS inode
 * @dst: Destination list
 * @idx_start: lower bound of page->index to scan
 * @npages: idx_start + npages sets the upper bound to scan.
 * @tag: tag to scan for
 *
 * Moves elements from one of the inode request lists.
 * If the number of requests is set to 0, the entire address_space
 * starting at index idx_start, is scanned.
 * The requests are *not* checked to ensure that they form a contiguous set.
 * You must be holding the inode's i_lock when calling this function
 *
 * Returns the number of requests moved.
 */
int nfs_scan_list(struct nfs_inode *nfsi,
		struct list_head *dst, pgoff_t idx_start,
		unsigned int npages, int tag)
{
	struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
	struct nfs_page *req;
	pgoff_t idx_end;
	int found, i;
	int res;
	struct list_head *list;

	res = 0;
	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	for (;;) {
		found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree,
				(void **)&pgvec[0], idx_start,
				NFS_SCAN_MAXENTRIES, tag);
		if (found <= 0)
			break;
		for (i = 0; i < found; i++) {
			req = pgvec[i];
			if (req->wb_index > idx_end)
				goto out;
			idx_start = req->wb_index + 1;
			/* only move requests we can lock without blocking */
			if (nfs_set_page_tag_locked(req)) {
				kref_get(&req->wb_kref);
				radix_tree_tag_clear(&nfsi->nfs_page_tree,
						req->wb_index, tag);
				list = pnfs_choose_commit_list(req, dst);
				nfs_list_add_request(req, list);
				res++;
				if (res == INT_MAX)
					goto out;
			}
		}
		/* for latency reduction */
		cond_resched_lock(&nfsi->vfs_inode.i_lock);
	}
out:
	return res;
}

/* Create the nfs_page slab cache at module init; -ENOMEM on failure. */
int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/* Destroy the nfs_page slab cache at module exit. */
void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}
gpl-2.0
tsfs/Vybrid-Linux
drivers/net/vmxnet3/vmxnet3_ethtool.c
2376
19244
/* * Linux driver for VMware's vmxnet3 ethernet NIC. * * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; version 2 of the License and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * The full GNU General Public License is included in this distribution in * the file called "COPYING". * * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> * */ #include "vmxnet3_int.h" struct vmxnet3_stat_desc { char desc[ETH_GSTRING_LEN]; int offset; }; /* per tq stats maintained by the device */ static const struct vmxnet3_stat_desc vmxnet3_tq_dev_stats[] = { /* description, offset */ { "Tx Queue#", 0 }, { " TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) }, { " TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) }, { " ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) }, { " ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) }, { " mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) }, { " mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) }, { " bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) }, { " bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) }, { " pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) }, { " pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) }, }; /* per tq stats maintained by the driver */ static const struct 
vmxnet3_stat_desc vmxnet3_tq_driver_stats[] = { /* description, offset */ {" drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats, drop_total) }, { " too many frags", offsetof(struct vmxnet3_tq_driver_stats, drop_too_many_frags) }, { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, drop_oversized_hdr) }, { " hdr err", offsetof(struct vmxnet3_tq_driver_stats, drop_hdr_inspect_err) }, { " tso", offsetof(struct vmxnet3_tq_driver_stats, drop_tso) }, { " ring full", offsetof(struct vmxnet3_tq_driver_stats, tx_ring_full) }, { " pkts linearized", offsetof(struct vmxnet3_tq_driver_stats, linearized) }, { " hdr cloned", offsetof(struct vmxnet3_tq_driver_stats, copy_skb_header) }, { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, oversized_hdr) }, }; /* per rq stats maintained by the device */ static const struct vmxnet3_stat_desc vmxnet3_rq_dev_stats[] = { { "Rx Queue#", 0 }, { " LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) }, { " LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) }, { " ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) }, { " ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) }, { " mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) }, { " mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) }, { " bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) }, { " bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) }, { " pkts rx OOB", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) }, { " pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) }, }; /* per rq stats maintained by the driver */ static const struct vmxnet3_stat_desc vmxnet3_rq_driver_stats[] = { /* description, offset */ { " drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats, drop_total) }, { " err", offsetof(struct vmxnet3_rq_driver_stats, drop_err) }, { " fcs", offsetof(struct vmxnet3_rq_driver_stats, drop_fcs) }, { " rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats, 
rx_buf_alloc_failure) }, }; /* gloabl stats maintained by the driver */ static const struct vmxnet3_stat_desc vmxnet3_global_stats[] = { /* description, offset */ { "tx timeout count", offsetof(struct vmxnet3_adapter, tx_timeout_count) } }; struct net_device_stats * vmxnet3_get_stats(struct net_device *netdev) { struct vmxnet3_adapter *adapter; struct vmxnet3_tq_driver_stats *drvTxStats; struct vmxnet3_rq_driver_stats *drvRxStats; struct UPT1_TxStats *devTxStats; struct UPT1_RxStats *devRxStats; struct net_device_stats *net_stats = &netdev->stats; unsigned long flags; int i; adapter = netdev_priv(netdev); /* Collect the dev stats into the shared area */ spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); spin_unlock_irqrestore(&adapter->cmd_lock, flags); memset(net_stats, 0, sizeof(*net_stats)); for (i = 0; i < adapter->num_tx_queues; i++) { devTxStats = &adapter->tqd_start[i].stats; drvTxStats = &adapter->tx_queue[i].stats; net_stats->tx_packets += devTxStats->ucastPktsTxOK + devTxStats->mcastPktsTxOK + devTxStats->bcastPktsTxOK; net_stats->tx_bytes += devTxStats->ucastBytesTxOK + devTxStats->mcastBytesTxOK + devTxStats->bcastBytesTxOK; net_stats->tx_errors += devTxStats->pktsTxError; net_stats->tx_dropped += drvTxStats->drop_total; } for (i = 0; i < adapter->num_rx_queues; i++) { devRxStats = &adapter->rqd_start[i].stats; drvRxStats = &adapter->rx_queue[i].stats; net_stats->rx_packets += devRxStats->ucastPktsRxOK + devRxStats->mcastPktsRxOK + devRxStats->bcastPktsRxOK; net_stats->rx_bytes += devRxStats->ucastBytesRxOK + devRxStats->mcastBytesRxOK + devRxStats->bcastBytesRxOK; net_stats->rx_errors += devRxStats->pktsRxError; net_stats->rx_dropped += drvRxStats->drop_total; net_stats->multicast += devRxStats->mcastPktsRxOK; } return net_stats; } static int vmxnet3_get_sset_count(struct net_device *netdev, int sset) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); switch (sset) { case 
ETH_SS_STATS: return (ARRAY_SIZE(vmxnet3_tq_dev_stats) + ARRAY_SIZE(vmxnet3_tq_driver_stats)) * adapter->num_tx_queues + (ARRAY_SIZE(vmxnet3_rq_dev_stats) + ARRAY_SIZE(vmxnet3_rq_driver_stats)) * adapter->num_rx_queues + ARRAY_SIZE(vmxnet3_global_stats); default: return -EOPNOTSUPP; } } /* Should be multiple of 4 */ #define NUM_TX_REGS 8 #define NUM_RX_REGS 12 static int vmxnet3_get_regs_len(struct net_device *netdev) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); return (adapter->num_tx_queues * NUM_TX_REGS * sizeof(u32) + adapter->num_rx_queues * NUM_RX_REGS * sizeof(u32)); } static void vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver)); drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0'; strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT, sizeof(drvinfo->version)); drvinfo->driver[sizeof(drvinfo->version) - 1] = '\0'; strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); drvinfo->fw_version[sizeof(drvinfo->fw_version) - 1] = '\0'; strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), ETHTOOL_BUSINFO_LEN); drvinfo->n_stats = vmxnet3_get_sset_count(netdev, ETH_SS_STATS); drvinfo->testinfo_len = 0; drvinfo->eedump_len = 0; drvinfo->regdump_len = vmxnet3_get_regs_len(netdev); } static void vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); if (stringset == ETH_SS_STATS) { int i, j; for (j = 0; j < adapter->num_tx_queues; j++) { for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) { memcpy(buf, vmxnet3_tq_dev_stats[i].desc, ETH_GSTRING_LEN); buf += ETH_GSTRING_LEN; } for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) { memcpy(buf, vmxnet3_tq_driver_stats[i].desc, ETH_GSTRING_LEN); buf += ETH_GSTRING_LEN; } } for (j = 0; j < adapter->num_rx_queues; j++) { for (i = 0; i < 
ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) { memcpy(buf, vmxnet3_rq_dev_stats[i].desc, ETH_GSTRING_LEN); buf += ETH_GSTRING_LEN; } for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) { memcpy(buf, vmxnet3_rq_driver_stats[i].desc, ETH_GSTRING_LEN); buf += ETH_GSTRING_LEN; } } for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) { memcpy(buf, vmxnet3_global_stats[i].desc, ETH_GSTRING_LEN); buf += ETH_GSTRING_LEN; } } } int vmxnet3_set_features(struct net_device *netdev, u32 features) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); unsigned long flags; u32 changed = features ^ netdev->features; if (changed & (NETIF_F_RXCSUM|NETIF_F_LRO)) { if (features & NETIF_F_RXCSUM) adapter->shared->devRead.misc.uptFeatures |= UPT1_F_RXCSUM; else adapter->shared->devRead.misc.uptFeatures &= ~UPT1_F_RXCSUM; /* update harware LRO capability accordingly */ if (features & NETIF_F_LRO) adapter->shared->devRead.misc.uptFeatures |= UPT1_F_LRO; else adapter->shared->devRead.misc.uptFeatures &= ~UPT1_F_LRO; spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_FEATURE); spin_unlock_irqrestore(&adapter->cmd_lock, flags); } return 0; } static void vmxnet3_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *buf) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); unsigned long flags; u8 *base; int i; int j = 0; spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); spin_unlock_irqrestore(&adapter->cmd_lock, flags); /* this does assume each counter is 64-bit wide */ for (j = 0; j < adapter->num_tx_queues; j++) { base = (u8 *)&adapter->tqd_start[j].stats; *buf++ = (u64)j; for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset); base = (u8 *)&adapter->tx_queue[j].stats; for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) *buf++ = *(u64 *)(base + 
vmxnet3_tq_driver_stats[i].offset); } for (j = 0; j < adapter->num_tx_queues; j++) { base = (u8 *)&adapter->rqd_start[j].stats; *buf++ = (u64) j; for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset); base = (u8 *)&adapter->rx_queue[j].stats; for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset); } base = (u8 *)adapter; for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) *buf++ = *(u64 *)(base + vmxnet3_global_stats[i].offset); } static void vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); u32 *buf = p; int i = 0, j = 0; memset(p, 0, vmxnet3_get_regs_len(netdev)); regs->version = 1; /* Update vmxnet3_get_regs_len if we want to dump more registers */ /* make each ring use multiple of 16 bytes */ for (i = 0; i < adapter->num_tx_queues; i++) { buf[j++] = adapter->tx_queue[i].tx_ring.next2fill; buf[j++] = adapter->tx_queue[i].tx_ring.next2comp; buf[j++] = adapter->tx_queue[i].tx_ring.gen; buf[j++] = 0; buf[j++] = adapter->tx_queue[i].comp_ring.next2proc; buf[j++] = adapter->tx_queue[i].comp_ring.gen; buf[j++] = adapter->tx_queue[i].stopped; buf[j++] = 0; } for (i = 0; i < adapter->num_rx_queues; i++) { buf[j++] = adapter->rx_queue[i].rx_ring[0].next2fill; buf[j++] = adapter->rx_queue[i].rx_ring[0].next2comp; buf[j++] = adapter->rx_queue[i].rx_ring[0].gen; buf[j++] = 0; buf[j++] = adapter->rx_queue[i].rx_ring[1].next2fill; buf[j++] = adapter->rx_queue[i].rx_ring[1].next2comp; buf[j++] = adapter->rx_queue[i].rx_ring[1].gen; buf[j++] = 0; buf[j++] = adapter->rx_queue[i].comp_ring.next2proc; buf[j++] = adapter->rx_queue[i].comp_ring.gen; buf[j++] = 0; buf[j++] = 0; } } static void vmxnet3_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); wol->supported = WAKE_UCAST | WAKE_ARP | WAKE_MAGIC; 
wol->wolopts = adapter->wol; } static int vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); if (wol->wolopts & (WAKE_PHY | WAKE_MCAST | WAKE_BCAST | WAKE_MAGICSECURE)) { return -EOPNOTSUPP; } adapter->wol = wol->wolopts; device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); return 0; } static int vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); ecmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_TP; ecmd->advertising = ADVERTISED_TP; ecmd->port = PORT_TP; ecmd->transceiver = XCVR_INTERNAL; if (adapter->link_speed) { ethtool_cmd_speed_set(ecmd, adapter->link_speed); ecmd->duplex = DUPLEX_FULL; } else { ethtool_cmd_speed_set(ecmd, -1); ecmd->duplex = -1; } return 0; } static void vmxnet3_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *param) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE; param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE; param->rx_mini_max_pending = 0; param->rx_jumbo_max_pending = 0; param->rx_pending = adapter->rx_queue[0].rx_ring[0].size * adapter->num_rx_queues; param->tx_pending = adapter->tx_queue[0].tx_ring.size * adapter->num_tx_queues; param->rx_mini_pending = 0; param->rx_jumbo_pending = 0; } static int vmxnet3_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *param) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); u32 new_tx_ring_size, new_rx_ring_size; u32 sz; int err = 0; if (param->tx_pending == 0 || param->tx_pending > VMXNET3_TX_RING_MAX_SIZE) return -EINVAL; if (param->rx_pending == 0 || param->rx_pending > VMXNET3_RX_RING_MAX_SIZE) return -EINVAL; /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */ new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) & ~VMXNET3_RING_SIZE_MASK; new_tx_ring_size = min_t(u32, 
new_tx_ring_size, VMXNET3_TX_RING_MAX_SIZE); if (new_tx_ring_size > VMXNET3_TX_RING_MAX_SIZE || (new_tx_ring_size % VMXNET3_RING_SIZE_ALIGN) != 0) return -EINVAL; /* ring0 has to be a multiple of * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN */ sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; new_rx_ring_size = (param->rx_pending + sz - 1) / sz * sz; new_rx_ring_size = min_t(u32, new_rx_ring_size, VMXNET3_RX_RING_MAX_SIZE / sz * sz); if (new_rx_ring_size > VMXNET3_RX_RING_MAX_SIZE || (new_rx_ring_size % sz) != 0) return -EINVAL; if (new_tx_ring_size == adapter->tx_queue[0].tx_ring.size && new_rx_ring_size == adapter->rx_queue[0].rx_ring[0].size) { return 0; } /* * Reset_work may be in the middle of resetting the device, wait for its * completion. */ while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) msleep(1); if (netif_running(netdev)) { vmxnet3_quiesce_dev(adapter); vmxnet3_reset_dev(adapter); /* recreate the rx queue and the tx queue based on the * new sizes */ vmxnet3_tq_destroy_all(adapter); vmxnet3_rq_destroy_all(adapter); err = vmxnet3_create_queues(adapter, new_tx_ring_size, new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE); if (err) { /* failed, most likely because of OOM, try default * size */ printk(KERN_ERR "%s: failed to apply new sizes, try the" " default ones\n", netdev->name); err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE, VMXNET3_DEF_RX_RING_SIZE, VMXNET3_DEF_RX_RING_SIZE); if (err) { printk(KERN_ERR "%s: failed to create queues " "with default sizes. Closing it\n", netdev->name); goto out; } } err = vmxnet3_activate_dev(adapter); if (err) printk(KERN_ERR "%s: failed to re-activate, error %d." 
" Closing it\n", netdev->name, err); } out: clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); if (err) vmxnet3_force_close(adapter); return err; } static int vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, void *rules) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); switch (info->cmd) { case ETHTOOL_GRXRINGS: info->data = adapter->num_rx_queues; return 0; } return -EOPNOTSUPP; } #ifdef VMXNET3_RSS static int vmxnet3_get_rss_indir(struct net_device *netdev, struct ethtool_rxfh_indir *p) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); struct UPT1_RSSConf *rssConf = adapter->rss_conf; unsigned int n = min_t(unsigned int, p->size, rssConf->indTableSize); p->size = rssConf->indTableSize; while (n--) p->ring_index[n] = rssConf->indTable[n]; return 0; } static int vmxnet3_set_rss_indir(struct net_device *netdev, const struct ethtool_rxfh_indir *p) { unsigned int i; unsigned long flags; struct vmxnet3_adapter *adapter = netdev_priv(netdev); struct UPT1_RSSConf *rssConf = adapter->rss_conf; if (p->size != rssConf->indTableSize) return -EINVAL; for (i = 0; i < rssConf->indTableSize; i++) { /* * Return with error code if any of the queue indices * is out of range */ if (p->ring_index[i] < 0 || p->ring_index[i] >= adapter->num_rx_queues) return -EINVAL; } for (i = 0; i < rssConf->indTableSize; i++) rssConf->indTable[i] = p->ring_index[i]; spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RSSIDT); spin_unlock_irqrestore(&adapter->cmd_lock, flags); return 0; } #endif static struct ethtool_ops vmxnet3_ethtool_ops = { .get_settings = vmxnet3_get_settings, .get_drvinfo = vmxnet3_get_drvinfo, .get_regs_len = vmxnet3_get_regs_len, .get_regs = vmxnet3_get_regs, .get_wol = vmxnet3_get_wol, .set_wol = vmxnet3_set_wol, .get_link = ethtool_op_get_link, .get_strings = vmxnet3_get_strings, .get_sset_count = vmxnet3_get_sset_count, .get_ethtool_stats = vmxnet3_get_ethtool_stats, 
.get_ringparam = vmxnet3_get_ringparam, .set_ringparam = vmxnet3_set_ringparam, .get_rxnfc = vmxnet3_get_rxnfc, #ifdef VMXNET3_RSS .get_rxfh_indir = vmxnet3_get_rss_indir, .set_rxfh_indir = vmxnet3_set_rss_indir, #endif }; void vmxnet3_set_ethtool_ops(struct net_device *netdev) { SET_ETHTOOL_OPS(netdev, &vmxnet3_ethtool_ops); }
gpl-2.0
soderstrom-rikard/adi-linux
tools/perf/util/debug.c
2376
1582
/* For general debugging purposes */

#include "../perf.h"

#include <string.h>
#include <stdarg.h>
#include <stdio.h>

#include "cache.h"
#include "color.h"
#include "event.h"
#include "debug.h"
#include "util.h"
#include "target.h"

/* Global verbosity level; messages whose level is <= verbose are shown. */
int verbose;
bool dump_trace = false, quiet = false;

/*
 * Emit a debug message when the global verbosity is at least @level.
 * When a browser UI is active the message goes to the UI helpline,
 * otherwise it is written to stderr.  Returns the number of characters
 * written by vfprintf(), or 0 when the message was suppressed or shown
 * through the helpline.
 */
int eprintf(int level, const char *fmt, ...)
{
	va_list ap;
	int printed = 0;

	if (verbose < level)
		return 0;

	va_start(ap, fmt);
	if (use_browser >= 1)
		ui_helpline__vshow(fmt, ap);
	else
		printed = vfprintf(stderr, fmt, ap);
	va_end(ap);

	return printed;
}

/*
 * printf() to stdout, but only while raw-event dumping (dump_trace) is
 * enabled.  Returns vprintf()'s result, or 0 when dumping is off.
 */
int dump_printf(const char *fmt, ...)
{
	va_list ap;
	int printed = 0;

	if (!dump_trace)
		return 0;

	va_start(ap, fmt);
	printed = vprintf(fmt, ap);
	va_end(ap);

	return printed;
}

/*
 * Hex-dump a raw perf event to stdout, 16 bytes per row, with a
 * printable-ASCII column at the end of each row.  No-op unless
 * dump_trace is set.
 */
void trace_event(union perf_event *event)
{
	unsigned char *raw = (void *)event;
	const char *color = PERF_COLOR_BLUE;
	int size, pos, k;

	if (!dump_trace)
		return;

	size = event->header.size;

	printf(".");
	color_fprintf(stdout, color, "\n. ... raw event: size %d bytes\n",
		      size);

	for (pos = 0; pos < size; pos++) {
		/* Row start: print the offset of the first byte in the row. */
		if ((pos & 15) == 0) {
			printf(".");
			color_fprintf(stdout, color, " %04x: ", pos);
		}

		color_fprintf(stdout, color, " %02x", raw[pos]);

		/* Row end (16th byte, or last byte of the event). */
		if ((pos & 15) == 15 || pos == size - 1) {
			color_fprintf(stdout, color, " ");
			/* Pad a short final row so the ASCII column lines up. */
			for (k = 0; k < 15 - (pos & 15); k++)
				color_fprintf(stdout, color, " ");
			/* Re-walk the row and print its printable characters. */
			for (k = pos & ~15; k <= pos; k++)
				color_fprintf(stdout, color, "%c",
					      isprint(raw[k]) ? raw[k] : '.');
			color_fprintf(stdout, color, "\n");
		}
	}
	printf(".\n");
}
gpl-2.0
KaijunTang/linux-kernel
arch/m68k/mvme16x/rtc.c
3144
3824
/*
 * Real Time Clock interface for Linux on the MVME16x
 *
 * Based on the PC driver by Paul Gortmaker.
 */

#define RTC_VERSION		"1.00"

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/mc146818rtc.h>	/* For struct rtc_time and ioctls, etc */
#include <linux/bcd.h>
#include <asm/mvme16xhw.h>

#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/setup.h>

/*
 * We sponge a minor off of the misc major. No need slurping
 * up another valuable major dev number for this. If you add
 * an ioctl, make sure you don't conflict with SPARC's RTC
 * ioctls.
 */

/* Days per month, 1-based (index 0 is a pad).  February is corrected
 * for leap years at the point of use. */
static const unsigned char days_in_mo[] =
{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};

/* Starts at 1 (free).  rtc_open() decrements, rtc_release() increments,
 * which enforces a single concurrent user of the device. */
static atomic_t rtc_ready = ATOMIC_INIT(1);

/*
 * Handle RTC_RD_TIME / RTC_SET_TIME against the MK48T08 timekeeper
 * mapped at MVME_RTC_BASE.  All chip registers hold BCD values, and the
 * chip only stores a two-digit year: 70-99 are read back as 1970-1999
 * and 00-69 as 2000-2069 (writes beyond 2069 are rejected below).
 *
 * Returns 0 on success; -EFAULT on a bad user pointer, -EACCES when a
 * non-admin caller tries to set the clock, -EINVAL for out-of-range
 * times or an unknown command.
 */
static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	volatile MK48T08ptr_t rtc = (MK48T08ptr_t)MVME_RTC_BASE;
	unsigned long flags;
	struct rtc_time wtime;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case RTC_RD_TIME:	/* Read the time/date from RTC	*/
	{
		/* IRQs stay off for the whole read so the registers cannot
		 * change between the ctrl write and the last field read. */
		local_irq_save(flags);
		/* Ensure clock and real-time-mode-register are accessible */
		rtc->ctrl = RTC_READ;
		memset(&wtime, 0, sizeof(struct rtc_time));
		wtime.tm_sec = bcd2bin(rtc->bcd_sec);
		wtime.tm_min = bcd2bin(rtc->bcd_min);
		wtime.tm_hour = bcd2bin(rtc->bcd_hr);
		wtime.tm_mday = bcd2bin(rtc->bcd_dom);
		wtime.tm_mon = bcd2bin(rtc->bcd_mth)-1;	/* chip month is 1-based */
		wtime.tm_year = bcd2bin(rtc->bcd_year);
		if (wtime.tm_year < 70)
			wtime.tm_year += 100;	/* 00-69 => years 2000-2069 */
		wtime.tm_wday = bcd2bin(rtc->bcd_dow)-1;	/* chip weekday is 1-based */
		rtc->ctrl = 0;	/* drop READ mode again */
		local_irq_restore(flags);
		return copy_to_user(argp, &wtime, sizeof wtime) ?
								-EFAULT : 0;
	}
	case RTC_SET_TIME:	/* Set the RTC */
	{
		struct rtc_time rtc_tm;
		unsigned char mon, day, hrs, min, sec, leap_yr;
		unsigned int yrs;

		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;

		if (copy_from_user(&rtc_tm, argp, sizeof(struct rtc_time)))
			return -EFAULT;

		/* Accept both tm-style (years since 1900) and absolute years. */
		yrs = rtc_tm.tm_year;
		if (yrs < 1900)
			yrs += 1900;
		mon = rtc_tm.tm_mon + 1;   /* tm_mon starts at zero */
		day = rtc_tm.tm_mday;
		hrs = rtc_tm.tm_hour;
		min = rtc_tm.tm_min;
		sec = rtc_tm.tm_sec;

		leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));

		if ((mon > 12) || (day == 0))
			return -EINVAL;

		if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
			return -EINVAL;

		if ((hrs >= 24) || (min >= 60) || (sec >= 60))
			return -EINVAL;

		/* Chip stores only yrs%100; 2070+ would collide with the
		 * 1970-1999 decoding window used on the read side. */
		if (yrs >= 2070)
			return -EINVAL;

		local_irq_save(flags);
		rtc->ctrl = RTC_WRITE;	/* enter write mode */

		rtc->bcd_sec = bin2bcd(sec);
		rtc->bcd_min = bin2bcd(min);
		rtc->bcd_hr = bin2bcd(hrs);
		rtc->bcd_dom = bin2bcd(day);
		rtc->bcd_mth = bin2bcd(mon);
		rtc->bcd_year = bin2bcd(yrs%100);

		rtc->ctrl = 0;	/* clear write mode */
		local_irq_restore(flags);
		return 0;
	}
	default:
		return -EINVAL;
	}
}

/*
 * We enforce only one user at a time here with the open/close.
 */
static int rtc_open(struct inode *inode, struct file *file)
{
	if( !atomic_dec_and_test(&rtc_ready) )
	{
		/* Someone else holds the device; undo our decrement. */
		atomic_inc( &rtc_ready );
		return -EBUSY;
	}
	return 0;
}

static int rtc_release(struct inode *inode, struct file *file)
{
	/* Give the single-user slot back; pairs with rtc_open(). */
	atomic_inc( &rtc_ready );
	return 0;
}

/*
 *	The various file operations we support.
 */
static const struct file_operations rtc_fops = {
	.unlocked_ioctl	= rtc_ioctl,
	.open		= rtc_open,
	.release	= rtc_release,
	.llseek		= noop_llseek,
};

static struct miscdevice rtc_dev=
{
	.minor =	RTC_MINOR,
	.name =		"rtc",
	.fops =		&rtc_fops
};

/* Register the misc device; bail out on non-MVME16x hardware. */
static int __init rtc_MK48T08_init(void)
{
	if (!MACH_IS_MVME16x)
		return -ENODEV;

	printk(KERN_INFO "MK48T08 Real Time Clock Driver v%s\n", RTC_VERSION);
	return misc_register(&rtc_dev);
}
module_init(rtc_MK48T08_init);
gpl-2.0
ItsAnilSingh/android_kernel_samsung_logan2g
arch/cris/arch-v10/drivers/ds1302.c
3144
12080
/*!*************************************************************************** *! *! FILE NAME : ds1302.c *! *! DESCRIPTION: Implements an interface for the DS1302 RTC through Etrax I/O *! *! Functions exported: ds1302_readreg, ds1302_writereg, ds1302_init *! *! --------------------------------------------------------------------------- *! *! (C) Copyright 1999-2007 Axis Communications AB, LUND, SWEDEN *! *!***************************************************************************/ #include <linux/fs.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/miscdevice.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/bcd.h> #include <linux/capability.h> #include <asm/uaccess.h> #include <asm/system.h> #include <arch/svinto.h> #include <asm/io.h> #include <asm/rtc.h> #include <arch/io_interface_mux.h> #include "i2c.h" #define RTC_MAJOR_NR 121 /* local major, change later */ static DEFINE_MUTEX(ds1302_mutex); static const char ds1302_name[] = "ds1302"; /* The DS1302 might be connected to different bits on different products. * It has three signals - SDA, SCL and RST. RST and SCL are always outputs, * but SDA can have a selected direction. * For now, only PORT_PB is hardcoded. */ /* The RST bit may be on either the Generic Port or Port PB. 
*/ #ifdef CONFIG_ETRAX_DS1302_RST_ON_GENERIC_PORT #define TK_RST_OUT(x) REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow, CONFIG_ETRAX_DS1302_RSTBIT, x) #define TK_RST_DIR(x) #else #define TK_RST_OUT(x) REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_DS1302_RSTBIT, x) #define TK_RST_DIR(x) REG_SHADOW_SET(R_PORT_PB_DIR, port_pb_dir_shadow, CONFIG_ETRAX_DS1302_RSTBIT, x) #endif #define TK_SDA_OUT(x) REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_DS1302_SDABIT, x) #define TK_SCL_OUT(x) REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_DS1302_SCLBIT, x) #define TK_SDA_IN() ((*R_PORT_PB_READ >> CONFIG_ETRAX_DS1302_SDABIT) & 1) /* 1 is out, 0 is in */ #define TK_SDA_DIR(x) REG_SHADOW_SET(R_PORT_PB_DIR, port_pb_dir_shadow, CONFIG_ETRAX_DS1302_SDABIT, x) #define TK_SCL_DIR(x) REG_SHADOW_SET(R_PORT_PB_DIR, port_pb_dir_shadow, CONFIG_ETRAX_DS1302_SCLBIT, x) /* * The reason for tempudelay and not udelay is that loops_per_usec * (used in udelay) is not set when functions here are called from time.c */ static void tempudelay(int usecs) { volatile int loops; for(loops = usecs * 12; loops > 0; loops--) /* nothing */; } /* Send 8 bits. */ static void out_byte(unsigned char x) { int i; TK_SDA_DIR(1); for (i = 8; i--;) { /* The chip latches incoming bits on the rising edge of SCL. */ TK_SCL_OUT(0); TK_SDA_OUT(x & 1); tempudelay(1); TK_SCL_OUT(1); tempudelay(1); x >>= 1; } TK_SDA_DIR(0); } static unsigned char in_byte(void) { unsigned char x = 0; int i; /* Read byte. Bits come LSB first, on the falling edge of SCL. * Assume SDA is in input direction already. */ TK_SDA_DIR(0); for (i = 8; i--;) { TK_SCL_OUT(0); tempudelay(1); x >>= 1; x |= (TK_SDA_IN() << 7); TK_SCL_OUT(1); tempudelay(1); } return x; } /* Prepares for a transaction by de-activating RST (active-low). */ static void start(void) { TK_SCL_OUT(0); tempudelay(1); TK_RST_OUT(0); tempudelay(5); TK_RST_OUT(1); } /* Ends a transaction by taking RST active again. 
*/ static void stop(void) { tempudelay(2); TK_RST_OUT(0); } /* Enable writing. */ static void ds1302_wenable(void) { start(); out_byte(0x8e); /* Write control register */ out_byte(0x00); /* Disable write protect bit 7 = 0 */ stop(); } /* Disable writing. */ static void ds1302_wdisable(void) { start(); out_byte(0x8e); /* Write control register */ out_byte(0x80); /* Disable write protect bit 7 = 0 */ stop(); } /* Read a byte from the selected register in the DS1302. */ unsigned char ds1302_readreg(int reg) { unsigned char x; start(); out_byte(0x81 | (reg << 1)); /* read register */ x = in_byte(); stop(); return x; } /* Write a byte to the selected register. */ void ds1302_writereg(int reg, unsigned char val) { #ifndef CONFIG_ETRAX_RTC_READONLY int do_writereg = 1; #else int do_writereg = 0; if (reg == RTC_TRICKLECHARGER) do_writereg = 1; #endif if (do_writereg) { ds1302_wenable(); start(); out_byte(0x80 | (reg << 1)); /* write register */ out_byte(val); stop(); ds1302_wdisable(); } } void get_rtc_time(struct rtc_time *rtc_tm) { unsigned long flags; local_irq_save(flags); rtc_tm->tm_sec = CMOS_READ(RTC_SECONDS); rtc_tm->tm_min = CMOS_READ(RTC_MINUTES); rtc_tm->tm_hour = CMOS_READ(RTC_HOURS); rtc_tm->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH); rtc_tm->tm_mon = CMOS_READ(RTC_MONTH); rtc_tm->tm_year = CMOS_READ(RTC_YEAR); local_irq_restore(flags); rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec); rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min); rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour); rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday); rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon); rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year); /* * Account for differences between how the RTC uses the values * and how they are defined in a struct rtc_time; */ if (rtc_tm->tm_year <= 69) rtc_tm->tm_year += 100; rtc_tm->tm_mon--; } static unsigned char days_in_mo[] = {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; /* ioctl that supports RTC_RD_TIME and RTC_SET_TIME (read and set time/date). 
*/ static int rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { unsigned long flags; switch(cmd) { case RTC_RD_TIME: /* read the time/date from RTC */ { struct rtc_time rtc_tm; memset(&rtc_tm, 0, sizeof (struct rtc_time)); get_rtc_time(&rtc_tm); if (copy_to_user((struct rtc_time*)arg, &rtc_tm, sizeof(struct rtc_time))) return -EFAULT; return 0; } case RTC_SET_TIME: /* set the RTC */ { struct rtc_time rtc_tm; unsigned char mon, day, hrs, min, sec, leap_yr; unsigned int yrs; if (!capable(CAP_SYS_TIME)) return -EPERM; if (copy_from_user(&rtc_tm, (struct rtc_time*)arg, sizeof(struct rtc_time))) return -EFAULT; yrs = rtc_tm.tm_year + 1900; mon = rtc_tm.tm_mon + 1; /* tm_mon starts at zero */ day = rtc_tm.tm_mday; hrs = rtc_tm.tm_hour; min = rtc_tm.tm_min; sec = rtc_tm.tm_sec; if ((yrs < 1970) || (yrs > 2069)) return -EINVAL; leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400)); if ((mon > 12) || (day == 0)) return -EINVAL; if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr))) return -EINVAL; if ((hrs >= 24) || (min >= 60) || (sec >= 60)) return -EINVAL; if (yrs >= 2000) yrs -= 2000; /* RTC (0, 1, ... 69) */ else yrs -= 1900; /* RTC (70, 71, ... 99) */ sec = bin2bcd(sec); min = bin2bcd(min); hrs = bin2bcd(hrs); day = bin2bcd(day); mon = bin2bcd(mon); yrs = bin2bcd(yrs); local_irq_save(flags); CMOS_WRITE(yrs, RTC_YEAR); CMOS_WRITE(mon, RTC_MONTH); CMOS_WRITE(day, RTC_DAY_OF_MONTH); CMOS_WRITE(hrs, RTC_HOURS); CMOS_WRITE(min, RTC_MINUTES); CMOS_WRITE(sec, RTC_SECONDS); local_irq_restore(flags); /* Notice that at this point, the RTC is updated but * the kernel is still running with the old time. * You need to set that separately with settimeofday * or adjtimex. 
*/ return 0; } case RTC_SET_CHARGE: /* set the RTC TRICKLE CHARGE register */ { int tcs_val; if (!capable(CAP_SYS_TIME)) return -EPERM; if(copy_from_user(&tcs_val, (int*)arg, sizeof(int))) return -EFAULT; tcs_val = RTC_TCR_PATTERN | (tcs_val & 0x0F); ds1302_writereg(RTC_TRICKLECHARGER, tcs_val); return 0; } case RTC_VL_READ: { /* TODO: * Implement voltage low detection support */ printk(KERN_WARNING "DS1302: RTC Voltage Low detection" " is not supported\n"); return 0; } case RTC_VL_CLR: { /* TODO: * Nothing to do since Voltage Low detection is not supported */ return 0; } default: return -ENOIOCTLCMD; } } static long rtc_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret; mutex_lock(&ds1302_mutex); ret = rtc_ioctl(file, cmd, arg); mutex_unlock(&ds1302_mutex); return ret; } static void print_rtc_status(void) { struct rtc_time tm; get_rtc_time(&tm); /* * There is no way to tell if the luser has the RTC set for local * time or for Universal Standard Time (GMT). Probably local though. */ printk(KERN_INFO "rtc_time\t: %02d:%02d:%02d\n", tm.tm_hour, tm.tm_min, tm.tm_sec); printk(KERN_INFO "rtc_date\t: %04d-%02d-%02d\n", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday); } /* The various file operations we support. */ static const struct file_operations rtc_fops = { .owner = THIS_MODULE, .unlocked_ioctl = rtc_unlocked_ioctl, .llseek = noop_llseek, }; /* Probe for the chip by writing something to its RAM and try reading it back. */ #define MAGIC_PATTERN 0x42 static int __init ds1302_probe(void) { int retval, res; TK_RST_DIR(1); TK_SCL_DIR(1); TK_SDA_DIR(0); /* Try to talk to timekeeper. 
*/ ds1302_wenable(); start(); out_byte(0xc0); /* write RAM byte 0 */ out_byte(MAGIC_PATTERN); /* write something magic */ start(); out_byte(0xc1); /* read RAM byte 0 */ if((res = in_byte()) == MAGIC_PATTERN) { stop(); ds1302_wdisable(); printk(KERN_INFO "%s: RTC found.\n", ds1302_name); printk(KERN_INFO "%s: SDA, SCL, RST on PB%i, PB%i, %s%i\n", ds1302_name, CONFIG_ETRAX_DS1302_SDABIT, CONFIG_ETRAX_DS1302_SCLBIT, #ifdef CONFIG_ETRAX_DS1302_RST_ON_GENERIC_PORT "GENIO", #else "PB", #endif CONFIG_ETRAX_DS1302_RSTBIT); print_rtc_status(); retval = 1; } else { stop(); retval = 0; } return retval; } /* Just probe for the RTC and register the device to handle the ioctl needed. */ int __init ds1302_init(void) { #ifdef CONFIG_ETRAX_I2C i2c_init(); #endif if (!ds1302_probe()) { #ifdef CONFIG_ETRAX_DS1302_RST_ON_GENERIC_PORT #if CONFIG_ETRAX_DS1302_RSTBIT == 27 /* * The only way to set g27 to output is to enable ATA. * * Make sure that R_GEN_CONFIG is setup correct. */ /* Allocating the ATA interface will grab almost all * pins in I/O groups a, b, c and d. A consequence of * allocating the ATA interface is that the fixed * interfaces shared RAM, parallel port 0, parallel * port 1, parallel port W, SCSI-8 port 0, SCSI-8 port * 1, SCSI-W, serial port 2, serial port 3, * synchronous serial port 3 and USB port 2 and almost * all GPIO pins on port g cannot be used. */ if (cris_request_io_interface(if_ata, "ds1302/ATA")) { printk(KERN_WARNING "ds1302: Failed to get IO interface\n"); return -1; } #elif CONFIG_ETRAX_DS1302_RSTBIT == 0 if (cris_io_interface_allocate_pins(if_gpio_grp_a, 'g', CONFIG_ETRAX_DS1302_RSTBIT, CONFIG_ETRAX_DS1302_RSTBIT)) { printk(KERN_WARNING "ds1302: Failed to get IO interface\n"); return -1; } /* Set the direction of this bit to out. 
*/ genconfig_shadow = ((genconfig_shadow & ~IO_MASK(R_GEN_CONFIG, g0dir)) | (IO_STATE(R_GEN_CONFIG, g0dir, out))); *R_GEN_CONFIG = genconfig_shadow; #endif if (!ds1302_probe()) { printk(KERN_WARNING "%s: RTC not found.\n", ds1302_name); return -1; } #else printk(KERN_WARNING "%s: RTC not found.\n", ds1302_name); return -1; #endif } /* Initialise trickle charger */ ds1302_writereg(RTC_TRICKLECHARGER, RTC_TCR_PATTERN |(CONFIG_ETRAX_DS1302_TRICKLE_CHARGE & 0x0F)); /* Start clock by resetting CLOCK_HALT */ ds1302_writereg(RTC_SECONDS, (ds1302_readreg(RTC_SECONDS) & 0x7F)); return 0; } static int __init ds1302_register(void) { ds1302_init(); if (register_chrdev(RTC_MAJOR_NR, ds1302_name, &rtc_fops)) { printk(KERN_INFO "%s: unable to get major %d for rtc\n", ds1302_name, RTC_MAJOR_NR); return -1; } return 0; } module_init(ds1302_register);
gpl-2.0
kiriapurv/kiriyard-mako-kernel
drivers/pci/pcie/aspm.c
3400
28133
/* * File: drivers/pci/pcie/aspm.c * Enabling PCIe link L0s/L1 state and Clock Power Management * * Copyright (C) 2007 Intel * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com) * Copyright (C) Shaohua Li (shaohua.li@intel.com) */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/pci.h> #include <linux/pci_regs.h> #include <linux/errno.h> #include <linux/pm.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/delay.h> #include <linux/pci-aspm.h> #include "../pci.h" #ifdef MODULE_PARAM_PREFIX #undef MODULE_PARAM_PREFIX #endif #define MODULE_PARAM_PREFIX "pcie_aspm." /* Note: those are not register definitions */ #define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */ #define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */ #define ASPM_STATE_L1 (4) /* L1 state */ #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW) #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1) struct aspm_latency { u32 l0s; /* L0s latency (nsec) */ u32 l1; /* L1 latency (nsec) */ }; struct pcie_link_state { struct pci_dev *pdev; /* Upstream component of the Link */ struct pcie_link_state *root; /* pointer to the root port link */ struct pcie_link_state *parent; /* pointer to the parent Link state */ struct list_head sibling; /* node in link_list */ struct list_head children; /* list of child link states */ struct list_head link; /* node in parent's children list */ /* ASPM state */ u32 aspm_support:3; /* Supported ASPM state */ u32 aspm_enabled:3; /* Enabled ASPM state */ u32 aspm_capable:3; /* Capable ASPM state with latency */ u32 aspm_default:3; /* Default ASPM state by BIOS */ u32 aspm_disable:3; /* Disabled ASPM state */ /* Clock PM state */ u32 clkpm_capable:1; /* Clock PM capable? 
*/ u32 clkpm_enabled:1; /* Current Clock PM state */ u32 clkpm_default:1; /* Default Clock PM state by BIOS */ /* Exit latencies */ struct aspm_latency latency_up; /* Upstream direction exit latency */ struct aspm_latency latency_dw; /* Downstream direction exit latency */ /* * Endpoint acceptable latencies. A pcie downstream port only * has one slot under it, so at most there are 8 functions. */ struct aspm_latency acceptable[8]; }; static int aspm_disabled, aspm_force; static bool aspm_support_enabled = true; static DEFINE_MUTEX(aspm_lock); static LIST_HEAD(link_list); #define POLICY_DEFAULT 0 /* BIOS default setting */ #define POLICY_PERFORMANCE 1 /* high performance */ #define POLICY_POWERSAVE 2 /* high power saving */ #ifdef CONFIG_PCIEASPM_PERFORMANCE static int aspm_policy = POLICY_PERFORMANCE; #elif defined CONFIG_PCIEASPM_POWERSAVE static int aspm_policy = POLICY_POWERSAVE; #else static int aspm_policy; #endif static const char *policy_str[] = { [POLICY_DEFAULT] = "default", [POLICY_PERFORMANCE] = "performance", [POLICY_POWERSAVE] = "powersave" }; #define LINK_RETRAIN_TIMEOUT HZ static int policy_to_aspm_state(struct pcie_link_state *link) { switch (aspm_policy) { case POLICY_PERFORMANCE: /* Disable ASPM and Clock PM */ return 0; case POLICY_POWERSAVE: /* Enable ASPM L0s/L1 */ return ASPM_STATE_ALL; case POLICY_DEFAULT: return link->aspm_default; } return 0; } static int policy_to_clkpm_state(struct pcie_link_state *link) { switch (aspm_policy) { case POLICY_PERFORMANCE: /* Disable ASPM and Clock PM */ return 0; case POLICY_POWERSAVE: /* Disable Clock PM */ return 1; case POLICY_DEFAULT: return link->clkpm_default; } return 0; } static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable) { int pos; u16 reg16; struct pci_dev *child; struct pci_bus *linkbus = link->pdev->subordinate; list_for_each_entry(child, &linkbus->devices, bus_list) { pos = pci_pcie_cap(child); if (!pos) return; pci_read_config_word(child, pos + PCI_EXP_LNKCTL, 
&reg16); if (enable) reg16 |= PCI_EXP_LNKCTL_CLKREQ_EN; else reg16 &= ~PCI_EXP_LNKCTL_CLKREQ_EN; pci_write_config_word(child, pos + PCI_EXP_LNKCTL, reg16); } link->clkpm_enabled = !!enable; } static void pcie_set_clkpm(struct pcie_link_state *link, int enable) { /* Don't enable Clock PM if the link is not Clock PM capable */ if (!link->clkpm_capable && enable) enable = 0; /* Need nothing if the specified equals to current state */ if (link->clkpm_enabled == enable) return; pcie_set_clkpm_nocheck(link, enable); } static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist) { int pos, capable = 1, enabled = 1; u32 reg32; u16 reg16; struct pci_dev *child; struct pci_bus *linkbus = link->pdev->subordinate; /* All functions should have the same cap and state, take the worst */ list_for_each_entry(child, &linkbus->devices, bus_list) { pos = pci_pcie_cap(child); if (!pos) return; pci_read_config_dword(child, pos + PCI_EXP_LNKCAP, &reg32); if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) { capable = 0; enabled = 0; break; } pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16); if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN)) enabled = 0; } link->clkpm_enabled = enabled; link->clkpm_default = enabled; link->clkpm_capable = (blacklist) ? 0 : capable; } /* * pcie_aspm_configure_common_clock: check if the 2 ends of a link * could use common clock. If they are, configure them to use the * common clock. That will reduce the ASPM state exit latency. 
*/ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link) { int ppos, cpos, same_clock = 1; u16 reg16, parent_reg, child_reg[8]; unsigned long start_jiffies; struct pci_dev *child, *parent = link->pdev; struct pci_bus *linkbus = parent->subordinate; /* * All functions of a slot should have the same Slot Clock * Configuration, so just check one function */ child = list_entry(linkbus->devices.next, struct pci_dev, bus_list); BUG_ON(!pci_is_pcie(child)); /* Check downstream component if bit Slot Clock Configuration is 1 */ cpos = pci_pcie_cap(child); pci_read_config_word(child, cpos + PCI_EXP_LNKSTA, &reg16); if (!(reg16 & PCI_EXP_LNKSTA_SLC)) same_clock = 0; /* Check upstream component if bit Slot Clock Configuration is 1 */ ppos = pci_pcie_cap(parent); pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16); if (!(reg16 & PCI_EXP_LNKSTA_SLC)) same_clock = 0; /* Configure downstream component, all functions */ list_for_each_entry(child, &linkbus->devices, bus_list) { cpos = pci_pcie_cap(child); pci_read_config_word(child, cpos + PCI_EXP_LNKCTL, &reg16); child_reg[PCI_FUNC(child->devfn)] = reg16; if (same_clock) reg16 |= PCI_EXP_LNKCTL_CCC; else reg16 &= ~PCI_EXP_LNKCTL_CCC; pci_write_config_word(child, cpos + PCI_EXP_LNKCTL, reg16); } /* Configure upstream component */ pci_read_config_word(parent, ppos + PCI_EXP_LNKCTL, &reg16); parent_reg = reg16; if (same_clock) reg16 |= PCI_EXP_LNKCTL_CCC; else reg16 &= ~PCI_EXP_LNKCTL_CCC; pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16); /* Retrain link */ reg16 |= PCI_EXP_LNKCTL_RL; pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16); /* Wait for link training end. Break out after waiting for timeout */ start_jiffies = jiffies; for (;;) { pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16); if (!(reg16 & PCI_EXP_LNKSTA_LT)) break; if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) break; msleep(1); } if (!(reg16 & PCI_EXP_LNKSTA_LT)) return; /* Training failed. 
Restore common clock configurations */ dev_printk(KERN_ERR, &parent->dev, "ASPM: Could not configure common clock\n"); list_for_each_entry(child, &linkbus->devices, bus_list) { cpos = pci_pcie_cap(child); pci_write_config_word(child, cpos + PCI_EXP_LNKCTL, child_reg[PCI_FUNC(child->devfn)]); } pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, parent_reg); } /* Convert L0s latency encoding to ns */ static u32 calc_l0s_latency(u32 encoding) { if (encoding == 0x7) return (5 * 1000); /* > 4us */ return (64 << encoding); } /* Convert L0s acceptable latency encoding to ns */ static u32 calc_l0s_acceptable(u32 encoding) { if (encoding == 0x7) return -1U; return (64 << encoding); } /* Convert L1 latency encoding to ns */ static u32 calc_l1_latency(u32 encoding) { if (encoding == 0x7) return (65 * 1000); /* > 64us */ return (1000 << encoding); } /* Convert L1 acceptable latency encoding to ns */ static u32 calc_l1_acceptable(u32 encoding) { if (encoding == 0x7) return -1U; return (1000 << encoding); } struct aspm_register_info { u32 support:2; u32 enabled:2; u32 latency_encoding_l0s; u32 latency_encoding_l1; }; static void pcie_get_aspm_reg(struct pci_dev *pdev, struct aspm_register_info *info) { int pos; u16 reg16; u32 reg32; pos = pci_pcie_cap(pdev); pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32); info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10; info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12; info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15; pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16); info->enabled = reg16 & PCI_EXP_LNKCTL_ASPMC; } static void pcie_aspm_check_latency(struct pci_dev *endpoint) { u32 latency, l1_switch_latency = 0; struct aspm_latency *acceptable; struct pcie_link_state *link; /* Device not in D0 doesn't need latency check */ if ((endpoint->current_state != PCI_D0) && (endpoint->current_state != PCI_UNKNOWN)) return; link = endpoint->bus->self->link_state; acceptable = 
&link->acceptable[PCI_FUNC(endpoint->devfn)]; while (link) { /* Check upstream direction L0s latency */ if ((link->aspm_capable & ASPM_STATE_L0S_UP) && (link->latency_up.l0s > acceptable->l0s)) link->aspm_capable &= ~ASPM_STATE_L0S_UP; /* Check downstream direction L0s latency */ if ((link->aspm_capable & ASPM_STATE_L0S_DW) && (link->latency_dw.l0s > acceptable->l0s)) link->aspm_capable &= ~ASPM_STATE_L0S_DW; /* * Check L1 latency. * Every switch on the path to root complex need 1 * more microsecond for L1. Spec doesn't mention L0s. */ latency = max_t(u32, link->latency_up.l1, link->latency_dw.l1); if ((link->aspm_capable & ASPM_STATE_L1) && (latency + l1_switch_latency > acceptable->l1)) link->aspm_capable &= ~ASPM_STATE_L1; l1_switch_latency += 1000; link = link->parent; } } static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) { struct pci_dev *child, *parent = link->pdev; struct pci_bus *linkbus = parent->subordinate; struct aspm_register_info upreg, dwreg; if (blacklist) { /* Set enabled/disable so that we will disable ASPM later */ link->aspm_enabled = ASPM_STATE_ALL; link->aspm_disable = ASPM_STATE_ALL; return; } /* Configure common clock before checking latencies */ pcie_aspm_configure_common_clock(link); /* Get upstream/downstream components' register state */ pcie_get_aspm_reg(parent, &upreg); child = list_entry(linkbus->devices.next, struct pci_dev, bus_list); pcie_get_aspm_reg(child, &dwreg); /* * Setup L0s state * * Note that we must not enable L0s in either direction on a * given link unless components on both sides of the link each * support L0s. 
*/ if (dwreg.support & upreg.support & PCIE_LINK_STATE_L0S) link->aspm_support |= ASPM_STATE_L0S; if (dwreg.enabled & PCIE_LINK_STATE_L0S) link->aspm_enabled |= ASPM_STATE_L0S_UP; if (upreg.enabled & PCIE_LINK_STATE_L0S) link->aspm_enabled |= ASPM_STATE_L0S_DW; link->latency_up.l0s = calc_l0s_latency(upreg.latency_encoding_l0s); link->latency_dw.l0s = calc_l0s_latency(dwreg.latency_encoding_l0s); /* Setup L1 state */ if (upreg.support & dwreg.support & PCIE_LINK_STATE_L1) link->aspm_support |= ASPM_STATE_L1; if (upreg.enabled & dwreg.enabled & PCIE_LINK_STATE_L1) link->aspm_enabled |= ASPM_STATE_L1; link->latency_up.l1 = calc_l1_latency(upreg.latency_encoding_l1); link->latency_dw.l1 = calc_l1_latency(dwreg.latency_encoding_l1); /* Save default state */ link->aspm_default = link->aspm_enabled; /* Setup initial capable state. Will be updated later */ link->aspm_capable = link->aspm_support; /* * If the downstream component has pci bridge function, don't * do ASPM for now. */ list_for_each_entry(child, &linkbus->devices, bus_list) { if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { link->aspm_disable = ASPM_STATE_ALL; break; } } /* Get and check endpoint acceptable latencies */ list_for_each_entry(child, &linkbus->devices, bus_list) { int pos; u32 reg32, encoding; struct aspm_latency *acceptable = &link->acceptable[PCI_FUNC(child->devfn)]; if (child->pcie_type != PCI_EXP_TYPE_ENDPOINT && child->pcie_type != PCI_EXP_TYPE_LEG_END) continue; pos = pci_pcie_cap(child); pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32); /* Calculate endpoint L0s acceptable latency */ encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6; acceptable->l0s = calc_l0s_acceptable(encoding); /* Calculate endpoint L1 acceptable latency */ encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9; acceptable->l1 = calc_l1_acceptable(encoding); pcie_aspm_check_latency(child); } } static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val) { u16 reg16; int pos = pci_pcie_cap(pdev); 
pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16); reg16 &= ~0x3; reg16 |= val; pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); } static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state) { u32 upstream = 0, dwstream = 0; struct pci_dev *child, *parent = link->pdev; struct pci_bus *linkbus = parent->subordinate; /* Nothing to do if the link is already in the requested state */ state &= (link->aspm_capable & ~link->aspm_disable); if (link->aspm_enabled == state) return; /* Convert ASPM state to upstream/downstream ASPM register state */ if (state & ASPM_STATE_L0S_UP) dwstream |= PCIE_LINK_STATE_L0S; if (state & ASPM_STATE_L0S_DW) upstream |= PCIE_LINK_STATE_L0S; if (state & ASPM_STATE_L1) { upstream |= PCIE_LINK_STATE_L1; dwstream |= PCIE_LINK_STATE_L1; } /* * Spec 2.0 suggests all functions should be configured the * same setting for ASPM. Enabling ASPM L1 should be done in * upstream component first and then downstream, and vice * versa for disabling ASPM L1. Spec doesn't mention L0S. */ if (state & ASPM_STATE_L1) pcie_config_aspm_dev(parent, upstream); list_for_each_entry(child, &linkbus->devices, bus_list) pcie_config_aspm_dev(child, dwstream); if (!(state & ASPM_STATE_L1)) pcie_config_aspm_dev(parent, upstream); link->aspm_enabled = state; } static void pcie_config_aspm_path(struct pcie_link_state *link) { while (link) { pcie_config_aspm_link(link, policy_to_aspm_state(link)); link = link->parent; } } static void free_link_state(struct pcie_link_state *link) { link->pdev->link_state = NULL; kfree(link); } static int pcie_aspm_sanity_check(struct pci_dev *pdev) { struct pci_dev *child; int pos; u32 reg32; /* * Some functions in a slot might not all be PCIe functions, * very strange. Disable ASPM for the whole slot */ list_for_each_entry(child, &pdev->subordinate->devices, bus_list) { pos = pci_pcie_cap(child); if (!pos) return -EINVAL; /* * If ASPM is disabled then we're not going to change * the BIOS state. 
It's safe to continue even if it's a * pre-1.1 device */ if (aspm_disabled) continue; /* * Disable ASPM for pre-1.1 PCIe device, we follow MS to use * RBER bit to determine if a function is 1.1 version device */ pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32); if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) { dev_printk(KERN_INFO, &child->dev, "disabling ASPM" " on pre-1.1 PCIe device. You can enable it" " with 'pcie_aspm=force'\n"); return -EINVAL; } } return 0; } static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev) { struct pcie_link_state *link; link = kzalloc(sizeof(*link), GFP_KERNEL); if (!link) return NULL; INIT_LIST_HEAD(&link->sibling); INIT_LIST_HEAD(&link->children); INIT_LIST_HEAD(&link->link); link->pdev = pdev; if (pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) { struct pcie_link_state *parent; parent = pdev->bus->parent->self->link_state; if (!parent) { kfree(link); return NULL; } link->parent = parent; list_add(&link->link, &parent->children); } /* Setup a pointer to the root port link */ if (!link->parent) link->root = link; else link->root = link->parent->root; list_add(&link->sibling, &link_list); pdev->link_state = link; return link; } /* * pcie_aspm_init_link_state: Initiate PCI express link state. * It is called after the pcie and its children devices are scaned. 
* @pdev: the root port or switch downstream port */ void pcie_aspm_init_link_state(struct pci_dev *pdev) { struct pcie_link_state *link; int blacklist = !!pcie_aspm_sanity_check(pdev); if (!pci_is_pcie(pdev) || pdev->link_state) return; if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) return; /* VIA has a strange chipset, root port is under a bridge */ if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT && pdev->bus->self) return; down_read(&pci_bus_sem); if (list_empty(&pdev->subordinate->devices)) goto out; mutex_lock(&aspm_lock); link = alloc_pcie_link_state(pdev); if (!link) goto unlock; /* * Setup initial ASPM state. Note that we need to configure * upstream links also because capable state of them can be * update through pcie_aspm_cap_init(). */ pcie_aspm_cap_init(link, blacklist); /* Setup initial Clock PM state */ pcie_clkpm_cap_init(link, blacklist); /* * At this stage drivers haven't had an opportunity to change the * link policy setting. Enabling ASPM on broken hardware can cripple * it even before the driver has had a chance to disable ASPM, so * default to a safe level right now. If we're enabling ASPM beyond * the BIOS's expectation, we'll do so once pci_enable_device() is * called. 
*/ if (aspm_policy != POLICY_POWERSAVE) { pcie_config_aspm_path(link); pcie_set_clkpm(link, policy_to_clkpm_state(link)); } unlock: mutex_unlock(&aspm_lock); out: up_read(&pci_bus_sem); } /* Recheck latencies and update aspm_capable for links under the root */ static void pcie_update_aspm_capable(struct pcie_link_state *root) { struct pcie_link_state *link; BUG_ON(root->parent); list_for_each_entry(link, &link_list, sibling) { if (link->root != root) continue; link->aspm_capable = link->aspm_support; } list_for_each_entry(link, &link_list, sibling) { struct pci_dev *child; struct pci_bus *linkbus = link->pdev->subordinate; if (link->root != root) continue; list_for_each_entry(child, &linkbus->devices, bus_list) { if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT) && (child->pcie_type != PCI_EXP_TYPE_LEG_END)) continue; pcie_aspm_check_latency(child); } } } /* @pdev: the endpoint device */ void pcie_aspm_exit_link_state(struct pci_dev *pdev) { struct pci_dev *parent = pdev->bus->self; struct pcie_link_state *link, *root, *parent_link; if (!pci_is_pcie(pdev) || !parent || !parent->link_state) return; if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && (parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)) return; down_read(&pci_bus_sem); mutex_lock(&aspm_lock); /* * All PCIe functions are in one slot, remove one function will remove * the whole slot, so just wait until we are the last function left. 
*/ if (!list_is_last(&pdev->bus_list, &parent->subordinate->devices)) goto out; link = parent->link_state; root = link->root; parent_link = link->parent; /* All functions are removed, so just disable ASPM for the link */ pcie_config_aspm_link(link, 0); list_del(&link->sibling); list_del(&link->link); /* Clock PM is for endpoint device */ free_link_state(link); /* Recheck latencies and configure upstream links */ if (parent_link) { pcie_update_aspm_capable(root); pcie_config_aspm_path(parent_link); } out: mutex_unlock(&aspm_lock); up_read(&pci_bus_sem); } /* @pdev: the root port or switch downstream port */ void pcie_aspm_pm_state_change(struct pci_dev *pdev) { struct pcie_link_state *link = pdev->link_state; if (aspm_disabled || !pci_is_pcie(pdev) || !link) return; if ((pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && (pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)) return; /* * Devices changed PM state, we should recheck if latency * meets all functions' requirement */ down_read(&pci_bus_sem); mutex_lock(&aspm_lock); pcie_update_aspm_capable(link->root); pcie_config_aspm_path(link); mutex_unlock(&aspm_lock); up_read(&pci_bus_sem); } void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { struct pcie_link_state *link = pdev->link_state; if (aspm_disabled || !pci_is_pcie(pdev) || !link) return; if (aspm_policy != POLICY_POWERSAVE) return; if ((pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && (pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)) return; down_read(&pci_bus_sem); mutex_lock(&aspm_lock); pcie_config_aspm_path(link); pcie_set_clkpm(link, policy_to_clkpm_state(link)); mutex_unlock(&aspm_lock); up_read(&pci_bus_sem); } /* * pci_disable_link_state - disable pci device's link state, so the link will * never enter specific states */ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem, bool force) { struct pci_dev *parent = pdev->bus->self; struct pcie_link_state *link; if (aspm_disabled && !force) return; if (!pci_is_pcie(pdev)) return; if 
(pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT || pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) parent = pdev; if (!parent || !parent->link_state) return; if (sem) down_read(&pci_bus_sem); mutex_lock(&aspm_lock); link = parent->link_state; if (state & PCIE_LINK_STATE_L0S) link->aspm_disable |= ASPM_STATE_L0S; if (state & PCIE_LINK_STATE_L1) link->aspm_disable |= ASPM_STATE_L1; pcie_config_aspm_link(link, policy_to_aspm_state(link)); if (state & PCIE_LINK_STATE_CLKPM) { link->clkpm_capable = 0; pcie_set_clkpm(link, 0); } mutex_unlock(&aspm_lock); if (sem) up_read(&pci_bus_sem); } void pci_disable_link_state_locked(struct pci_dev *pdev, int state) { __pci_disable_link_state(pdev, state, false, false); } EXPORT_SYMBOL(pci_disable_link_state_locked); void pci_disable_link_state(struct pci_dev *pdev, int state) { __pci_disable_link_state(pdev, state, true, false); } EXPORT_SYMBOL(pci_disable_link_state); void pcie_clear_aspm(struct pci_bus *bus) { struct pci_dev *child; /* * Clear any ASPM setup that the firmware has carried out on this bus */ list_for_each_entry(child, &bus->devices, bus_list) { __pci_disable_link_state(child, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM, false, true); } } static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp) { int i; struct pcie_link_state *link; if (aspm_disabled) return -EPERM; for (i = 0; i < ARRAY_SIZE(policy_str); i++) if (!strncmp(val, policy_str[i], strlen(policy_str[i]))) break; if (i >= ARRAY_SIZE(policy_str)) return -EINVAL; if (i == aspm_policy) return 0; down_read(&pci_bus_sem); mutex_lock(&aspm_lock); aspm_policy = i; list_for_each_entry(link, &link_list, sibling) { pcie_config_aspm_link(link, policy_to_aspm_state(link)); pcie_set_clkpm(link, policy_to_clkpm_state(link)); } mutex_unlock(&aspm_lock); up_read(&pci_bus_sem); return 0; } static int pcie_aspm_get_policy(char *buffer, struct kernel_param *kp) { int i, cnt = 0; for (i = 0; i < ARRAY_SIZE(policy_str); i++) if (i == aspm_policy) 
cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]); else cnt += sprintf(buffer + cnt, "%s ", policy_str[i]); return cnt; } module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy, NULL, 0644); #ifdef CONFIG_PCIEASPM_DEBUG static ssize_t link_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pci_device = to_pci_dev(dev); struct pcie_link_state *link_state = pci_device->link_state; return sprintf(buf, "%d\n", link_state->aspm_enabled); } static ssize_t link_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { struct pci_dev *pdev = to_pci_dev(dev); struct pcie_link_state *link, *root = pdev->link_state->root; u32 val = buf[0] - '0', state = 0; if (aspm_disabled) return -EPERM; if (n < 1 || val > 3) return -EINVAL; /* Convert requested state to ASPM state */ if (val & PCIE_LINK_STATE_L0S) state |= ASPM_STATE_L0S; if (val & PCIE_LINK_STATE_L1) state |= ASPM_STATE_L1; down_read(&pci_bus_sem); mutex_lock(&aspm_lock); list_for_each_entry(link, &link_list, sibling) { if (link->root != root) continue; pcie_config_aspm_link(link, state); } mutex_unlock(&aspm_lock); up_read(&pci_bus_sem); return n; } static ssize_t clk_ctl_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pci_device = to_pci_dev(dev); struct pcie_link_state *link_state = pci_device->link_state; return sprintf(buf, "%d\n", link_state->clkpm_enabled); } static ssize_t clk_ctl_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { struct pci_dev *pdev = to_pci_dev(dev); int state; if (n < 1) return -EINVAL; state = buf[0]-'0'; down_read(&pci_bus_sem); mutex_lock(&aspm_lock); pcie_set_clkpm_nocheck(pdev->link_state, !!state); mutex_unlock(&aspm_lock); up_read(&pci_bus_sem); return n; } static DEVICE_ATTR(link_state, 0644, link_state_show, link_state_store); static DEVICE_ATTR(clk_ctl, 0644, clk_ctl_show, clk_ctl_store); static char power_group[] = 
"power"; void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev) { struct pcie_link_state *link_state = pdev->link_state; if (!pci_is_pcie(pdev) || (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) return; if (link_state->aspm_support) sysfs_add_file_to_group(&pdev->dev.kobj, &dev_attr_link_state.attr, power_group); if (link_state->clkpm_capable) sysfs_add_file_to_group(&pdev->dev.kobj, &dev_attr_clk_ctl.attr, power_group); } void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev) { struct pcie_link_state *link_state = pdev->link_state; if (!pci_is_pcie(pdev) || (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) return; if (link_state->aspm_support) sysfs_remove_file_from_group(&pdev->dev.kobj, &dev_attr_link_state.attr, power_group); if (link_state->clkpm_capable) sysfs_remove_file_from_group(&pdev->dev.kobj, &dev_attr_clk_ctl.attr, power_group); } #endif static int __init pcie_aspm_disable(char *str) { if (!strcmp(str, "off")) { aspm_policy = POLICY_DEFAULT; aspm_disabled = 1; aspm_support_enabled = false; printk(KERN_INFO "PCIe ASPM is disabled\n"); } else if (!strcmp(str, "force")) { aspm_force = 1; printk(KERN_INFO "PCIe ASPM is forcibly enabled\n"); } return 1; } __setup("pcie_aspm=", pcie_aspm_disable); void pcie_no_aspm(void) { /* * Disabling ASPM is intended to prevent the kernel from modifying * existing hardware state, not to clear existing state. To that end: * (a) set policy to POLICY_DEFAULT in order to avoid changing state * (b) prevent userspace from changing policy */ if (!aspm_force) { aspm_policy = POLICY_DEFAULT; aspm_disabled = 1; } } /** * pcie_aspm_enabled - is PCIe ASPM enabled? * * Returns true if ASPM has not been disabled by the command-line option * pcie_aspm=off. 
**/ int pcie_aspm_enabled(void) { return !aspm_disabled; } EXPORT_SYMBOL(pcie_aspm_enabled); bool pcie_aspm_support_enabled(void) { return aspm_support_enabled; } EXPORT_SYMBOL(pcie_aspm_support_enabled);
gpl-2.0
djmax81/android_kernel_samsung_exynos5420
arch/arm/mach-tegra/tegra30_clocks.c
4680
91920
/* * arch/arm/mach-tegra/tegra30_clocks.c * * Copyright (c) 2010-2011 NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/cpufreq.h> #include <linux/syscore_ops.h> #include <asm/clkdev.h> #include <mach/iomap.h> #include "clock.h" #include "fuse.h" #define USE_PLL_LOCK_BITS 0 #define RST_DEVICES_L 0x004 #define RST_DEVICES_H 0x008 #define RST_DEVICES_U 0x00C #define RST_DEVICES_V 0x358 #define RST_DEVICES_W 0x35C #define RST_DEVICES_SET_L 0x300 #define RST_DEVICES_CLR_L 0x304 #define RST_DEVICES_SET_V 0x430 #define RST_DEVICES_CLR_V 0x434 #define RST_DEVICES_NUM 5 #define CLK_OUT_ENB_L 0x010 #define CLK_OUT_ENB_H 0x014 #define CLK_OUT_ENB_U 0x018 #define CLK_OUT_ENB_V 0x360 #define CLK_OUT_ENB_W 0x364 #define CLK_OUT_ENB_SET_L 0x320 #define CLK_OUT_ENB_CLR_L 0x324 #define CLK_OUT_ENB_SET_V 0x440 #define CLK_OUT_ENB_CLR_V 0x444 #define CLK_OUT_ENB_NUM 5 #define RST_DEVICES_V_SWR_CPULP_RST_DIS (0x1 << 1) #define CLK_OUT_ENB_V_CLK_ENB_CPULP_EN (0x1 << 1) #define PERIPH_CLK_TO_BIT(c) (1 << (c->u.periph.clk_num % 32)) #define PERIPH_CLK_TO_RST_REG(c) \ periph_clk_to_reg((c), RST_DEVICES_L, RST_DEVICES_V, 4) #define 
PERIPH_CLK_TO_RST_SET_REG(c) \ periph_clk_to_reg((c), RST_DEVICES_SET_L, RST_DEVICES_SET_V, 8) #define PERIPH_CLK_TO_RST_CLR_REG(c) \ periph_clk_to_reg((c), RST_DEVICES_CLR_L, RST_DEVICES_CLR_V, 8) #define PERIPH_CLK_TO_ENB_REG(c) \ periph_clk_to_reg((c), CLK_OUT_ENB_L, CLK_OUT_ENB_V, 4) #define PERIPH_CLK_TO_ENB_SET_REG(c) \ periph_clk_to_reg((c), CLK_OUT_ENB_SET_L, CLK_OUT_ENB_SET_V, 8) #define PERIPH_CLK_TO_ENB_CLR_REG(c) \ periph_clk_to_reg((c), CLK_OUT_ENB_CLR_L, CLK_OUT_ENB_CLR_V, 8) #define CLK_MASK_ARM 0x44 #define MISC_CLK_ENB 0x48 #define OSC_CTRL 0x50 #define OSC_CTRL_OSC_FREQ_MASK (0xF<<28) #define OSC_CTRL_OSC_FREQ_13MHZ (0x0<<28) #define OSC_CTRL_OSC_FREQ_19_2MHZ (0x4<<28) #define OSC_CTRL_OSC_FREQ_12MHZ (0x8<<28) #define OSC_CTRL_OSC_FREQ_26MHZ (0xC<<28) #define OSC_CTRL_OSC_FREQ_16_8MHZ (0x1<<28) #define OSC_CTRL_OSC_FREQ_38_4MHZ (0x5<<28) #define OSC_CTRL_OSC_FREQ_48MHZ (0x9<<28) #define OSC_CTRL_MASK (0x3f2 | OSC_CTRL_OSC_FREQ_MASK) #define OSC_CTRL_PLL_REF_DIV_MASK (3<<26) #define OSC_CTRL_PLL_REF_DIV_1 (0<<26) #define OSC_CTRL_PLL_REF_DIV_2 (1<<26) #define OSC_CTRL_PLL_REF_DIV_4 (2<<26) #define OSC_FREQ_DET 0x58 #define OSC_FREQ_DET_TRIG (1<<31) #define OSC_FREQ_DET_STATUS 0x5C #define OSC_FREQ_DET_BUSY (1<<31) #define OSC_FREQ_DET_CNT_MASK 0xFFFF #define PERIPH_CLK_SOURCE_I2S1 0x100 #define PERIPH_CLK_SOURCE_EMC 0x19c #define PERIPH_CLK_SOURCE_OSC 0x1fc #define PERIPH_CLK_SOURCE_NUM1 \ ((PERIPH_CLK_SOURCE_OSC - PERIPH_CLK_SOURCE_I2S1) / 4) #define PERIPH_CLK_SOURCE_G3D2 0x3b0 #define PERIPH_CLK_SOURCE_SE 0x42c #define PERIPH_CLK_SOURCE_NUM2 \ ((PERIPH_CLK_SOURCE_SE - PERIPH_CLK_SOURCE_G3D2) / 4 + 1) #define AUDIO_DLY_CLK 0x49c #define AUDIO_SYNC_CLK_SPDIF 0x4b4 #define PERIPH_CLK_SOURCE_NUM3 \ ((AUDIO_SYNC_CLK_SPDIF - AUDIO_DLY_CLK) / 4 + 1) #define PERIPH_CLK_SOURCE_NUM (PERIPH_CLK_SOURCE_NUM1 + \ PERIPH_CLK_SOURCE_NUM2 + \ PERIPH_CLK_SOURCE_NUM3) #define CPU_SOFTRST_CTRL 0x380 #define PERIPH_CLK_SOURCE_DIVU71_MASK 0xFF #define 
PERIPH_CLK_SOURCE_DIVU16_MASK 0xFFFF #define PERIPH_CLK_SOURCE_DIV_SHIFT 0 #define PERIPH_CLK_SOURCE_DIVIDLE_SHIFT 8 #define PERIPH_CLK_SOURCE_DIVIDLE_VAL 50 #define PERIPH_CLK_UART_DIV_ENB (1<<24) #define PERIPH_CLK_VI_SEL_EX_SHIFT 24 #define PERIPH_CLK_VI_SEL_EX_MASK (0x3<<PERIPH_CLK_VI_SEL_EX_SHIFT) #define PERIPH_CLK_NAND_DIV_EX_ENB (1<<8) #define PERIPH_CLK_DTV_POLARITY_INV (1<<25) #define AUDIO_SYNC_SOURCE_MASK 0x0F #define AUDIO_SYNC_DISABLE_BIT 0x10 #define AUDIO_SYNC_TAP_NIBBLE_SHIFT(c) ((c->reg_shift - 24) * 4) #define PLL_BASE 0x0 #define PLL_BASE_BYPASS (1<<31) #define PLL_BASE_ENABLE (1<<30) #define PLL_BASE_REF_ENABLE (1<<29) #define PLL_BASE_OVERRIDE (1<<28) #define PLL_BASE_LOCK (1<<27) #define PLL_BASE_DIVP_MASK (0x7<<20) #define PLL_BASE_DIVP_SHIFT 20 #define PLL_BASE_DIVN_MASK (0x3FF<<8) #define PLL_BASE_DIVN_SHIFT 8 #define PLL_BASE_DIVM_MASK (0x1F) #define PLL_BASE_DIVM_SHIFT 0 #define PLL_OUT_RATIO_MASK (0xFF<<8) #define PLL_OUT_RATIO_SHIFT 8 #define PLL_OUT_OVERRIDE (1<<2) #define PLL_OUT_CLKEN (1<<1) #define PLL_OUT_RESET_DISABLE (1<<0) #define PLL_MISC(c) \ (((c)->flags & PLL_ALT_MISC_REG) ? 0x4 : 0xc) #define PLL_MISC_LOCK_ENABLE(c) \ (((c)->flags & (PLLU | PLLD)) ? 
(1<<22) : (1<<18)) #define PLL_MISC_DCCON_SHIFT 20 #define PLL_MISC_CPCON_SHIFT 8 #define PLL_MISC_CPCON_MASK (0xF<<PLL_MISC_CPCON_SHIFT) #define PLL_MISC_LFCON_SHIFT 4 #define PLL_MISC_LFCON_MASK (0xF<<PLL_MISC_LFCON_SHIFT) #define PLL_MISC_VCOCON_SHIFT 0 #define PLL_MISC_VCOCON_MASK (0xF<<PLL_MISC_VCOCON_SHIFT) #define PLLD_MISC_CLKENABLE (1<<30) #define PLLU_BASE_POST_DIV (1<<20) #define PLLD_BASE_DSIB_MUX_SHIFT 25 #define PLLD_BASE_DSIB_MUX_MASK (1<<PLLD_BASE_DSIB_MUX_SHIFT) #define PLLD_BASE_CSI_CLKENABLE (1<<26) #define PLLD_MISC_DSI_CLKENABLE (1<<30) #define PLLD_MISC_DIV_RST (1<<23) #define PLLD_MISC_DCCON_SHIFT 12 #define PLLDU_LFCON_SET_DIVN 600 /* FIXME: OUT_OF_TABLE_CPCON per pll */ #define OUT_OF_TABLE_CPCON 0x8 #define SUPER_CLK_MUX 0x00 #define SUPER_STATE_SHIFT 28 #define SUPER_STATE_MASK (0xF << SUPER_STATE_SHIFT) #define SUPER_STATE_STANDBY (0x0 << SUPER_STATE_SHIFT) #define SUPER_STATE_IDLE (0x1 << SUPER_STATE_SHIFT) #define SUPER_STATE_RUN (0x2 << SUPER_STATE_SHIFT) #define SUPER_STATE_IRQ (0x3 << SUPER_STATE_SHIFT) #define SUPER_STATE_FIQ (0x4 << SUPER_STATE_SHIFT) #define SUPER_LP_DIV2_BYPASS (0x1 << 16) #define SUPER_SOURCE_MASK 0xF #define SUPER_FIQ_SOURCE_SHIFT 12 #define SUPER_IRQ_SOURCE_SHIFT 8 #define SUPER_RUN_SOURCE_SHIFT 4 #define SUPER_IDLE_SOURCE_SHIFT 0 #define SUPER_CLK_DIVIDER 0x04 #define SUPER_CLOCK_DIV_U71_SHIFT 16 #define SUPER_CLOCK_DIV_U71_MASK (0xff << SUPER_CLOCK_DIV_U71_SHIFT) /* guarantees safe cpu backup */ #define SUPER_CLOCK_DIV_U71_MIN 0x2 #define BUS_CLK_DISABLE (1<<3) #define BUS_CLK_DIV_MASK 0x3 #define PMC_CTRL 0x0 #define PMC_CTRL_BLINK_ENB (1 << 7) #define PMC_DPD_PADS_ORIDE 0x1c #define PMC_DPD_PADS_ORIDE_BLINK_ENB (1 << 20) #define PMC_BLINK_TIMER_DATA_ON_SHIFT 0 #define PMC_BLINK_TIMER_DATA_ON_MASK 0x7fff #define PMC_BLINK_TIMER_ENB (1 << 15) #define PMC_BLINK_TIMER_DATA_OFF_SHIFT 16 #define PMC_BLINK_TIMER_DATA_OFF_MASK 0xffff #define PMC_PLLP_WB0_OVERRIDE 0xf8 #define PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE (1 
<< 12) #define UTMIP_PLL_CFG2 0x488 #define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xfff) << 6) #define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18) #define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN (1 << 0) #define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN (1 << 2) #define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN (1 << 4) #define UTMIP_PLL_CFG1 0x484 #define UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 27) #define UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0) #define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN (1 << 14) #define UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN (1 << 12) #define UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN (1 << 16) #define PLLE_BASE_CML_ENABLE (1<<31) #define PLLE_BASE_ENABLE (1<<30) #define PLLE_BASE_DIVCML_SHIFT 24 #define PLLE_BASE_DIVCML_MASK (0xf<<PLLE_BASE_DIVCML_SHIFT) #define PLLE_BASE_DIVP_SHIFT 16 #define PLLE_BASE_DIVP_MASK (0x3f<<PLLE_BASE_DIVP_SHIFT) #define PLLE_BASE_DIVN_SHIFT 8 #define PLLE_BASE_DIVN_MASK (0xFF<<PLLE_BASE_DIVN_SHIFT) #define PLLE_BASE_DIVM_SHIFT 0 #define PLLE_BASE_DIVM_MASK (0xFF<<PLLE_BASE_DIVM_SHIFT) #define PLLE_BASE_DIV_MASK \ (PLLE_BASE_DIVCML_MASK | PLLE_BASE_DIVP_MASK | \ PLLE_BASE_DIVN_MASK | PLLE_BASE_DIVM_MASK) #define PLLE_BASE_DIV(m, n, p, cml) \ (((cml)<<PLLE_BASE_DIVCML_SHIFT) | ((p)<<PLLE_BASE_DIVP_SHIFT) | \ ((n)<<PLLE_BASE_DIVN_SHIFT) | ((m)<<PLLE_BASE_DIVM_SHIFT)) #define PLLE_MISC_SETUP_BASE_SHIFT 16 #define PLLE_MISC_SETUP_BASE_MASK (0xFFFF<<PLLE_MISC_SETUP_BASE_SHIFT) #define PLLE_MISC_READY (1<<15) #define PLLE_MISC_LOCK (1<<11) #define PLLE_MISC_LOCK_ENABLE (1<<9) #define PLLE_MISC_SETUP_EX_SHIFT 2 #define PLLE_MISC_SETUP_EX_MASK (0x3<<PLLE_MISC_SETUP_EX_SHIFT) #define PLLE_MISC_SETUP_MASK \ (PLLE_MISC_SETUP_BASE_MASK | PLLE_MISC_SETUP_EX_MASK) #define PLLE_MISC_SETUP_VALUE \ ((0x7<<PLLE_MISC_SETUP_BASE_SHIFT) | (0x0<<PLLE_MISC_SETUP_EX_SHIFT)) #define PLLE_SS_CTRL 0x68 #define PLLE_SS_INCINTRV_SHIFT 24 #define PLLE_SS_INCINTRV_MASK (0x3f<<PLLE_SS_INCINTRV_SHIFT) 
#define PLLE_SS_INC_SHIFT 16 #define PLLE_SS_INC_MASK (0xff<<PLLE_SS_INC_SHIFT) #define PLLE_SS_MAX_SHIFT 0 #define PLLE_SS_MAX_MASK (0x1ff<<PLLE_SS_MAX_SHIFT) #define PLLE_SS_COEFFICIENTS_MASK \ (PLLE_SS_INCINTRV_MASK | PLLE_SS_INC_MASK | PLLE_SS_MAX_MASK) #define PLLE_SS_COEFFICIENTS_12MHZ \ ((0x18<<PLLE_SS_INCINTRV_SHIFT) | (0x1<<PLLE_SS_INC_SHIFT) | \ (0x24<<PLLE_SS_MAX_SHIFT)) #define PLLE_SS_DISABLE ((1<<12) | (1<<11) | (1<<10)) #define PLLE_AUX 0x48c #define PLLE_AUX_PLLP_SEL (1<<2) #define PLLE_AUX_CML_SATA_ENABLE (1<<1) #define PLLE_AUX_CML_PCIE_ENABLE (1<<0) #define PMC_SATA_PWRGT 0x1ac #define PMC_SATA_PWRGT_PLLE_IDDQ_VALUE (1<<5) #define PMC_SATA_PWRGT_PLLE_IDDQ_SWCTL (1<<4) #define ROUND_DIVIDER_UP 0 #define ROUND_DIVIDER_DOWN 1 /* FIXME: recommended safety delay after lock is detected */ #define PLL_POST_LOCK_DELAY 100 /** * Structure defining the fields for USB UTMI clocks Parameters. */ struct utmi_clk_param { /* Oscillator Frequency in KHz */ u32 osc_frequency; /* UTMIP PLL Enable Delay Count */ u8 enable_delay_count; /* UTMIP PLL Stable count */ u8 stable_count; /* UTMIP PLL Active delay count */ u8 active_delay_count; /* UTMIP PLL Xtal frequency count */ u8 xtal_freq_count; }; static const struct utmi_clk_param utmi_parameters[] = { { .osc_frequency = 13000000, .enable_delay_count = 0x02, .stable_count = 0x33, .active_delay_count = 0x05, .xtal_freq_count = 0x7F }, { .osc_frequency = 19200000, .enable_delay_count = 0x03, .stable_count = 0x4B, .active_delay_count = 0x06, .xtal_freq_count = 0xBB}, { .osc_frequency = 12000000, .enable_delay_count = 0x02, .stable_count = 0x2F, .active_delay_count = 0x04, .xtal_freq_count = 0x76 }, { .osc_frequency = 26000000, .enable_delay_count = 0x04, .stable_count = 0x66, .active_delay_count = 0x09, .xtal_freq_count = 0xFE }, { .osc_frequency = 16800000, .enable_delay_count = 0x03, .stable_count = 0x41, .active_delay_count = 0x0A, .xtal_freq_count = 0xA4 }, }; static void __iomem *reg_clk_base = 
IO_ADDRESS(TEGRA_CLK_RESET_BASE); static void __iomem *reg_pmc_base = IO_ADDRESS(TEGRA_PMC_BASE); static void __iomem *misc_gp_hidrev_base = IO_ADDRESS(TEGRA_APB_MISC_BASE); #define MISC_GP_HIDREV 0x804 /* * Some peripheral clocks share an enable bit, so refcount the enable bits * in registers CLK_ENABLE_L, ... CLK_ENABLE_W */ static int tegra_periph_clk_enable_refcount[CLK_OUT_ENB_NUM * 32]; #define clk_writel(value, reg) \ __raw_writel(value, (u32)reg_clk_base + (reg)) #define clk_readl(reg) \ __raw_readl((u32)reg_clk_base + (reg)) #define pmc_writel(value, reg) \ __raw_writel(value, (u32)reg_pmc_base + (reg)) #define pmc_readl(reg) \ __raw_readl((u32)reg_pmc_base + (reg)) #define chipid_readl() \ __raw_readl((u32)misc_gp_hidrev_base + MISC_GP_HIDREV) #define clk_writel_delay(value, reg) \ do { \ __raw_writel((value), (u32)reg_clk_base + (reg)); \ udelay(2); \ } while (0) static inline int clk_set_div(struct clk *c, u32 n) { return clk_set_rate(c, (clk_get_rate(c->parent) + n-1) / n); } static inline u32 periph_clk_to_reg( struct clk *c, u32 reg_L, u32 reg_V, int offs) { u32 reg = c->u.periph.clk_num / 32; BUG_ON(reg >= RST_DEVICES_NUM); if (reg < 3) reg = reg_L + (reg * offs); else reg = reg_V + ((reg - 3) * offs); return reg; } static unsigned long clk_measure_input_freq(void) { u32 clock_autodetect; clk_writel(OSC_FREQ_DET_TRIG | 1, OSC_FREQ_DET); do {} while (clk_readl(OSC_FREQ_DET_STATUS) & OSC_FREQ_DET_BUSY); clock_autodetect = clk_readl(OSC_FREQ_DET_STATUS); if (clock_autodetect >= 732 - 3 && clock_autodetect <= 732 + 3) { return 12000000; } else if (clock_autodetect >= 794 - 3 && clock_autodetect <= 794 + 3) { return 13000000; } else if (clock_autodetect >= 1172 - 3 && clock_autodetect <= 1172 + 3) { return 19200000; } else if (clock_autodetect >= 1587 - 3 && clock_autodetect <= 1587 + 3) { return 26000000; } else if (clock_autodetect >= 1025 - 3 && clock_autodetect <= 1025 + 3) { return 16800000; } else if (clock_autodetect >= 2344 - 3 && 
clock_autodetect <= 2344 + 3) { return 38400000; } else if (clock_autodetect >= 2928 - 3 && clock_autodetect <= 2928 + 3) { return 48000000; } else { pr_err("%s: Unexpected clock autodetect value %d", __func__, clock_autodetect); BUG(); return 0; } } static int clk_div71_get_divider(unsigned long parent_rate, unsigned long rate, u32 flags, u32 round_mode) { s64 divider_u71 = parent_rate; if (!rate) return -EINVAL; if (!(flags & DIV_U71_INT)) divider_u71 *= 2; if (round_mode == ROUND_DIVIDER_UP) divider_u71 += rate - 1; do_div(divider_u71, rate); if (flags & DIV_U71_INT) divider_u71 *= 2; if (divider_u71 - 2 < 0) return 0; if (divider_u71 - 2 > 255) return -EINVAL; return divider_u71 - 2; } static int clk_div16_get_divider(unsigned long parent_rate, unsigned long rate) { s64 divider_u16; divider_u16 = parent_rate; if (!rate) return -EINVAL; divider_u16 += rate - 1; do_div(divider_u16, rate); if (divider_u16 - 1 < 0) return 0; if (divider_u16 - 1 > 0xFFFF) return -EINVAL; return divider_u16 - 1; } /* clk_m functions */ static unsigned long tegra30_clk_m_autodetect_rate(struct clk *c) { u32 osc_ctrl = clk_readl(OSC_CTRL); u32 auto_clock_control = osc_ctrl & ~OSC_CTRL_OSC_FREQ_MASK; u32 pll_ref_div = osc_ctrl & OSC_CTRL_PLL_REF_DIV_MASK; c->rate = clk_measure_input_freq(); switch (c->rate) { case 12000000: auto_clock_control |= OSC_CTRL_OSC_FREQ_12MHZ; BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1); break; case 13000000: auto_clock_control |= OSC_CTRL_OSC_FREQ_13MHZ; BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1); break; case 19200000: auto_clock_control |= OSC_CTRL_OSC_FREQ_19_2MHZ; BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1); break; case 26000000: auto_clock_control |= OSC_CTRL_OSC_FREQ_26MHZ; BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1); break; case 16800000: auto_clock_control |= OSC_CTRL_OSC_FREQ_16_8MHZ; BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1); break; case 38400000: auto_clock_control |= OSC_CTRL_OSC_FREQ_38_4MHZ; BUG_ON(pll_ref_div != 
OSC_CTRL_PLL_REF_DIV_2); break; case 48000000: auto_clock_control |= OSC_CTRL_OSC_FREQ_48MHZ; BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_4); break; default: pr_err("%s: Unexpected clock rate %ld", __func__, c->rate); BUG(); } clk_writel(auto_clock_control, OSC_CTRL); return c->rate; } static void tegra30_clk_m_init(struct clk *c) { pr_debug("%s on clock %s\n", __func__, c->name); tegra30_clk_m_autodetect_rate(c); } static int tegra30_clk_m_enable(struct clk *c) { pr_debug("%s on clock %s\n", __func__, c->name); return 0; } static void tegra30_clk_m_disable(struct clk *c) { pr_debug("%s on clock %s\n", __func__, c->name); WARN(1, "Attempting to disable main SoC clock\n"); } static struct clk_ops tegra_clk_m_ops = { .init = tegra30_clk_m_init, .enable = tegra30_clk_m_enable, .disable = tegra30_clk_m_disable, }; static struct clk_ops tegra_clk_m_div_ops = { .enable = tegra30_clk_m_enable, }; /* PLL reference divider functions */ static void tegra30_pll_ref_init(struct clk *c) { u32 pll_ref_div = clk_readl(OSC_CTRL) & OSC_CTRL_PLL_REF_DIV_MASK; pr_debug("%s on clock %s\n", __func__, c->name); switch (pll_ref_div) { case OSC_CTRL_PLL_REF_DIV_1: c->div = 1; break; case OSC_CTRL_PLL_REF_DIV_2: c->div = 2; break; case OSC_CTRL_PLL_REF_DIV_4: c->div = 4; break; default: pr_err("%s: Invalid pll ref divider %d", __func__, pll_ref_div); BUG(); } c->mul = 1; c->state = ON; } static struct clk_ops tegra_pll_ref_ops = { .init = tegra30_pll_ref_init, .enable = tegra30_clk_m_enable, .disable = tegra30_clk_m_disable, }; /* super clock functions */ /* "super clocks" on tegra30 have two-stage muxes, fractional 7.1 divider and * clock skipping super divider. We will ignore the clock skipping divider, * since we can't lower the voltage when using the clock skip, but we can if * we lower the PLL frequency. We will use 7.1 divider for CPU super-clock * only when its parent is a fixed rate PLL, since we can't change PLL rate * in this case. 
*/ static void tegra30_super_clk_init(struct clk *c) { u32 val; int source; int shift; const struct clk_mux_sel *sel; val = clk_readl(c->reg + SUPER_CLK_MUX); c->state = ON; BUG_ON(((val & SUPER_STATE_MASK) != SUPER_STATE_RUN) && ((val & SUPER_STATE_MASK) != SUPER_STATE_IDLE)); shift = ((val & SUPER_STATE_MASK) == SUPER_STATE_IDLE) ? SUPER_IDLE_SOURCE_SHIFT : SUPER_RUN_SOURCE_SHIFT; source = (val >> shift) & SUPER_SOURCE_MASK; if (c->flags & DIV_2) source |= val & SUPER_LP_DIV2_BYPASS; for (sel = c->inputs; sel->input != NULL; sel++) { if (sel->value == source) break; } BUG_ON(sel->input == NULL); c->parent = sel->input; if (c->flags & DIV_U71) { /* Init safe 7.1 divider value (does not affect PLLX path) */ clk_writel(SUPER_CLOCK_DIV_U71_MIN << SUPER_CLOCK_DIV_U71_SHIFT, c->reg + SUPER_CLK_DIVIDER); c->mul = 2; c->div = 2; if (!(c->parent->flags & PLLX)) c->div += SUPER_CLOCK_DIV_U71_MIN; } else clk_writel(0, c->reg + SUPER_CLK_DIVIDER); } static int tegra30_super_clk_enable(struct clk *c) { return 0; } static void tegra30_super_clk_disable(struct clk *c) { /* since tegra 3 has 2 CPU super clocks - low power lp-mode clock and geared up g-mode super clock - mode switch may request to disable either of them; accept request with no affect on h/w */ } static int tegra30_super_clk_set_parent(struct clk *c, struct clk *p) { u32 val; const struct clk_mux_sel *sel; int shift; val = clk_readl(c->reg + SUPER_CLK_MUX); BUG_ON(((val & SUPER_STATE_MASK) != SUPER_STATE_RUN) && ((val & SUPER_STATE_MASK) != SUPER_STATE_IDLE)); shift = ((val & SUPER_STATE_MASK) == SUPER_STATE_IDLE) ? 
SUPER_IDLE_SOURCE_SHIFT : SUPER_RUN_SOURCE_SHIFT; for (sel = c->inputs; sel->input != NULL; sel++) { if (sel->input == p) { /* For LP mode super-clock switch between PLLX direct and divided-by-2 outputs is allowed only when other than PLLX clock source is current parent */ if ((c->flags & DIV_2) && (p->flags & PLLX) && ((sel->value ^ val) & SUPER_LP_DIV2_BYPASS)) { if (c->parent->flags & PLLX) return -EINVAL; val ^= SUPER_LP_DIV2_BYPASS; clk_writel_delay(val, c->reg); } val &= ~(SUPER_SOURCE_MASK << shift); val |= (sel->value & SUPER_SOURCE_MASK) << shift; /* 7.1 divider for CPU super-clock does not affect PLLX path */ if (c->flags & DIV_U71) { u32 div = 0; if (!(p->flags & PLLX)) { div = clk_readl(c->reg + SUPER_CLK_DIVIDER); div &= SUPER_CLOCK_DIV_U71_MASK; div >>= SUPER_CLOCK_DIV_U71_SHIFT; } c->div = div + 2; c->mul = 2; } if (c->refcnt) clk_enable(p); clk_writel_delay(val, c->reg); if (c->refcnt && c->parent) clk_disable(c->parent); clk_reparent(c, p); return 0; } } return -EINVAL; } /* * Do not use super clocks "skippers", since dividing using a clock skipper * does not allow the voltage to be scaled down. Instead adjust the rate of * the parent clock. This requires that the parent of a super clock have no * other children, otherwise the rate will change underneath the other * children. Special case: if fixed rate PLL is CPU super clock parent the * rate of this PLL can't be changed, and it has many other children. In * this case use 7.1 fractional divider to adjust the super clock rate. 
*/ static int tegra30_super_clk_set_rate(struct clk *c, unsigned long rate) { if ((c->flags & DIV_U71) && (c->parent->flags & PLL_FIXED)) { int div = clk_div71_get_divider(c->parent->u.pll.fixed_rate, rate, c->flags, ROUND_DIVIDER_DOWN); div = max(div, SUPER_CLOCK_DIV_U71_MIN); clk_writel(div << SUPER_CLOCK_DIV_U71_SHIFT, c->reg + SUPER_CLK_DIVIDER); c->div = div + 2; c->mul = 2; return 0; } return clk_set_rate(c->parent, rate); } static struct clk_ops tegra_super_ops = { .init = tegra30_super_clk_init, .enable = tegra30_super_clk_enable, .disable = tegra30_super_clk_disable, .set_parent = tegra30_super_clk_set_parent, .set_rate = tegra30_super_clk_set_rate, }; static int tegra30_twd_clk_set_rate(struct clk *c, unsigned long rate) { /* The input value 'rate' is the clock rate of the CPU complex. */ c->rate = (rate * c->mul) / c->div; return 0; } static struct clk_ops tegra30_twd_ops = { .set_rate = tegra30_twd_clk_set_rate, }; /* Blink output functions */ static void tegra30_blink_clk_init(struct clk *c) { u32 val; val = pmc_readl(PMC_CTRL); c->state = (val & PMC_CTRL_BLINK_ENB) ? 
ON : OFF; c->mul = 1; val = pmc_readl(c->reg); if (val & PMC_BLINK_TIMER_ENB) { unsigned int on_off; on_off = (val >> PMC_BLINK_TIMER_DATA_ON_SHIFT) & PMC_BLINK_TIMER_DATA_ON_MASK; val >>= PMC_BLINK_TIMER_DATA_OFF_SHIFT; val &= PMC_BLINK_TIMER_DATA_OFF_MASK; on_off += val; /* each tick in the blink timer is 4 32KHz clocks */ c->div = on_off * 4; } else { c->div = 1; } } static int tegra30_blink_clk_enable(struct clk *c) { u32 val; val = pmc_readl(PMC_DPD_PADS_ORIDE); pmc_writel(val | PMC_DPD_PADS_ORIDE_BLINK_ENB, PMC_DPD_PADS_ORIDE); val = pmc_readl(PMC_CTRL); pmc_writel(val | PMC_CTRL_BLINK_ENB, PMC_CTRL); return 0; } static void tegra30_blink_clk_disable(struct clk *c) { u32 val; val = pmc_readl(PMC_CTRL); pmc_writel(val & ~PMC_CTRL_BLINK_ENB, PMC_CTRL); val = pmc_readl(PMC_DPD_PADS_ORIDE); pmc_writel(val & ~PMC_DPD_PADS_ORIDE_BLINK_ENB, PMC_DPD_PADS_ORIDE); } static int tegra30_blink_clk_set_rate(struct clk *c, unsigned long rate) { unsigned long parent_rate = clk_get_rate(c->parent); if (rate >= parent_rate) { c->div = 1; pmc_writel(0, c->reg); } else { unsigned int on_off; u32 val; on_off = DIV_ROUND_UP(parent_rate / 8, rate); c->div = on_off * 8; val = (on_off & PMC_BLINK_TIMER_DATA_ON_MASK) << PMC_BLINK_TIMER_DATA_ON_SHIFT; on_off &= PMC_BLINK_TIMER_DATA_OFF_MASK; on_off <<= PMC_BLINK_TIMER_DATA_OFF_SHIFT; val |= on_off; val |= PMC_BLINK_TIMER_ENB; pmc_writel(val, c->reg); } return 0; } static struct clk_ops tegra_blink_clk_ops = { .init = &tegra30_blink_clk_init, .enable = &tegra30_blink_clk_enable, .disable = &tegra30_blink_clk_disable, .set_rate = &tegra30_blink_clk_set_rate, }; /* PLL Functions */ static int tegra30_pll_clk_wait_for_lock(struct clk *c, u32 lock_reg, u32 lock_bit) { #if USE_PLL_LOCK_BITS int i; for (i = 0; i < c->u.pll.lock_delay; i++) { if (clk_readl(lock_reg) & lock_bit) { udelay(PLL_POST_LOCK_DELAY); return 0; } udelay(2); /* timeout = 2 * lock time */ } pr_err("Timed out waiting for lock bit on pll %s", c->name); return -1; #endif 
udelay(c->u.pll.lock_delay); return 0; } static void tegra30_utmi_param_configure(struct clk *c) { u32 reg; int i; unsigned long main_rate = clk_get_rate(c->parent->parent); for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) { if (main_rate == utmi_parameters[i].osc_frequency) break; } if (i >= ARRAY_SIZE(utmi_parameters)) { pr_err("%s: Unexpected main rate %lu\n", __func__, main_rate); return; } reg = clk_readl(UTMIP_PLL_CFG2); /* Program UTMIP PLL stable and active counts */ /* [FIXME] arclk_rst.h says WRONG! This should be 1ms -> 0x50 Check! */ reg &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0); reg |= UTMIP_PLL_CFG2_STABLE_COUNT( utmi_parameters[i].stable_count); reg &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0); reg |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT( utmi_parameters[i].active_delay_count); /* Remove power downs from UTMIP PLL control bits */ reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN; reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN; reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN; clk_writel(reg, UTMIP_PLL_CFG2); /* Program UTMIP PLL delay and oscillator frequency counts */ reg = clk_readl(UTMIP_PLL_CFG1); reg &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0); reg |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT( utmi_parameters[i].enable_delay_count); reg &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0); reg |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT( utmi_parameters[i].xtal_freq_count); /* Remove power downs from UTMIP PLL control bits */ reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN; reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN; reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN; clk_writel(reg, UTMIP_PLL_CFG1); } static void tegra30_pll_clk_init(struct clk *c) { u32 val = clk_readl(c->reg + PLL_BASE); c->state = (val & PLL_BASE_ENABLE) ? 
ON : OFF; if (c->flags & PLL_FIXED && !(val & PLL_BASE_OVERRIDE)) { const struct clk_pll_freq_table *sel; unsigned long input_rate = clk_get_rate(c->parent); for (sel = c->u.pll.freq_table; sel->input_rate != 0; sel++) { if (sel->input_rate == input_rate && sel->output_rate == c->u.pll.fixed_rate) { c->mul = sel->n; c->div = sel->m * sel->p; return; } } pr_err("Clock %s has unknown fixed frequency\n", c->name); BUG(); } else if (val & PLL_BASE_BYPASS) { c->mul = 1; c->div = 1; } else { c->mul = (val & PLL_BASE_DIVN_MASK) >> PLL_BASE_DIVN_SHIFT; c->div = (val & PLL_BASE_DIVM_MASK) >> PLL_BASE_DIVM_SHIFT; if (c->flags & PLLU) c->div *= (val & PLLU_BASE_POST_DIV) ? 1 : 2; else c->div *= (0x1 << ((val & PLL_BASE_DIVP_MASK) >> PLL_BASE_DIVP_SHIFT)); if (c->flags & PLL_FIXED) { unsigned long rate = clk_get_rate_locked(c); BUG_ON(rate != c->u.pll.fixed_rate); } } if (c->flags & PLLU) tegra30_utmi_param_configure(c); } static int tegra30_pll_clk_enable(struct clk *c) { u32 val; pr_debug("%s on clock %s\n", __func__, c->name); #if USE_PLL_LOCK_BITS val = clk_readl(c->reg + PLL_MISC(c)); val |= PLL_MISC_LOCK_ENABLE(c); clk_writel(val, c->reg + PLL_MISC(c)); #endif val = clk_readl(c->reg + PLL_BASE); val &= ~PLL_BASE_BYPASS; val |= PLL_BASE_ENABLE; clk_writel(val, c->reg + PLL_BASE); if (c->flags & PLLM) { val = pmc_readl(PMC_PLLP_WB0_OVERRIDE); val |= PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE; pmc_writel(val, PMC_PLLP_WB0_OVERRIDE); } tegra30_pll_clk_wait_for_lock(c, c->reg + PLL_BASE, PLL_BASE_LOCK); return 0; } static void tegra30_pll_clk_disable(struct clk *c) { u32 val; pr_debug("%s on clock %s\n", __func__, c->name); val = clk_readl(c->reg); val &= ~(PLL_BASE_BYPASS | PLL_BASE_ENABLE); clk_writel(val, c->reg); if (c->flags & PLLM) { val = pmc_readl(PMC_PLLP_WB0_OVERRIDE); val &= ~PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE; pmc_writel(val, PMC_PLLP_WB0_OVERRIDE); } } static int tegra30_pll_clk_set_rate(struct clk *c, unsigned long rate) { u32 val, p_div, old_base; unsigned long 
input_rate; const struct clk_pll_freq_table *sel; struct clk_pll_freq_table cfg; pr_debug("%s: %s %lu\n", __func__, c->name, rate); if (c->flags & PLL_FIXED) { int ret = 0; if (rate != c->u.pll.fixed_rate) { pr_err("%s: Can not change %s fixed rate %lu to %lu\n", __func__, c->name, c->u.pll.fixed_rate, rate); ret = -EINVAL; } return ret; } if (c->flags & PLLM) { if (rate != clk_get_rate_locked(c)) { pr_err("%s: Can not change memory %s rate in flight\n", __func__, c->name); return -EINVAL; } return 0; } p_div = 0; input_rate = clk_get_rate(c->parent); /* Check if the target rate is tabulated */ for (sel = c->u.pll.freq_table; sel->input_rate != 0; sel++) { if (sel->input_rate == input_rate && sel->output_rate == rate) { if (c->flags & PLLU) { BUG_ON(sel->p < 1 || sel->p > 2); if (sel->p == 1) p_div = PLLU_BASE_POST_DIV; } else { BUG_ON(sel->p < 1); for (val = sel->p; val > 1; val >>= 1) p_div++; p_div <<= PLL_BASE_DIVP_SHIFT; } break; } } /* Configure out-of-table rate */ if (sel->input_rate == 0) { unsigned long cfreq; BUG_ON(c->flags & PLLU); sel = &cfg; switch (input_rate) { case 12000000: case 26000000: cfreq = (rate <= 1000000 * 1000) ? 1000000 : 2000000; break; case 13000000: cfreq = (rate <= 1000000 * 1000) ? 1000000 : 2600000; break; case 16800000: case 19200000: cfreq = (rate <= 1200000 * 1000) ? 
1200000 : 2400000; break; default: pr_err("%s: Unexpected reference rate %lu\n", __func__, input_rate); BUG(); } /* Raise VCO to guarantee 0.5% accuracy */ for (cfg.output_rate = rate; cfg.output_rate < 200 * cfreq; cfg.output_rate <<= 1) p_div++; cfg.p = 0x1 << p_div; cfg.m = input_rate / cfreq; cfg.n = cfg.output_rate / cfreq; cfg.cpcon = OUT_OF_TABLE_CPCON; if ((cfg.m > (PLL_BASE_DIVM_MASK >> PLL_BASE_DIVM_SHIFT)) || (cfg.n > (PLL_BASE_DIVN_MASK >> PLL_BASE_DIVN_SHIFT)) || (p_div > (PLL_BASE_DIVP_MASK >> PLL_BASE_DIVP_SHIFT)) || (cfg.output_rate > c->u.pll.vco_max)) { pr_err("%s: Failed to set %s out-of-table rate %lu\n", __func__, c->name, rate); return -EINVAL; } p_div <<= PLL_BASE_DIVP_SHIFT; } c->mul = sel->n; c->div = sel->m * sel->p; old_base = val = clk_readl(c->reg + PLL_BASE); val &= ~(PLL_BASE_DIVM_MASK | PLL_BASE_DIVN_MASK | ((c->flags & PLLU) ? PLLU_BASE_POST_DIV : PLL_BASE_DIVP_MASK)); val |= (sel->m << PLL_BASE_DIVM_SHIFT) | (sel->n << PLL_BASE_DIVN_SHIFT) | p_div; if (val == old_base) return 0; if (c->state == ON) { tegra30_pll_clk_disable(c); val &= ~(PLL_BASE_BYPASS | PLL_BASE_ENABLE); } clk_writel(val, c->reg + PLL_BASE); if (c->flags & PLL_HAS_CPCON) { val = clk_readl(c->reg + PLL_MISC(c)); val &= ~PLL_MISC_CPCON_MASK; val |= sel->cpcon << PLL_MISC_CPCON_SHIFT; if (c->flags & (PLLU | PLLD)) { val &= ~PLL_MISC_LFCON_MASK; if (sel->n >= PLLDU_LFCON_SET_DIVN) val |= 0x1 << PLL_MISC_LFCON_SHIFT; } else if (c->flags & (PLLX | PLLM)) { val &= ~(0x1 << PLL_MISC_DCCON_SHIFT); if (rate >= (c->u.pll.vco_max >> 1)) val |= 0x1 << PLL_MISC_DCCON_SHIFT; } clk_writel(val, c->reg + PLL_MISC(c)); } if (c->state == ON) tegra30_pll_clk_enable(c); return 0; } static struct clk_ops tegra_pll_ops = { .init = tegra30_pll_clk_init, .enable = tegra30_pll_clk_enable, .disable = tegra30_pll_clk_disable, .set_rate = tegra30_pll_clk_set_rate, }; static int tegra30_plld_clk_cfg_ex(struct clk *c, enum tegra_clk_ex_param p, u32 setting) { u32 val, mask, reg; switch (p) { 
case TEGRA_CLK_PLLD_CSI_OUT_ENB: mask = PLLD_BASE_CSI_CLKENABLE; reg = c->reg + PLL_BASE; break; case TEGRA_CLK_PLLD_DSI_OUT_ENB: mask = PLLD_MISC_DSI_CLKENABLE; reg = c->reg + PLL_MISC(c); break; case TEGRA_CLK_PLLD_MIPI_MUX_SEL: if (!(c->flags & PLL_ALT_MISC_REG)) { mask = PLLD_BASE_DSIB_MUX_MASK; reg = c->reg + PLL_BASE; break; } /* fall through - error since PLLD2 does not have MUX_SEL control */ default: return -EINVAL; } val = clk_readl(reg); if (setting) val |= mask; else val &= ~mask; clk_writel(val, reg); return 0; } static struct clk_ops tegra_plld_ops = { .init = tegra30_pll_clk_init, .enable = tegra30_pll_clk_enable, .disable = tegra30_pll_clk_disable, .set_rate = tegra30_pll_clk_set_rate, .clk_cfg_ex = tegra30_plld_clk_cfg_ex, }; static void tegra30_plle_clk_init(struct clk *c) { u32 val; val = clk_readl(PLLE_AUX); c->parent = (val & PLLE_AUX_PLLP_SEL) ? tegra_get_clock_by_name("pll_p") : tegra_get_clock_by_name("pll_ref"); val = clk_readl(c->reg + PLL_BASE); c->state = (val & PLLE_BASE_ENABLE) ? 
ON : OFF; c->mul = (val & PLLE_BASE_DIVN_MASK) >> PLLE_BASE_DIVN_SHIFT; c->div = (val & PLLE_BASE_DIVM_MASK) >> PLLE_BASE_DIVM_SHIFT; c->div *= (val & PLLE_BASE_DIVP_MASK) >> PLLE_BASE_DIVP_SHIFT; } static void tegra30_plle_clk_disable(struct clk *c) { u32 val; pr_debug("%s on clock %s\n", __func__, c->name); val = clk_readl(c->reg + PLL_BASE); val &= ~(PLLE_BASE_CML_ENABLE | PLLE_BASE_ENABLE); clk_writel(val, c->reg + PLL_BASE); } static void tegra30_plle_training(struct clk *c) { u32 val; /* PLLE is already disabled, and setup cleared; * create falling edge on PLLE IDDQ input */ val = pmc_readl(PMC_SATA_PWRGT); val |= PMC_SATA_PWRGT_PLLE_IDDQ_VALUE; pmc_writel(val, PMC_SATA_PWRGT); val = pmc_readl(PMC_SATA_PWRGT); val |= PMC_SATA_PWRGT_PLLE_IDDQ_SWCTL; pmc_writel(val, PMC_SATA_PWRGT); val = pmc_readl(PMC_SATA_PWRGT); val &= ~PMC_SATA_PWRGT_PLLE_IDDQ_VALUE; pmc_writel(val, PMC_SATA_PWRGT); do { val = clk_readl(c->reg + PLL_MISC(c)); } while (!(val & PLLE_MISC_READY)); } static int tegra30_plle_configure(struct clk *c, bool force_training) { u32 val; const struct clk_pll_freq_table *sel; unsigned long rate = c->u.pll.fixed_rate; unsigned long input_rate = clk_get_rate(c->parent); for (sel = c->u.pll.freq_table; sel->input_rate != 0; sel++) { if (sel->input_rate == input_rate && sel->output_rate == rate) break; } if (sel->input_rate == 0) return -ENOSYS; /* disable PLLE, clear setup fiels */ tegra30_plle_clk_disable(c); val = clk_readl(c->reg + PLL_MISC(c)); val &= ~(PLLE_MISC_LOCK_ENABLE | PLLE_MISC_SETUP_MASK); clk_writel(val, c->reg + PLL_MISC(c)); /* training */ val = clk_readl(c->reg + PLL_MISC(c)); if (force_training || (!(val & PLLE_MISC_READY))) tegra30_plle_training(c); /* configure dividers, setup, disable SS */ val = clk_readl(c->reg + PLL_BASE); val &= ~PLLE_BASE_DIV_MASK; val |= PLLE_BASE_DIV(sel->m, sel->n, sel->p, sel->cpcon); clk_writel(val, c->reg + PLL_BASE); c->mul = sel->n; c->div = sel->m * sel->p; val = clk_readl(c->reg + PLL_MISC(c)); val |= 
PLLE_MISC_SETUP_VALUE; val |= PLLE_MISC_LOCK_ENABLE; clk_writel(val, c->reg + PLL_MISC(c)); val = clk_readl(PLLE_SS_CTRL); val |= PLLE_SS_DISABLE; clk_writel(val, PLLE_SS_CTRL); /* enable and lock PLLE*/ val = clk_readl(c->reg + PLL_BASE); val |= (PLLE_BASE_CML_ENABLE | PLLE_BASE_ENABLE); clk_writel(val, c->reg + PLL_BASE); tegra30_pll_clk_wait_for_lock(c, c->reg + PLL_MISC(c), PLLE_MISC_LOCK); return 0; } static int tegra30_plle_clk_enable(struct clk *c) { pr_debug("%s on clock %s\n", __func__, c->name); return tegra30_plle_configure(c, !c->set); } static struct clk_ops tegra_plle_ops = { .init = tegra30_plle_clk_init, .enable = tegra30_plle_clk_enable, .disable = tegra30_plle_clk_disable, }; /* Clock divider ops */ static void tegra30_pll_div_clk_init(struct clk *c) { if (c->flags & DIV_U71) { u32 divu71; u32 val = clk_readl(c->reg); val >>= c->reg_shift; c->state = (val & PLL_OUT_CLKEN) ? ON : OFF; if (!(val & PLL_OUT_RESET_DISABLE)) c->state = OFF; divu71 = (val & PLL_OUT_RATIO_MASK) >> PLL_OUT_RATIO_SHIFT; c->div = (divu71 + 2); c->mul = 2; } else if (c->flags & DIV_2) { c->state = ON; if (c->flags & (PLLD | PLLX)) { c->div = 2; c->mul = 1; } else BUG(); } else { c->state = ON; c->div = 1; c->mul = 1; } } static int tegra30_pll_div_clk_enable(struct clk *c) { u32 val; u32 new_val; pr_debug("%s: %s\n", __func__, c->name); if (c->flags & DIV_U71) { val = clk_readl(c->reg); new_val = val >> c->reg_shift; new_val &= 0xFFFF; new_val |= PLL_OUT_CLKEN | PLL_OUT_RESET_DISABLE; val &= ~(0xFFFF << c->reg_shift); val |= new_val << c->reg_shift; clk_writel_delay(val, c->reg); return 0; } else if (c->flags & DIV_2) { return 0; } return -EINVAL; } static void tegra30_pll_div_clk_disable(struct clk *c) { u32 val; u32 new_val; pr_debug("%s: %s\n", __func__, c->name); if (c->flags & DIV_U71) { val = clk_readl(c->reg); new_val = val >> c->reg_shift; new_val &= 0xFFFF; new_val &= ~(PLL_OUT_CLKEN | PLL_OUT_RESET_DISABLE); val &= ~(0xFFFF << c->reg_shift); val |= new_val << 
c->reg_shift; clk_writel_delay(val, c->reg); } } static int tegra30_pll_div_clk_set_rate(struct clk *c, unsigned long rate) { u32 val; u32 new_val; int divider_u71; unsigned long parent_rate = clk_get_rate(c->parent); pr_debug("%s: %s %lu\n", __func__, c->name, rate); if (c->flags & DIV_U71) { divider_u71 = clk_div71_get_divider( parent_rate, rate, c->flags, ROUND_DIVIDER_UP); if (divider_u71 >= 0) { val = clk_readl(c->reg); new_val = val >> c->reg_shift; new_val &= 0xFFFF; if (c->flags & DIV_U71_FIXED) new_val |= PLL_OUT_OVERRIDE; new_val &= ~PLL_OUT_RATIO_MASK; new_val |= divider_u71 << PLL_OUT_RATIO_SHIFT; val &= ~(0xFFFF << c->reg_shift); val |= new_val << c->reg_shift; clk_writel_delay(val, c->reg); c->div = divider_u71 + 2; c->mul = 2; return 0; } } else if (c->flags & DIV_2) return clk_set_rate(c->parent, rate * 2); return -EINVAL; } static long tegra30_pll_div_clk_round_rate(struct clk *c, unsigned long rate) { int divider; unsigned long parent_rate = clk_get_rate(c->parent); pr_debug("%s: %s %lu\n", __func__, c->name, rate); if (c->flags & DIV_U71) { divider = clk_div71_get_divider( parent_rate, rate, c->flags, ROUND_DIVIDER_UP); if (divider < 0) return divider; return DIV_ROUND_UP(parent_rate * 2, divider + 2); } else if (c->flags & DIV_2) /* no rounding - fixed DIV_2 dividers pass rate to parent PLL */ return rate; return -EINVAL; } static struct clk_ops tegra_pll_div_ops = { .init = tegra30_pll_div_clk_init, .enable = tegra30_pll_div_clk_enable, .disable = tegra30_pll_div_clk_disable, .set_rate = tegra30_pll_div_clk_set_rate, .round_rate = tegra30_pll_div_clk_round_rate, }; /* Periph clk ops */ static inline u32 periph_clk_source_mask(struct clk *c) { if (c->flags & MUX8) return 7 << 29; else if (c->flags & MUX_PWM) return 3 << 28; else if (c->flags & MUX_CLK_OUT) return 3 << (c->u.periph.clk_num + 4); else if (c->flags & PLLD) return PLLD_BASE_DSIB_MUX_MASK; else return 3 << 30; } static inline u32 periph_clk_source_shift(struct clk *c) { if (c->flags 
& MUX8) return 29; else if (c->flags & MUX_PWM) return 28; else if (c->flags & MUX_CLK_OUT) return c->u.periph.clk_num + 4; else if (c->flags & PLLD) return PLLD_BASE_DSIB_MUX_SHIFT; else return 30; } static void tegra30_periph_clk_init(struct clk *c) { u32 val = clk_readl(c->reg); const struct clk_mux_sel *mux = 0; const struct clk_mux_sel *sel; if (c->flags & MUX) { for (sel = c->inputs; sel->input != NULL; sel++) { if (((val & periph_clk_source_mask(c)) >> periph_clk_source_shift(c)) == sel->value) mux = sel; } BUG_ON(!mux); c->parent = mux->input; } else { c->parent = c->inputs[0].input; } if (c->flags & DIV_U71) { u32 divu71 = val & PERIPH_CLK_SOURCE_DIVU71_MASK; if ((c->flags & DIV_U71_UART) && (!(val & PERIPH_CLK_UART_DIV_ENB))) { divu71 = 0; } if (c->flags & DIV_U71_IDLE) { val &= ~(PERIPH_CLK_SOURCE_DIVU71_MASK << PERIPH_CLK_SOURCE_DIVIDLE_SHIFT); val |= (PERIPH_CLK_SOURCE_DIVIDLE_VAL << PERIPH_CLK_SOURCE_DIVIDLE_SHIFT); clk_writel(val, c->reg); } c->div = divu71 + 2; c->mul = 2; } else if (c->flags & DIV_U16) { u32 divu16 = val & PERIPH_CLK_SOURCE_DIVU16_MASK; c->div = divu16 + 1; c->mul = 1; } else { c->div = 1; c->mul = 1; } c->state = ON; if (!(clk_readl(PERIPH_CLK_TO_ENB_REG(c)) & PERIPH_CLK_TO_BIT(c))) c->state = OFF; if (!(c->flags & PERIPH_NO_RESET)) if (clk_readl(PERIPH_CLK_TO_RST_REG(c)) & PERIPH_CLK_TO_BIT(c)) c->state = OFF; } static int tegra30_periph_clk_enable(struct clk *c) { pr_debug("%s on clock %s\n", __func__, c->name); tegra_periph_clk_enable_refcount[c->u.periph.clk_num]++; if (tegra_periph_clk_enable_refcount[c->u.periph.clk_num] > 1) return 0; clk_writel_delay(PERIPH_CLK_TO_BIT(c), PERIPH_CLK_TO_ENB_SET_REG(c)); if (!(c->flags & PERIPH_NO_RESET) && !(c->flags & PERIPH_MANUAL_RESET)) { if (clk_readl(PERIPH_CLK_TO_RST_REG(c)) & PERIPH_CLK_TO_BIT(c)) { udelay(5); /* reset propagation delay */ clk_writel(PERIPH_CLK_TO_BIT(c), PERIPH_CLK_TO_RST_CLR_REG(c)); } } return 0; } static void tegra30_periph_clk_disable(struct clk *c) { unsigned 
long val; pr_debug("%s on clock %s\n", __func__, c->name); if (c->refcnt) tegra_periph_clk_enable_refcount[c->u.periph.clk_num]--; if (tegra_periph_clk_enable_refcount[c->u.periph.clk_num] == 0) { /* If peripheral is in the APB bus then read the APB bus to * flush the write operation in apb bus. This will avoid the * peripheral access after disabling clock*/ if (c->flags & PERIPH_ON_APB) val = chipid_readl(); clk_writel_delay( PERIPH_CLK_TO_BIT(c), PERIPH_CLK_TO_ENB_CLR_REG(c)); } } static void tegra30_periph_clk_reset(struct clk *c, bool assert) { unsigned long val; pr_debug("%s %s on clock %s\n", __func__, assert ? "assert" : "deassert", c->name); if (!(c->flags & PERIPH_NO_RESET)) { if (assert) { /* If peripheral is in the APB bus then read the APB * bus to flush the write operation in apb bus. This * will avoid the peripheral access after disabling * clock */ if (c->flags & PERIPH_ON_APB) val = chipid_readl(); clk_writel(PERIPH_CLK_TO_BIT(c), PERIPH_CLK_TO_RST_SET_REG(c)); } else clk_writel(PERIPH_CLK_TO_BIT(c), PERIPH_CLK_TO_RST_CLR_REG(c)); } } static int tegra30_periph_clk_set_parent(struct clk *c, struct clk *p) { u32 val; const struct clk_mux_sel *sel; pr_debug("%s: %s %s\n", __func__, c->name, p->name); if (!(c->flags & MUX)) return (p == c->parent) ? 
0 : (-EINVAL); for (sel = c->inputs; sel->input != NULL; sel++) { if (sel->input == p) { val = clk_readl(c->reg); val &= ~periph_clk_source_mask(c); val |= (sel->value << periph_clk_source_shift(c)); if (c->refcnt) clk_enable(p); clk_writel_delay(val, c->reg); if (c->refcnt && c->parent) clk_disable(c->parent); clk_reparent(c, p); return 0; } } return -EINVAL; } static int tegra30_periph_clk_set_rate(struct clk *c, unsigned long rate) { u32 val; int divider; unsigned long parent_rate = clk_get_rate(c->parent); if (c->flags & DIV_U71) { divider = clk_div71_get_divider( parent_rate, rate, c->flags, ROUND_DIVIDER_UP); if (divider >= 0) { val = clk_readl(c->reg); val &= ~PERIPH_CLK_SOURCE_DIVU71_MASK; val |= divider; if (c->flags & DIV_U71_UART) { if (divider) val |= PERIPH_CLK_UART_DIV_ENB; else val &= ~PERIPH_CLK_UART_DIV_ENB; } clk_writel_delay(val, c->reg); c->div = divider + 2; c->mul = 2; return 0; } } else if (c->flags & DIV_U16) { divider = clk_div16_get_divider(parent_rate, rate); if (divider >= 0) { val = clk_readl(c->reg); val &= ~PERIPH_CLK_SOURCE_DIVU16_MASK; val |= divider; clk_writel_delay(val, c->reg); c->div = divider + 1; c->mul = 1; return 0; } } else if (parent_rate <= rate) { c->div = 1; c->mul = 1; return 0; } return -EINVAL; } static long tegra30_periph_clk_round_rate(struct clk *c, unsigned long rate) { int divider; unsigned long parent_rate = clk_get_rate(c->parent); pr_debug("%s: %s %lu\n", __func__, c->name, rate); if (c->flags & DIV_U71) { divider = clk_div71_get_divider( parent_rate, rate, c->flags, ROUND_DIVIDER_UP); if (divider < 0) return divider; return DIV_ROUND_UP(parent_rate * 2, divider + 2); } else if (c->flags & DIV_U16) { divider = clk_div16_get_divider(parent_rate, rate); if (divider < 0) return divider; return DIV_ROUND_UP(parent_rate, divider + 1); } return -EINVAL; } static struct clk_ops tegra_periph_clk_ops = { .init = &tegra30_periph_clk_init, .enable = &tegra30_periph_clk_enable, .disable = &tegra30_periph_clk_disable, 
	.set_parent		= &tegra30_periph_clk_set_parent,
	.set_rate		= &tegra30_periph_clk_set_rate,
	.round_rate		= &tegra30_periph_clk_round_rate,
	.reset			= &tegra30_periph_clk_reset,
};

/* Periph extended clock configuration ops */

/*
 * Program the VI (video input) source-select field of the clock register.
 * Only TEGRA_CLK_VI_INP_SEL is handled here; other parameters -> -EINVAL.
 */
static int tegra30_vi_clk_cfg_ex(struct clk *c,
				enum tegra_clk_ex_param p, u32 setting)
{
	if (p == TEGRA_CLK_VI_INP_SEL) {
		u32 val = clk_readl(c->reg);
		val &= ~PERIPH_CLK_VI_SEL_EX_MASK;
		val |= (setting << PERIPH_CLK_VI_SEL_EX_SHIFT) &
			PERIPH_CLK_VI_SEL_EX_MASK;
		clk_writel(val, c->reg);
		return 0;
	}
	return -EINVAL;
}

static struct clk_ops tegra_vi_clk_ops = {
	.init			= &tegra30_periph_clk_init,
	.enable			= &tegra30_periph_clk_enable,
	.disable		= &tegra30_periph_clk_disable,
	.set_parent		= &tegra30_periph_clk_set_parent,
	.set_rate		= &tegra30_periph_clk_set_rate,
	.round_rate		= &tegra30_periph_clk_round_rate,
	.clk_cfg_ex		= &tegra30_vi_clk_cfg_ex,
	.reset			= &tegra30_periph_clk_reset,
};

/*
 * Enable/disable the extra NAND pad divide-by-2
 * (TEGRA_CLK_NAND_PAD_DIV2_ENB); other parameters -> -EINVAL.
 */
static int tegra30_nand_clk_cfg_ex(struct clk *c,
				enum tegra_clk_ex_param p, u32 setting)
{
	if (p == TEGRA_CLK_NAND_PAD_DIV2_ENB) {
		u32 val = clk_readl(c->reg);
		if (setting)
			val |= PERIPH_CLK_NAND_DIV_EX_ENB;
		else
			val &= ~PERIPH_CLK_NAND_DIV_EX_ENB;
		clk_writel(val, c->reg);
		return 0;
	}
	return -EINVAL;
}

static struct clk_ops tegra_nand_clk_ops = {
	.init			= &tegra30_periph_clk_init,
	.enable			= &tegra30_periph_clk_enable,
	.disable		= &tegra30_periph_clk_disable,
	.set_parent		= &tegra30_periph_clk_set_parent,
	.set_rate		= &tegra30_periph_clk_set_rate,
	.round_rate		= &tegra30_periph_clk_round_rate,
	.clk_cfg_ex		= &tegra30_nand_clk_cfg_ex,
	.reset			= &tegra30_periph_clk_reset,
};

/*
 * Set/clear the DTV input polarity-invert bit (TEGRA_CLK_DTV_INVERT);
 * other parameters -> -EINVAL.
 */
static int tegra30_dtv_clk_cfg_ex(struct clk *c,
				enum tegra_clk_ex_param p, u32 setting)
{
	if (p == TEGRA_CLK_DTV_INVERT) {
		u32 val = clk_readl(c->reg);
		if (setting)
			val |= PERIPH_CLK_DTV_POLARITY_INV;
		else
			val &= ~PERIPH_CLK_DTV_POLARITY_INV;
		clk_writel(val, c->reg);
		return 0;
	}
	return -EINVAL;
}

static struct clk_ops tegra_dtv_clk_ops = {
	.init			= &tegra30_periph_clk_init,
	.enable			= &tegra30_periph_clk_enable,
	.disable		= &tegra30_periph_clk_disable,
	.set_parent		= &tegra30_periph_clk_set_parent,
	.set_rate		= &tegra30_periph_clk_set_rate,
	.round_rate		= &tegra30_periph_clk_round_rate,
	.clk_cfg_ex		= &tegra30_dtv_clk_cfg_ex,
	.reset			= &tegra30_periph_clk_reset,
};

/*
 * DSIB parent selection: the mux bit lives in the PLLD base register, so
 * the change is routed through tegra_clk_cfg_ex() on pll_d rather than
 * a direct read-modify-write here.
 */
static int tegra30_dsib_clk_set_parent(struct clk *c, struct clk *p)
{
	const struct clk_mux_sel *sel;
	struct clk *d = tegra_get_clock_by_name("pll_d");

	pr_debug("%s: %s %s\n", __func__, c->name, p->name);

	for (sel = c->inputs; sel->input != NULL; sel++) {
		if (sel->input == p) {
			if (c->refcnt)
				clk_enable(p);

			/* The DSIB parent selection bit is in PLLD base
			   register - can not do direct r-m-w, must be
			   protected by PLLD lock */
			tegra_clk_cfg_ex(
				d, TEGRA_CLK_PLLD_MIPI_MUX_SEL, sel->value);

			if (c->refcnt && c->parent)
				clk_disable(c->parent);

			clk_reparent(c, p);
			return 0;
		}
	}

	return -EINVAL;
}

static struct clk_ops tegra_dsib_clk_ops = {
	.init			= &tegra30_periph_clk_init,
	.enable			= &tegra30_periph_clk_enable,
	.disable		= &tegra30_periph_clk_disable,
	.set_parent		= &tegra30_dsib_clk_set_parent,
	.set_rate		= &tegra30_periph_clk_set_rate,
	.round_rate		= &tegra30_periph_clk_round_rate,
	.reset			= &tegra30_periph_clk_reset,
};

/* pciex clock support only reset function */
static struct clk_ops tegra_pciex_clk_ops = {
	.reset    = tegra30_periph_clk_reset,
};

/* Output clock ops */

static DEFINE_SPINLOCK(clk_out_lock);

/*
 * Initialize an external output clock (clk_out_N, routed through the PMC):
 * read back enable state and current mux input from the PMC register.
 */
static void tegra30_clk_out_init(struct clk *c)
{
	const struct clk_mux_sel *mux = 0;
	const struct clk_mux_sel *sel;
	u32 val = pmc_readl(c->reg);

	c->state = (val & (0x1 << c->u.periph.clk_num)) ?
ON : OFF; c->mul = 1; c->div = 1; for (sel = c->inputs; sel->input != NULL; sel++) { if (((val & periph_clk_source_mask(c)) >> periph_clk_source_shift(c)) == sel->value) mux = sel; } BUG_ON(!mux); c->parent = mux->input; } static int tegra30_clk_out_enable(struct clk *c) { u32 val; unsigned long flags; pr_debug("%s on clock %s\n", __func__, c->name); spin_lock_irqsave(&clk_out_lock, flags); val = pmc_readl(c->reg); val |= (0x1 << c->u.periph.clk_num); pmc_writel(val, c->reg); spin_unlock_irqrestore(&clk_out_lock, flags); return 0; } static void tegra30_clk_out_disable(struct clk *c) { u32 val; unsigned long flags; pr_debug("%s on clock %s\n", __func__, c->name); spin_lock_irqsave(&clk_out_lock, flags); val = pmc_readl(c->reg); val &= ~(0x1 << c->u.periph.clk_num); pmc_writel(val, c->reg); spin_unlock_irqrestore(&clk_out_lock, flags); } static int tegra30_clk_out_set_parent(struct clk *c, struct clk *p) { u32 val; unsigned long flags; const struct clk_mux_sel *sel; pr_debug("%s: %s %s\n", __func__, c->name, p->name); for (sel = c->inputs; sel->input != NULL; sel++) { if (sel->input == p) { if (c->refcnt) clk_enable(p); spin_lock_irqsave(&clk_out_lock, flags); val = pmc_readl(c->reg); val &= ~periph_clk_source_mask(c); val |= (sel->value << periph_clk_source_shift(c)); pmc_writel(val, c->reg); spin_unlock_irqrestore(&clk_out_lock, flags); if (c->refcnt && c->parent) clk_disable(c->parent); clk_reparent(c, p); return 0; } } return -EINVAL; } static struct clk_ops tegra_clk_out_ops = { .init = &tegra30_clk_out_init, .enable = &tegra30_clk_out_enable, .disable = &tegra30_clk_out_disable, .set_parent = &tegra30_clk_out_set_parent, }; /* Clock doubler ops */ static void tegra30_clk_double_init(struct clk *c) { u32 val = clk_readl(c->reg); c->mul = val & (0x1 << c->reg_shift) ? 
1 : 2; c->div = 1; c->state = ON; if (!(clk_readl(PERIPH_CLK_TO_ENB_REG(c)) & PERIPH_CLK_TO_BIT(c))) c->state = OFF; }; static int tegra30_clk_double_set_rate(struct clk *c, unsigned long rate) { u32 val; unsigned long parent_rate = clk_get_rate(c->parent); if (rate == parent_rate) { val = clk_readl(c->reg) | (0x1 << c->reg_shift); clk_writel(val, c->reg); c->mul = 1; c->div = 1; return 0; } else if (rate == 2 * parent_rate) { val = clk_readl(c->reg) & (~(0x1 << c->reg_shift)); clk_writel(val, c->reg); c->mul = 2; c->div = 1; return 0; } return -EINVAL; } static struct clk_ops tegra_clk_double_ops = { .init = &tegra30_clk_double_init, .enable = &tegra30_periph_clk_enable, .disable = &tegra30_periph_clk_disable, .set_rate = &tegra30_clk_double_set_rate, }; /* Audio sync clock ops */ static int tegra30_sync_source_set_rate(struct clk *c, unsigned long rate) { c->rate = rate; return 0; } static struct clk_ops tegra_sync_source_ops = { .set_rate = &tegra30_sync_source_set_rate, }; static void tegra30_audio_sync_clk_init(struct clk *c) { int source; const struct clk_mux_sel *sel; u32 val = clk_readl(c->reg); c->state = (val & AUDIO_SYNC_DISABLE_BIT) ? 
OFF : ON; source = val & AUDIO_SYNC_SOURCE_MASK; for (sel = c->inputs; sel->input != NULL; sel++) if (sel->value == source) break; BUG_ON(sel->input == NULL); c->parent = sel->input; } static int tegra30_audio_sync_clk_enable(struct clk *c) { u32 val = clk_readl(c->reg); clk_writel((val & (~AUDIO_SYNC_DISABLE_BIT)), c->reg); return 0; } static void tegra30_audio_sync_clk_disable(struct clk *c) { u32 val = clk_readl(c->reg); clk_writel((val | AUDIO_SYNC_DISABLE_BIT), c->reg); } static int tegra30_audio_sync_clk_set_parent(struct clk *c, struct clk *p) { u32 val; const struct clk_mux_sel *sel; for (sel = c->inputs; sel->input != NULL; sel++) { if (sel->input == p) { val = clk_readl(c->reg); val &= ~AUDIO_SYNC_SOURCE_MASK; val |= sel->value; if (c->refcnt) clk_enable(p); clk_writel(val, c->reg); if (c->refcnt && c->parent) clk_disable(c->parent); clk_reparent(c, p); return 0; } } return -EINVAL; } static struct clk_ops tegra_audio_sync_clk_ops = { .init = tegra30_audio_sync_clk_init, .enable = tegra30_audio_sync_clk_enable, .disable = tegra30_audio_sync_clk_disable, .set_parent = tegra30_audio_sync_clk_set_parent, }; /* cml0 (pcie), and cml1 (sata) clock ops */ static void tegra30_cml_clk_init(struct clk *c) { u32 val = clk_readl(c->reg); c->state = val & (0x1 << c->u.periph.clk_num) ? 
/* (continuation of tegra30_cml_clk_init(): result of the state ternary) */
ON : OFF;
}

/* cml enable/disable just toggle the clock's bit in its (PLLE_AUX) reg */
static int tegra30_cml_clk_enable(struct clk *c)
{
	u32 val = clk_readl(c->reg);
	val |= (0x1 << c->u.periph.clk_num);
	clk_writel(val, c->reg);
	return 0;
}

static void tegra30_cml_clk_disable(struct clk *c)
{
	u32 val = clk_readl(c->reg);
	val &= ~(0x1 << c->u.periph.clk_num);
	clk_writel(val, c->reg);
}

static struct clk_ops tegra_cml_clk_ops = {
	.init			= &tegra30_cml_clk_init,
	.enable			= &tegra30_cml_clk_enable,
	.disable		= &tegra30_cml_clk_disable,
};

/* Clock definitions */

static struct clk tegra_clk_32k = {
	.name = "clk_32k",
	.rate = 32768,
	.ops  = NULL,
	.max_rate = 32768,
};

static struct clk tegra_clk_m = {
	.name      = "clk_m",
	.flags     = ENABLE_ON_INIT,
	.ops       = &tegra_clk_m_ops,
	.reg       = 0x1fc,
	.reg_shift = 28,
	.max_rate  = 48000000,
};

static struct clk tegra_clk_m_div2 = {
	.name      = "clk_m_div2",
	.ops       = &tegra_clk_m_div_ops,
	.parent    = &tegra_clk_m,
	.mul       = 1,
	.div       = 2,
	.state     = ON,
	.max_rate  = 24000000,
};

static struct clk tegra_clk_m_div4 = {
	.name      = "clk_m_div4",
	.ops       = &tegra_clk_m_div_ops,
	.parent    = &tegra_clk_m,
	.mul       = 1,
	.div       = 4,
	.state     = ON,
	.max_rate  = 12000000,
};

static struct clk tegra_pll_ref = {
	.name      = "pll_ref",
	.flags     = ENABLE_ON_INIT,
	.ops       = &tegra_pll_ref_ops,
	.parent    = &tegra_clk_m,
	.max_rate  = 26000000,
};

/* Rows: { input rate, output rate, then PLL coefficients } -- exact field
 * names are in struct clk_pll_freq_table (declared elsewhere); the last
 * column appears to be cpcon (cf. the PLLE table comment below) --
 * TODO(review): confirm against the struct definition. */
static struct clk_pll_freq_table tegra_pll_c_freq_table[] = {
	{ 12000000, 1040000000, 520,  6, 1, 8},
	{ 13000000, 1040000000, 480,  6, 1, 8},
	{ 16800000, 1040000000, 495,  8, 1, 8},	/* actual: 1039.5 MHz */
	{ 19200000, 1040000000, 325,  6, 1, 6},
	{ 26000000, 1040000000, 520, 13, 1, 8},

	{ 12000000, 832000000, 416,  6, 1, 8},
	{ 13000000, 832000000, 832, 13, 1, 8},
	{ 16800000, 832000000, 396,  8, 1, 8},	/* actual: 831.6 MHz */
	{ 19200000, 832000000, 260,  6, 1, 8},
	{ 26000000, 832000000, 416, 13, 1, 8},

	{ 12000000, 624000000, 624, 12, 1, 8},
	{ 13000000, 624000000, 624, 13, 1, 8},
	{ 16800000, 600000000, 520, 14, 1, 8},
	{ 19200000, 624000000, 520, 16, 1, 8},
	{ 26000000, 624000000, 624, 26, 1, 8},

	{ 12000000, 600000000, 600, 12, 1, 8},
	{ 13000000, 600000000, 600, 13, 1, 8},
	{ 16800000, 600000000, 500, 14, 1, 8},
	{ 19200000, 600000000, 375, 12, 1, 6},
	{ 26000000, 600000000, 600, 26, 1, 8},

	{ 12000000, 520000000, 520, 12, 1, 8},
	{ 13000000, 520000000, 520, 13, 1, 8},
	{ 16800000, 520000000, 495, 16, 1, 8},	/* actual: 519.75 MHz */
	{ 19200000, 520000000, 325, 12, 1, 6},
	{ 26000000, 520000000, 520, 26, 1, 8},

	{ 12000000, 416000000, 416, 12, 1, 8},
	{ 13000000, 416000000, 416, 13, 1, 8},
	{ 16800000, 416000000, 396, 16, 1, 8},	/* actual: 415.8 MHz */
	{ 19200000, 416000000, 260, 12, 1, 6},
	{ 26000000, 416000000, 416, 26, 1, 8},
	{ 0, 0, 0, 0, 0, 0 },
};

static struct clk tegra_pll_c = {
	.name      = "pll_c",
	.flags     = PLL_HAS_CPCON,
	.ops       = &tegra_pll_ops,
	.reg       = 0x80,
	.parent    = &tegra_pll_ref,
	.max_rate  = 1400000000,
	.u.pll = {
		.input_min = 2000000,
		.input_max = 31000000,
		.cf_min    = 1000000,
		.cf_max    = 6000000,
		.vco_min   = 20000000,
		.vco_max   = 1400000000,
		.freq_table = tegra_pll_c_freq_table,
		.lock_delay = 300,
	},
};

static struct clk tegra_pll_c_out1 = {
	.name      = "pll_c_out1",
	.ops       = &tegra_pll_div_ops,
	.flags     = DIV_U71,
	.parent    = &tegra_pll_c,
	.reg       = 0x84,
	.reg_shift = 0,
	.max_rate  = 700000000,
};

static struct clk_pll_freq_table tegra_pll_m_freq_table[] = {
	{ 12000000, 666000000, 666, 12, 1, 8},
	{ 13000000, 666000000, 666, 13, 1, 8},
	{ 16800000, 666000000, 555, 14, 1, 8},
	{ 19200000, 666000000, 555, 16, 1, 8},
	{ 26000000, 666000000, 666, 26, 1, 8},
	{ 12000000, 600000000, 600, 12, 1, 8},
	{ 13000000, 600000000, 600, 13, 1, 8},
	{ 16800000, 600000000, 500, 14, 1, 8},
	{ 19200000, 600000000, 375, 12, 1, 6},
	{ 26000000, 600000000, 600, 26, 1, 8},
	{ 0, 0, 0, 0, 0, 0 },
};

static struct clk tegra_pll_m = {
	.name      = "pll_m",
	.flags     = PLL_HAS_CPCON | PLLM,
	.ops       = &tegra_pll_ops,
	.reg       = 0x90,
	.parent    = &tegra_pll_ref,
	.max_rate  = 800000000,
	.u.pll = {
		.input_min = 2000000,
		.input_max = 31000000,
		.cf_min    = 1000000,
		.cf_max    = 6000000,
		.vco_min   = 20000000,
		.vco_max   = 1200000000,
		.freq_table = tegra_pll_m_freq_table,
		.lock_delay = 300,
	},
};

static struct clk tegra_pll_m_out1 = {
	.name      = "pll_m_out1",
	.ops       = &tegra_pll_div_ops,
	.flags     = DIV_U71,
	.parent    = &tegra_pll_m,
	.reg       = 0x94,
	.reg_shift = 0,
	.max_rate  = 600000000,
};

static struct clk_pll_freq_table tegra_pll_p_freq_table[] = {
	{ 12000000, 216000000, 432, 12, 2, 8},
	{ 13000000, 216000000, 432, 13, 2, 8},
	{ 16800000, 216000000, 360, 14, 2, 8},
	{ 19200000, 216000000, 360, 16, 2, 8},
	{ 26000000, 216000000, 432, 26, 2, 8},
	{ 0, 0, 0, 0, 0, 0 },
};

static struct clk tegra_pll_p = {
	.name      = "pll_p",
	.flags     = ENABLE_ON_INIT | PLL_FIXED | PLL_HAS_CPCON,
	.ops       = &tegra_pll_ops,
	.reg       = 0xa0,
	.parent    = &tegra_pll_ref,
	.max_rate  = 432000000,
	.u.pll = {
		.input_min = 2000000,
		.input_max = 31000000,
		.cf_min    = 1000000,
		.cf_max    = 6000000,
		.vco_min   = 20000000,
		.vco_max   = 1400000000,
		.freq_table = tegra_pll_p_freq_table,
		.lock_delay = 300,
		.fixed_rate = 408000000,
	},
};

static struct clk tegra_pll_p_out1 = {
	.name      = "pll_p_out1",
	.ops       = &tegra_pll_div_ops,
	.flags     = ENABLE_ON_INIT | DIV_U71 | DIV_U71_FIXED,
	.parent    = &tegra_pll_p,
	.reg       = 0xa4,
	.reg_shift = 0,
	.max_rate  = 432000000,
};

static struct clk tegra_pll_p_out2 = {
	.name      = "pll_p_out2",
	.ops       = &tegra_pll_div_ops,
	.flags     = ENABLE_ON_INIT | DIV_U71 | DIV_U71_FIXED,
	.parent    = &tegra_pll_p,
	.reg       = 0xa4,
	.reg_shift = 16,
	.max_rate  = 432000000,
};

static struct clk tegra_pll_p_out3 = {
	.name      = "pll_p_out3",
	.ops       = &tegra_pll_div_ops,
	.flags     = ENABLE_ON_INIT | DIV_U71 | DIV_U71_FIXED,
	.parent    = &tegra_pll_p,
	.reg       = 0xa8,
	.reg_shift = 0,
	.max_rate  = 432000000,
};

static struct clk tegra_pll_p_out4 = {
	.name      = "pll_p_out4",
	.ops       = &tegra_pll_div_ops,
	.flags     = ENABLE_ON_INIT | DIV_U71 | DIV_U71_FIXED,
	.parent    = &tegra_pll_p,
	.reg       = 0xa8,
	.reg_shift = 16,
	.max_rate  = 432000000,
};

static struct clk_pll_freq_table tegra_pll_a_freq_table[] = {
	{ 9600000, 564480000, 294, 5, 1, 4},
	{ 9600000, 552960000, 288, 5, 1, 4},
	{ 9600000, 24000000,  5,   2, 1, 1},

	{ 28800000, 56448000, 49, 25, 1, 1},
	{ 28800000, 73728000, 64, 25, 1, 1},
	{ 28800000, 24000000,  5,  6, 1, 1},
	{ 0, 0, 0, 0, 0, 0 },
};

static struct clk tegra_pll_a = {
	.name      = "pll_a",
	.flags     = PLL_HAS_CPCON,
	.ops       = &tegra_pll_ops,
	.reg       = 0xb0,
	.parent    = &tegra_pll_p_out1,
	.max_rate  = 700000000,
	.u.pll = {
		.input_min = 2000000,
		.input_max = 31000000,
		.cf_min    = 1000000,
		.cf_max    = 6000000,
		.vco_min   = 20000000,
		.vco_max   = 1400000000,
		.freq_table = tegra_pll_a_freq_table,
		.lock_delay = 300,
	},
};

static struct clk tegra_pll_a_out0 = {
	.name      = "pll_a_out0",
	.ops       = &tegra_pll_div_ops,
	.flags     = DIV_U71,
	.parent    = &tegra_pll_a,
	.reg       = 0xb4,
	.reg_shift = 0,
	.max_rate  = 100000000,
};

static struct clk_pll_freq_table tegra_pll_d_freq_table[] = {
	{ 12000000, 216000000, 216, 12, 1, 4},
	{ 13000000, 216000000, 216, 13, 1, 4},
	{ 16800000, 216000000, 180, 14, 1, 4},
	{ 19200000, 216000000, 180, 16, 1, 4},
	{ 26000000, 216000000, 216, 26, 1, 4},

	{ 12000000, 594000000, 594, 12, 1, 8},
	{ 13000000, 594000000, 594, 13, 1, 8},
	{ 16800000, 594000000, 495, 14, 1, 8},
	{ 19200000, 594000000, 495, 16, 1, 8},
	{ 26000000, 594000000, 594, 26, 1, 8},

	{ 12000000, 1000000000, 1000, 12, 1, 12},
	{ 13000000, 1000000000, 1000, 13, 1, 12},
	{ 19200000, 1000000000, 625,  12, 1, 8},
	{ 26000000, 1000000000, 1000, 26, 1, 12},

	{ 0, 0, 0, 0, 0, 0 },
};

static struct clk tegra_pll_d = {
	.name      = "pll_d",
	.flags     = PLL_HAS_CPCON | PLLD,
	.ops       = &tegra_plld_ops,
	.reg       = 0xd0,
	.parent    = &tegra_pll_ref,
	.max_rate  = 1000000000,
	.u.pll = {
		.input_min = 2000000,
		.input_max = 40000000,
		.cf_min    = 1000000,
		.cf_max    = 6000000,
		.vco_min   = 40000000,
		.vco_max   = 1000000000,
		.freq_table = tegra_pll_d_freq_table,
		.lock_delay = 1000,
	},
};

static struct clk tegra_pll_d_out0 = {
	.name      = "pll_d_out0",
	.ops       = &tegra_pll_div_ops,
	.flags     = DIV_2 | PLLD,
	.parent    = &tegra_pll_d,
	.max_rate  = 500000000,
};

/* pll_d2 shares pll_d's frequency table */
static struct clk tegra_pll_d2 = {
	.name      = "pll_d2",
	.flags     = PLL_HAS_CPCON | PLL_ALT_MISC_REG | PLLD,
	.ops       = &tegra_plld_ops,
	.reg       = 0x4b8,
	.parent    = &tegra_pll_ref,
	.max_rate  = 1000000000,
	.u.pll = {
		.input_min = 2000000,
		.input_max = 40000000,
		.cf_min    = 1000000,
		.cf_max    = 6000000,
		.vco_min   = 40000000,
		.vco_max   = 1000000000,
		.freq_table = tegra_pll_d_freq_table,
		.lock_delay = 1000,
	},
};

static struct clk tegra_pll_d2_out0 = {
	.name      = "pll_d2_out0",
	.ops       = &tegra_pll_div_ops,
	.flags     = DIV_2 | PLLD,
	.parent    = &tegra_pll_d2,
	.max_rate  = 500000000,
};

static struct clk_pll_freq_table tegra_pll_u_freq_table[] = {
	{ 12000000, 480000000, 960, 12, 2, 12},
	{ 13000000, 480000000, 960, 13, 2, 12},
	{ 16800000, 480000000, 400, 7,  2, 5},
	{ 19200000, 480000000, 200, 4,  2, 3},
	{ 26000000, 480000000, 960, 26, 2, 12},
	{ 0, 0, 0, 0, 0, 0 },
};

static struct clk tegra_pll_u = {
	.name      = "pll_u",
	.flags     = PLL_HAS_CPCON | PLLU,
	.ops       = &tegra_pll_ops,
	.reg       = 0xc0,
	.parent    = &tegra_pll_ref,
	.max_rate  = 480000000,
	.u.pll = {
		.input_min = 2000000,
		.input_max = 40000000,
		.cf_min    = 1000000,
		.cf_max    = 6000000,
		.vco_min   = 480000000,
		.vco_max   = 960000000,
		.freq_table = tegra_pll_u_freq_table,
		.lock_delay = 1000,
	},
};

static struct clk_pll_freq_table tegra_pll_x_freq_table[] = {
	/* 1.7 GHz */
	{ 12000000, 1700000000, 850,  6,  1, 8},
	{ 13000000, 1700000000, 915,  7,  1, 8},	/* actual: 1699.2 MHz */
	{ 16800000, 1700000000, 708,  7,  1, 8},	/* actual: 1699.2 MHz */
	{ 19200000, 1700000000, 885,  10, 1, 8},	/* actual: 1699.2 MHz */
	{ 26000000, 1700000000, 850,  13, 1, 8},

	/* 1.6 GHz */
	{ 12000000, 1600000000, 800,  6,  1, 8},
	{ 13000000, 1600000000, 738,  6,  1, 8},	/* actual: 1599.0 MHz */
	{ 16800000, 1600000000, 857,  9,  1, 8},	/* actual: 1599.7 MHz */
	{ 19200000, 1600000000, 500,  6,  1, 8},
	{ 26000000, 1600000000, 800,  13, 1, 8},

	/* 1.5 GHz */
	{ 12000000, 1500000000, 750,  6,  1, 8},
	{ 13000000, 1500000000, 923,  8,  1, 8},	/* actual: 1499.8 MHz */
	{ 16800000, 1500000000, 625,  7,  1, 8},
	{ 19200000, 1500000000, 625,  8,  1, 8},
	{ 26000000, 1500000000, 750,  13, 1, 8},

	/* 1.4 GHz */
	{ 12000000, 1400000000, 700,  6,  1, 8},
	{ 13000000, 1400000000, 969,  9,  1, 8},	/* actual: 1399.7 MHz */
	{ 16800000, 1400000000, 1000, 12, 1, 8},
	{ 19200000, 1400000000, 875,  12, 1, 8},
	{ 26000000, 1400000000, 700,  13, 1, 8},

	/* 1.3 GHz */
	{ 12000000, 1300000000, 975,  9,  1, 8},
	{ 13000000, 1300000000, 1000, 10, 1, 8},
	{ 16800000, 1300000000, 928,  12, 1, 8},	/* actual: 1299.2 MHz */
	{ 19200000, 1300000000, 812,  12, 1, 8},	/* actual: 1299.2 MHz */
	{ 26000000, 1300000000, 650,  13, 1, 8},

	/* 1.2 GHz */
	{ 12000000, 1200000000, 1000, 10, 1, 8},
	{ 13000000, 1200000000, 923,  10, 1, 8},	/* actual: 1199.9 MHz */
	{ 16800000, 1200000000, 1000, 14, 1, 8},
	{ 19200000, 1200000000, 1000, 16, 1, 8},
	{ 26000000, 1200000000, 600,  13, 1, 8},

	/* 1.1 GHz */
	{ 12000000, 1100000000, 825,  9,  1, 8},
	{ 13000000, 1100000000, 846,  10, 1, 8},	/* actual: 1099.8 MHz */
	{ 16800000, 1100000000, 982,  15, 1, 8},	/* actual: 1099.8 MHz */
	{ 19200000, 1100000000, 859,  15, 1, 8},	/* actual: 1099.5 MHz */
	{ 26000000, 1100000000, 550,  13, 1, 8},

	/* 1 GHz */
	{ 12000000, 1000000000, 1000, 12, 1, 8},
	{ 13000000, 1000000000, 1000, 13, 1, 8},
	{ 16800000, 1000000000, 833,  14, 1, 8},	/* actual: 999.6 MHz */
	{ 19200000, 1000000000, 625,  12, 1, 8},
	{ 26000000, 1000000000, 1000, 26, 1, 8},

	{ 0, 0, 0, 0, 0, 0 },
};

static struct clk tegra_pll_x = {
	.name      = "pll_x",
	.flags     = PLL_HAS_CPCON | PLL_ALT_MISC_REG | PLLX,
	.ops       = &tegra_pll_ops,
	.reg       = 0xe0,
	.parent    = &tegra_pll_ref,
	.max_rate  = 1700000000,
	.u.pll = {
		.input_min = 2000000,
		.input_max = 31000000,
		.cf_min    = 1000000,
		.cf_max    = 6000000,
		.vco_min   = 20000000,
		.vco_max   = 1700000000,
		.freq_table = tegra_pll_x_freq_table,
		.lock_delay = 300,
	},
};

static struct clk tegra_pll_x_out0 = {
	.name      = "pll_x_out0",
	.ops       = &tegra_pll_div_ops,
	.flags     = DIV_2 | PLLX,
	.parent    = &tegra_pll_x,
	.max_rate  = 850000000,
};

static struct clk_pll_freq_table tegra_pll_e_freq_table[] = {
	/* PLLE special case: use cpcon field to store cml divider value */
	{ 12000000,  100000000, 150, 1,  18, 11},
	{ 216000000, 100000000, 200, 18, 24, 13},
	{ 0, 0, 0, 0, 0, 0 },
};

static struct clk tegra_pll_e = {
	.name      = "pll_e",
	.flags     = PLL_ALT_MISC_REG,
	.ops       = &tegra_plle_ops,
	.reg       = 0xe8,
	.max_rate  = 100000000,
	.u.pll = {
		.input_min = 12000000,
		.input_max = 216000000,
		.cf_min    = 12000000,
		.cf_max    = 12000000,
		.vco_min   = 1200000000,
		.vco_max   = 2400000000U,
		.freq_table = tegra_pll_e_freq_table,
		.lock_delay = 300,
		.fixed_rate = 100000000,
	},
};

/* cml0 (pcie) and cml1 (sata) are enable bits in the PLLE_AUX register */
static struct clk tegra_cml0_clk = {
	.name      = "cml0",
	.parent    = &tegra_pll_e,
	.ops       = &tegra_cml_clk_ops,
	.reg       = PLLE_AUX,
	.max_rate  = 100000000,
	.u.periph  = {
		.clk_num = 0,
	},
};

static struct clk tegra_cml1_clk = {
	.name      = "cml1",
	.parent    = &tegra_pll_e,
	.ops       = &tegra_cml_clk_ops,
	.reg       = PLLE_AUX,
	.max_rate  = 100000000,
	.u.periph  = {
		.clk_num = 1,
	},
};

static struct clk tegra_pciex_clk = {
	.name      = "pciex",
	.parent    = &tegra_pll_e,
	.ops       = &tegra_pciex_clk_ops,
	.max_rate  = 100000000,
	.u.periph  = {
		.clk_num = 74,
	},
};

/* Audio sync clocks */
#define SYNC_SOURCE(_id)				\
	{						\
		.name      = #_id "_sync",		\
		.rate      = 24000000,			\
		.max_rate  = 24000000,			\
		.ops       = &tegra_sync_source_ops	\
	}
static struct clk tegra_sync_source_list[] = {
	SYNC_SOURCE(spdif_in),
	SYNC_SOURCE(i2s0),
	SYNC_SOURCE(i2s1),
	SYNC_SOURCE(i2s2),
	SYNC_SOURCE(i2s3),
	SYNC_SOURCE(i2s4),
	SYNC_SOURCE(vimclk),
};

/* mux input 6 is pll_a_out0; the rest are the sync sources above */
static struct clk_mux_sel mux_audio_sync_clk[] = {
	{ .input = &tegra_sync_source_list[0],	.value = 0},
	{ .input = &tegra_sync_source_list[1],	.value = 1},
	{ .input = &tegra_sync_source_list[2],	.value = 2},
	{ .input = &tegra_sync_source_list[3],	.value = 3},
	{ .input = &tegra_sync_source_list[4],	.value = 4},
	{ .input = &tegra_sync_source_list[5],	.value = 5},
	{ .input = &tegra_pll_a_out0,		.value = 6},
	{ .input = &tegra_sync_source_list[6],	.value = 7},
	{ 0, 0 }
};

#define AUDIO_SYNC_CLK(_id, _index)			\
	{						\
		.name      = #_id,			\
		.inputs    = mux_audio_sync_clk,	\
		.reg       = 0x4A0 + (_index) * 4,	\
		.max_rate  = 24000000,			\
		.ops       = &tegra_audio_sync_clk_ops	\
	}
static struct clk tegra_clk_audio_list[] = {
	AUDIO_SYNC_CLK(audio0, 0),
	AUDIO_SYNC_CLK(audio1, 1),
	AUDIO_SYNC_CLK(audio2, 2),
	AUDIO_SYNC_CLK(audio3, 3),
	AUDIO_SYNC_CLK(audio4, 4),
	AUDIO_SYNC_CLK(audio, 5),	/* SPDIF */
};

#define AUDIO_SYNC_2X_CLK(_id, _index)				\
	{							\
		.name      = #_id "_2x",			\
		.flags     = PERIPH_NO_RESET,			\
		.max_rate  = 48000000,				\
		.ops       = &tegra_clk_double_ops,		\
		.reg       = 0x49C,				\
		.reg_shift = 24 + (_index),			\
		.parent    = &tegra_clk_audio_list[(_index)],	\
		.u.periph = {					\
			.clk_num = 113 + (_index),		\
		},						\
	}
static struct clk tegra_clk_audio_2x_list[] = {
	AUDIO_SYNC_2X_CLK(audio0, 0),
	AUDIO_SYNC_2X_CLK(audio1, 1),
	AUDIO_SYNC_2X_CLK(audio2, 2),
	AUDIO_SYNC_2X_CLK(audio3, 3),
	AUDIO_SYNC_2X_CLK(audio4, 4),
	AUDIO_SYNC_2X_CLK(audio, 5),	/* SPDIF */
};

/* Per-I2S/SPDIF source mux: pll_a_out0 / audioN_2x / pll_p / clk_m */
#define MUX_I2S_SPDIF(_id, _index)					\
static struct clk_mux_sel mux_pllaout0_##_id##_2x_pllp_clkm[] = {	\
	{.input = &tegra_pll_a_out0, .value = 0},			\
	{.input = &tegra_clk_audio_2x_list[(_index)], .value = 1},	\
	{.input = &tegra_pll_p, .value = 2},				\
	{.input = &tegra_clk_m, .value = 3},				\
	{ 0, 0},							\
}
MUX_I2S_SPDIF(audio0, 0);
MUX_I2S_SPDIF(audio1, 1);
MUX_I2S_SPDIF(audio2, 2);
MUX_I2S_SPDIF(audio3, 3);
MUX_I2S_SPDIF(audio4, 4);
MUX_I2S_SPDIF(audio, 5);	/* SPDIF */

/* External clock outputs (through PMC) */
#define MUX_EXTERN_OUT(_id)						\
static struct clk_mux_sel mux_clkm_clkm2_clkm4_extern##_id[] = {	\
	{.input = &tegra_clk_m,      .value = 0},			\
	{.input = &tegra_clk_m_div2, .value = 1},			\
	{.input = &tegra_clk_m_div4, .value = 2},			\
	{.input = NULL,              .value = 3}, /* placeholder */	\
	{ 0, 0},							\
}
MUX_EXTERN_OUT(1);
MUX_EXTERN_OUT(2);
MUX_EXTERN_OUT(3);

static struct clk_mux_sel *mux_extern_out_list[] = {
	mux_clkm_clkm2_clkm4_extern1,
	mux_clkm_clkm2_clkm4_extern2,
	mux_clkm_clkm2_clkm4_extern3,
};

#define CLK_OUT_CLK(_id)					\
	{							\
		.name      = "clk_out_" #_id,			\
		.lookup    = {					\
			.dev_id    = "clk_out_" #_id,		\
			.con_id    = "extern" #_id,		\
		},						\
		.ops       = &tegra_clk_out_ops,		\
		.reg       = 0x1a8,				\
		.inputs    = mux_clkm_clkm2_clkm4_extern##_id,	\
		.flags     = MUX_CLK_OUT,			\
		.max_rate  = 216000000,				\
		.u.periph = {					\
			.clk_num   = (_id - 1) * 8 + 2,		\
		},						\
	}
static struct clk tegra_clk_out_list[] = {
	CLK_OUT_CLK(1),
	CLK_OUT_CLK(2),
	CLK_OUT_CLK(3),
};
/* called after peripheral external clocks are initialized */
static void init_clk_out_mux(void)
{
	int i;
	struct clk *c;

	/* output clock con_id is the name of peripheral
	   external clock connected to input 3 of the output mux */
	for (i = 0; i < ARRAY_SIZE(tegra_clk_out_list); i++) {
		c = tegra_get_clock_by_name(
			tegra_clk_out_list[i].lookup.con_id);
		if (!c)
			pr_err("%s: could not find clk %s\n", __func__,
			       tegra_clk_out_list[i].lookup.con_id);
		/* NOTE: if the lookup failed, input 3 stays NULL here */
		mux_extern_out_list[i][3].input = c;
	}
}

/* Peripheral muxes */

static struct clk_mux_sel mux_sclk[] = {
	{ .input = &tegra_clk_m,	.value = 0},
	{ .input = &tegra_pll_c_out1,	.value = 1},
	{ .input = &tegra_pll_p_out4,	.value = 2},
	{ .input = &tegra_pll_p_out3,	.value = 3},
	{ .input = &tegra_pll_p_out2,	.value = 4},
	/* { .input = &tegra_clk_d,	.value = 5}, - no use on tegra30 */
	{ .input = &tegra_clk_32k,	.value = 6},
	{ .input = &tegra_pll_m_out1,	.value = 7},
	{ 0, 0},
};

static struct clk tegra_clk_sclk = {
	.name	= "sclk",
	.inputs	= mux_sclk,
	.reg	= 0x28,
	.ops	= &tegra_super_ops,
	.max_rate = 334000000,
	.min_rate = 40000000,
};

static struct clk tegra_clk_blink = {
	.name		= "blink",
	.parent		= &tegra_clk_32k,
	.reg		= 0x40,
	.ops		= &tegra_blink_clk_ops,
	.max_rate	= 32768,
};

static struct clk_mux_sel mux_pllm_pllc_pllp_plla[] = {
	{ .input = &tegra_pll_m,	.value = 0},
	{ .input = &tegra_pll_c,	.value = 1},
	{ .input = &tegra_pll_p,	.value = 2},
	{ .input = &tegra_pll_a_out0,	.value = 3},
	{ 0, 0},
};

static struct clk_mux_sel mux_pllp_pllc_pllm_clkm[] = {
	{ .input = &tegra_pll_p,	.value = 0},
	{ .input = &tegra_pll_c,	.value = 1},
	{ .input = &tegra_pll_m,	.value = 2},
	{ .input = &tegra_clk_m,	.value = 3},
	{ 0, 0},
};

static struct clk_mux_sel mux_pllp_clkm[] = {
	{ .input = &tegra_pll_p,	.value = 0},
	{ .input = &tegra_clk_m,	.value = 3},
	{ 0, 0},
};

static struct clk_mux_sel mux_pllp_plld_pllc_clkm[] = {
	{.input = &tegra_pll_p,		.value = 0},
	{.input = &tegra_pll_d_out0,	.value = 1},
	{.input = &tegra_pll_c,		.value = 2},
	{.input = &tegra_clk_m,		.value = 3},
	{ 0, 0},
};

static struct clk_mux_sel mux_pllp_pllm_plld_plla_pllc_plld2_clkm[] = {
	{.input = &tegra_pll_p,		.value = 0},
	{.input = &tegra_pll_m,		.value = 1},
	{.input = &tegra_pll_d_out0,	.value = 2},
	{.input = &tegra_pll_a_out0,	.value = 3},
	{.input = &tegra_pll_c,		.value = 4},
	{.input = &tegra_pll_d2_out0,	.value = 5},
	{.input = &tegra_clk_m,		.value = 6},
	{ 0, 0},
};

static struct clk_mux_sel mux_plla_pllc_pllp_clkm[] = {
	{ .input = &tegra_pll_a_out0,	.value = 0},
	/* { .input = &tegra_pll_c,	.value = 1}, no use on tegra30 */
	{ .input = &tegra_pll_p,	.value = 2},
	{ .input = &tegra_clk_m,	.value = 3},
	{ 0, 0},
};

static struct clk_mux_sel mux_pllp_pllc_clk32_clkm[] = {
	{.input = &tegra_pll_p,		.value = 0},
	{.input = &tegra_pll_c,		.value = 1},
	{.input = &tegra_clk_32k,	.value = 2},
	{.input = &tegra_clk_m,		.value = 3},
	{ 0, 0},
};

static struct clk_mux_sel mux_pllp_pllc_clkm_clk32[] = {
	{.input = &tegra_pll_p,		.value = 0},
	{.input = &tegra_pll_c,		.value = 1},
	{.input = &tegra_clk_m,		.value = 2},
	{.input = &tegra_clk_32k,	.value = 3},
	{ 0, 0},
};

static struct clk_mux_sel mux_pllp_pllc_pllm[] = {
	{.input = &tegra_pll_p,		.value = 0},
	{.input = &tegra_pll_c,		.value = 1},
	{.input = &tegra_pll_m,		.value = 2},
	{ 0, 0},
};

static struct clk_mux_sel mux_clk_m[] = {
	{ .input = &tegra_clk_m,	.value = 0},
	{ 0, 0},
};

static struct clk_mux_sel mux_pllp_out3[] = {
	{ .input = &tegra_pll_p_out3,	.value = 0},
	{ 0, 0},
};

static struct clk_mux_sel mux_plld_out0[] = {
	{ .input = &tegra_pll_d_out0,	.value = 0},
	{ 0, 0},
};

static struct clk_mux_sel mux_plld_out0_plld2_out0[] = {
	{ .input = &tegra_pll_d_out0,	.value = 0},
	{ .input = &tegra_pll_d2_out0,	.value = 1},
	{ 0, 0},
};

static struct clk_mux_sel mux_clk_32k[] = {
	{ .input = &tegra_clk_32k,	.value = 0},
	{ 0, 0},
};

static struct clk_mux_sel mux_plla_clk32_pllp_clkm_plle[] = {
	{ .input = &tegra_pll_a_out0,	.value = 0},
	{ .input = &tegra_clk_32k,	.value = 1},
	{ .input = &tegra_pll_p,	.value = 2},
	{ .input = &tegra_clk_m,
					.value = 3},
	{ .input = &tegra_pll_e,	.value = 4},
	{ 0, 0},
};

/* CPU G-cluster super clock sources (note: no input with value 7) */
static struct clk_mux_sel mux_cclk_g[] = {
	{ .input = &tegra_clk_m,	.value = 0},
	{ .input = &tegra_pll_c,	.value = 1},
	{ .input = &tegra_clk_32k,	.value = 2},
	{ .input = &tegra_pll_m,	.value = 3},
	{ .input = &tegra_pll_p,	.value = 4},
	{ .input = &tegra_pll_p_out4,	.value = 5},
	{ .input = &tegra_pll_p_out3,	.value = 6},
	{ .input = &tegra_pll_x,	.value = 8},
	{ 0, 0},
};

static struct clk tegra_clk_cclk_g = {
	.name	= "cclk_g",
	.flags  = DIV_U71 | DIV_U71_INT,
	.inputs	= mux_cclk_g,
	.reg	= 0x368,
	.ops	= &tegra_super_ops,
	.max_rate = 1700000000,
};

/* ARM local timer: runs at half of the CPU complex clock */
static struct clk tegra30_clk_twd = {
	.parent   = &tegra_clk_cclk_g,
	.name     = "twd",
	.ops      = &tegra30_twd_ops,
	.max_rate = 1400000000,	/* Same as tegra_clk_cpu_cmplx.max_rate */
	.mul      = 1,
	.div      = 2,
};

#define PERIPH_CLK(_name, _dev, _con, _clk_num, _reg, _max, _inputs, _flags) \
	{						\
		.name      = _name,			\
		.lookup    = {				\
			.dev_id    = _dev,		\
			.con_id	   = _con,		\
		},					\
		.ops       = &tegra_periph_clk_ops,	\
		.reg       = _reg,			\
		.inputs    = _inputs,			\
		.flags     = _flags,			\
		.max_rate  = _max,			\
		.u.periph = {				\
			.clk_num   = _clk_num,		\
		},					\
	}

#define PERIPH_CLK_EX(_name, _dev, _con, _clk_num, _reg, _max, _inputs,	\
			_flags, _ops) 					\
	{						\
		.name      = _name,			\
		.lookup    = {				\
			.dev_id    = _dev,		\
			.con_id	   = _con,		\
		},					\
		.ops       = _ops,			\
		.reg       = _reg,			\
		.inputs    = _inputs,			\
		.flags     = _flags,			\
		.max_rate  = _max,			\
		.u.periph = {				\
			.clk_num   = _clk_num,		\
		},					\
	}

#define SHARED_CLK(_name, _dev, _con, _parent, _id, _div, _mode)\
	{						\
		.name      = _name,			\
		.lookup    = {				\
			.dev_id    = _dev,		\
			.con_id    = _con,		\
		},					\
		.ops       = &tegra_clk_shared_bus_ops,	\
		.parent = _parent,			\
		.u.shared_bus_user = {			\
			.client_id = _id,		\
			.client_div = _div,		\
			.mode = _mode,			\
		},					\
	}

/* Peripheral clock table: PERIPH_CLK(name, dev_id, con_id, clk_num,
 * source reg, max rate, input mux, flags).  NOTE: this array continues
 * past the end of this chunk. */
struct clk tegra_list_clks[] = {
	PERIPH_CLK("apbdma",	"tegra-dma",		NULL,	34,	0,	26000000,  mux_clk_m,			0),
	PERIPH_CLK("rtc",	"rtc-tegra",		NULL,	4,	0,	32768,     mux_clk_32k,			PERIPH_NO_RESET | PERIPH_ON_APB),
	PERIPH_CLK("kbc",	"tegra-kbc",		NULL,	36,	0,	32768,	   mux_clk_32k,			PERIPH_NO_RESET | PERIPH_ON_APB),
	PERIPH_CLK("timer",	"timer",		NULL,	5,	0,	26000000,  mux_clk_m,			0),
	PERIPH_CLK("kfuse",	"kfuse-tegra",		NULL,	40,	0,	26000000,  mux_clk_m,			0),
	PERIPH_CLK("fuse",	"fuse-tegra",		"fuse",	39,	0,	26000000,  mux_clk_m,			PERIPH_ON_APB),
	PERIPH_CLK("fuse_burn",	"fuse-tegra",		"fuse_burn",	39,	0,	26000000,  mux_clk_m,		PERIPH_ON_APB),
	PERIPH_CLK("apbif",	"tegra30-ahub",		"apbif", 107,	0,	26000000,  mux_clk_m,			0),
	PERIPH_CLK("i2s0",	"tegra30-i2s.0",	NULL,	30,	0x1d8,	26000000,  mux_pllaout0_audio0_2x_pllp_clkm,	MUX | DIV_U71 | PERIPH_ON_APB),
	PERIPH_CLK("i2s1",	"tegra30-i2s.1",	NULL,	11,	0x100,	26000000,  mux_pllaout0_audio1_2x_pllp_clkm,	MUX | DIV_U71 | PERIPH_ON_APB),
	PERIPH_CLK("i2s2",	"tegra30-i2s.2",	NULL,	18,	0x104,	26000000,  mux_pllaout0_audio2_2x_pllp_clkm,	MUX | DIV_U71 | PERIPH_ON_APB),
	PERIPH_CLK("i2s3",	"tegra30-i2s.3",	NULL,	101,	0x3bc,	26000000,  mux_pllaout0_audio3_2x_pllp_clkm,	MUX | DIV_U71 | PERIPH_ON_APB),
	PERIPH_CLK("i2s4",	"tegra30-i2s.4",	NULL,	102,	0x3c0,	26000000,  mux_pllaout0_audio4_2x_pllp_clkm,	MUX | DIV_U71 | PERIPH_ON_APB),
	PERIPH_CLK("spdif_out",	"tegra30-spdif",	"spdif_out",	10,	0x108,	100000000, mux_pllaout0_audio_2x_pllp_clkm,	MUX | DIV_U71 | PERIPH_ON_APB),
	PERIPH_CLK("spdif_in",	"tegra30-spdif",	"spdif_in",	10,	0x10c,	100000000, mux_pllp_pllc_pllm,		MUX | DIV_U71 | PERIPH_ON_APB),
	PERIPH_CLK("pwm",	"pwm",			NULL,	17,	0x110,	432000000, mux_pllp_pllc_clk32_clkm,	MUX | MUX_PWM | DIV_U71 | PERIPH_ON_APB),
	PERIPH_CLK("d_audio",	"tegra30-ahub",		"d_audio", 106,	0x3d0,	48000000,  mux_plla_pllc_pllp_clkm,	MUX | DIV_U71),
	PERIPH_CLK("dam0",	"tegra30-dam.0",	NULL,	108,	0x3d8,	48000000,  mux_plla_pllc_pllp_clkm,	MUX | DIV_U71),
	PERIPH_CLK("dam1",	"tegra30-dam.1",	NULL,	109,	0x3dc,	48000000,  mux_plla_pllc_pllp_clkm,	MUX | DIV_U71),
	PERIPH_CLK("dam2",	"tegra30-dam.2",	NULL,	110,	0x3e0,	48000000,  mux_plla_pllc_pllp_clkm,	MUX | DIV_U71),
	PERIPH_CLK("hda",	"tegra30-hda",		"hda",	125,	0x428,	108000000, mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71),
	PERIPH_CLK("hda2codec_2x",	"tegra30-hda",	"hda2codec",	111,	0x3e4,	48000000,  mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71),
	PERIPH_CLK("hda2hdmi",	"tegra30-hda",		"hda2hdmi",	128,	0,	48000000,  mux_clk_m,			0),
	PERIPH_CLK("sbc1",	"spi_tegra.0",		NULL,	41,	0x134,	160000000, mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71 | PERIPH_ON_APB),
	PERIPH_CLK("sbc2",	"spi_tegra.1",		NULL,	44,	0x118,	160000000, mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71 | PERIPH_ON_APB),
	PERIPH_CLK("sbc3",	"spi_tegra.2",		NULL,	46,	0x11c,	160000000, mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71 | PERIPH_ON_APB),
	PERIPH_CLK("sbc4",	"spi_tegra.3",		NULL,	68,	0x1b4,	160000000, mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71 | PERIPH_ON_APB),
	PERIPH_CLK("sbc5",	"spi_tegra.4",		NULL,	104,	0x3c8,	160000000, mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71 | PERIPH_ON_APB),
	PERIPH_CLK("sbc6",	"spi_tegra.5",		NULL,	105,	0x3cc,	160000000, mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71 | PERIPH_ON_APB),
	PERIPH_CLK("sata_oob",	"tegra_sata_oob",	NULL,	123,	0x420,	216000000, mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71),
	PERIPH_CLK("sata",	"tegra_sata",		NULL,	124,	0x424,	216000000, mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71),
	PERIPH_CLK("sata_cold",	"tegra_sata_cold",	NULL,	129,	0,	48000000,  mux_clk_m,			0),
	PERIPH_CLK_EX("ndflash", "tegra_nand",		NULL,	13,	0x160,	240000000, mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71,	&tegra_nand_clk_ops),
	PERIPH_CLK("ndspeed",	"tegra_nand_speed",	NULL,	80,	0x3f8,	240000000, mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71),
	PERIPH_CLK("vfir",	"vfir",			NULL,	7,	0x168,	72000000,  mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71 | PERIPH_ON_APB),
	PERIPH_CLK("sdmmc1",	"sdhci-tegra.0",	NULL,	14,	0x150,	208000000, mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71), /* scales with voltage */
	PERIPH_CLK("sdmmc2",	"sdhci-tegra.1",	NULL,	9,	0x154,	104000000, mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71), /* scales with voltage */
	PERIPH_CLK("sdmmc3",	"sdhci-tegra.2",	NULL,	69,	0x1bc,	208000000, mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71), /* scales with voltage */
	PERIPH_CLK("sdmmc4",	"sdhci-tegra.3",	NULL,	15,	0x164,	104000000, mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71), /* scales with voltage */
	PERIPH_CLK("vcp",	"tegra-avp",		"vcp",	29,	0,	250000000, mux_clk_m,			0),
	PERIPH_CLK("bsea",	"tegra-avp",		"bsea",	62,	0,	250000000, mux_clk_m,			0),
	PERIPH_CLK("bsev",	"tegra-aes",		"bsev",	63,	0,	250000000, mux_clk_m,			0),
	PERIPH_CLK("vde",	"vde",			NULL,	61,	0x1c8,	520000000, mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71 | DIV_U71_INT),
	PERIPH_CLK("csite",	"csite",		NULL,	73,	0x1d4,	144000000, mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71), /* max rate ??? */
	PERIPH_CLK("la",	"la",			NULL,	76,	0x1f8,	26000000,  mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71),
	PERIPH_CLK("owr",	"tegra_w1",		NULL,	71,	0x1cc,	26000000,  mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71 | PERIPH_ON_APB),
	PERIPH_CLK("nor",	"nor",			NULL,	42,	0x1d0,	127000000, mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71), /* requires min voltage */
	PERIPH_CLK("mipi",	"mipi",			NULL,	50,	0x174,	60000000,  mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71 | PERIPH_ON_APB), /* scales with voltage */
	PERIPH_CLK("i2c1",	"tegra-i2c.0",		NULL,	12,	0x124,	26000000,  mux_pllp_clkm,		MUX | DIV_U16 | PERIPH_ON_APB),
	PERIPH_CLK("i2c2",	"tegra-i2c.1",		NULL,	54,	0x198,	26000000,  mux_pllp_clkm,		MUX | DIV_U16 | PERIPH_ON_APB),
	PERIPH_CLK("i2c3",	"tegra-i2c.2",		NULL,	67,	0x1b8,	26000000,  mux_pllp_clkm,		MUX | DIV_U16 | PERIPH_ON_APB),
	PERIPH_CLK("i2c4",	"tegra-i2c.3",		NULL,	103,	0x3c4,	26000000,  mux_pllp_clkm,		MUX | DIV_U16 | PERIPH_ON_APB),
	PERIPH_CLK("i2c5",	"tegra-i2c.4",		NULL,	47,	0x128,	26000000,  mux_pllp_clkm,		MUX | DIV_U16 | PERIPH_ON_APB),
	PERIPH_CLK("uarta",	"tegra_uart.0",		NULL,	6,	0x178,	800000000, mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB),
	PERIPH_CLK("uartb",	"tegra_uart.1",		NULL,	7,	0x17c,	800000000, mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB),
	PERIPH_CLK("uartc",	"tegra_uart.2",		NULL,	55,	0x1a0,	800000000, mux_pllp_pllc_pllm_clkm,	MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB),
	PERIPH_CLK("uartd",	"tegra_uart.3",		NULL,	65,	0x1c0,
800000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB), PERIPH_CLK("uarte", "tegra_uart.4", NULL, 66, 0x1c4, 800000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB), PERIPH_CLK("uarta_dbg", "serial8250.0", "uarta", 6, 0x178, 800000000, mux_pllp_clkm, MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB), PERIPH_CLK("uartb_dbg", "serial8250.0", "uartb", 7, 0x17c, 800000000, mux_pllp_clkm, MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB), PERIPH_CLK("uartc_dbg", "serial8250.0", "uartc", 55, 0x1a0, 800000000, mux_pllp_clkm, MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB), PERIPH_CLK("uartd_dbg", "serial8250.0", "uartd", 65, 0x1c0, 800000000, mux_pllp_clkm, MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB), PERIPH_CLK("uarte_dbg", "serial8250.0", "uarte", 66, 0x1c4, 800000000, mux_pllp_clkm, MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB), PERIPH_CLK_EX("vi", "tegra_camera", "vi", 20, 0x148, 425000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | DIV_U71_INT, &tegra_vi_clk_ops), PERIPH_CLK("3d", "3d", NULL, 24, 0x158, 520000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | DIV_U71_INT | DIV_U71_IDLE | PERIPH_MANUAL_RESET), PERIPH_CLK("3d2", "3d2", NULL, 98, 0x3b0, 520000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | DIV_U71_INT | DIV_U71_IDLE | PERIPH_MANUAL_RESET), PERIPH_CLK("2d", "2d", NULL, 21, 0x15c, 520000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | DIV_U71_INT | DIV_U71_IDLE), PERIPH_CLK("vi_sensor", "tegra_camera", "vi_sensor", 20, 0x1a8, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_NO_RESET), PERIPH_CLK("epp", "epp", NULL, 19, 0x16c, 520000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | DIV_U71_INT), PERIPH_CLK("mpe", "mpe", NULL, 60, 0x170, 520000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | DIV_U71_INT), PERIPH_CLK("host1x", "host1x", NULL, 28, 0x180, 260000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | DIV_U71_INT), PERIPH_CLK("cve", "cve", NULL, 49, 0x140, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires 
min voltage */ PERIPH_CLK("tvo", "tvo", NULL, 49, 0x188, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */ PERIPH_CLK_EX("dtv", "dtv", NULL, 79, 0x1dc, 250000000, mux_clk_m, 0, &tegra_dtv_clk_ops), PERIPH_CLK("hdmi", "hdmi", NULL, 51, 0x18c, 148500000, mux_pllp_pllm_plld_plla_pllc_plld2_clkm, MUX | MUX8 | DIV_U71), PERIPH_CLK("tvdac", "tvdac", NULL, 53, 0x194, 220000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */ PERIPH_CLK("disp1", "tegradc.0", NULL, 27, 0x138, 600000000, mux_pllp_pllm_plld_plla_pllc_plld2_clkm, MUX | MUX8), PERIPH_CLK("disp2", "tegradc.1", NULL, 26, 0x13c, 600000000, mux_pllp_pllm_plld_plla_pllc_plld2_clkm, MUX | MUX8), PERIPH_CLK("usbd", "fsl-tegra-udc", NULL, 22, 0, 480000000, mux_clk_m, 0), /* requires min voltage */ PERIPH_CLK("usb2", "tegra-ehci.1", NULL, 58, 0, 480000000, mux_clk_m, 0), /* requires min voltage */ PERIPH_CLK("usb3", "tegra-ehci.2", NULL, 59, 0, 480000000, mux_clk_m, 0), /* requires min voltage */ PERIPH_CLK("dsia", "tegradc.0", "dsia", 48, 0, 500000000, mux_plld_out0, 0), PERIPH_CLK_EX("dsib", "tegradc.1", "dsib", 82, 0xd0, 500000000, mux_plld_out0_plld2_out0, MUX | PLLD, &tegra_dsib_clk_ops), PERIPH_CLK("csi", "tegra_camera", "csi", 52, 0, 102000000, mux_pllp_out3, 0), PERIPH_CLK("isp", "tegra_camera", "isp", 23, 0, 150000000, mux_clk_m, 0), /* same frequency as VI */ PERIPH_CLK("csus", "tegra_camera", "csus", 92, 0, 150000000, mux_clk_m, PERIPH_NO_RESET), PERIPH_CLK("tsensor", "tegra-tsensor", NULL, 100, 0x3b8, 216000000, mux_pllp_pllc_clkm_clk32, MUX | DIV_U71), PERIPH_CLK("actmon", "actmon", NULL, 119, 0x3e8, 216000000, mux_pllp_pllc_clk32_clkm, MUX | DIV_U71), PERIPH_CLK("extern1", "extern1", NULL, 120, 0x3ec, 216000000, mux_plla_clk32_pllp_clkm_plle, MUX | MUX8 | DIV_U71), PERIPH_CLK("extern2", "extern2", NULL, 121, 0x3f0, 216000000, mux_plla_clk32_pllp_clkm_plle, MUX | MUX8 | DIV_U71), PERIPH_CLK("extern3", "extern3", NULL, 122, 0x3f4, 216000000, 
mux_plla_clk32_pllp_clkm_plle, MUX | MUX8 | DIV_U71), PERIPH_CLK("i2cslow", "i2cslow", NULL, 81, 0x3fc, 26000000, mux_pllp_pllc_clk32_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("pcie", "tegra-pcie", "pcie", 70, 0, 250000000, mux_clk_m, 0), PERIPH_CLK("afi", "tegra-pcie", "afi", 72, 0, 250000000, mux_clk_m, 0), PERIPH_CLK("se", "se", NULL, 127, 0x42c, 520000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | DIV_U71_INT), }; #define CLK_DUPLICATE(_name, _dev, _con) \ { \ .name = _name, \ .lookup = { \ .dev_id = _dev, \ .con_id = _con, \ }, \ } /* Some clocks may be used by different drivers depending on the board * configuration. List those here to register them twice in the clock lookup * table under two names. */ struct clk_duplicate tegra_clk_duplicates[] = { CLK_DUPLICATE("usbd", "utmip-pad", NULL), CLK_DUPLICATE("usbd", "tegra-ehci.0", NULL), CLK_DUPLICATE("usbd", "tegra-otg", NULL), CLK_DUPLICATE("hdmi", "tegradc.0", "hdmi"), CLK_DUPLICATE("hdmi", "tegradc.1", "hdmi"), CLK_DUPLICATE("dsib", "tegradc.0", "dsib"), CLK_DUPLICATE("dsia", "tegradc.1", "dsia"), CLK_DUPLICATE("pwm", "tegra_pwm.0", NULL), CLK_DUPLICATE("pwm", "tegra_pwm.1", NULL), CLK_DUPLICATE("pwm", "tegra_pwm.2", NULL), CLK_DUPLICATE("pwm", "tegra_pwm.3", NULL), CLK_DUPLICATE("bsev", "tegra-avp", "bsev"), CLK_DUPLICATE("bsev", "nvavp", "bsev"), CLK_DUPLICATE("vde", "tegra-aes", "vde"), CLK_DUPLICATE("bsea", "tegra-aes", "bsea"), CLK_DUPLICATE("bsea", "nvavp", "bsea"), CLK_DUPLICATE("cml1", "tegra_sata_cml", NULL), CLK_DUPLICATE("cml0", "tegra_pcie", "cml"), CLK_DUPLICATE("pciex", "tegra_pcie", "pciex"), CLK_DUPLICATE("i2c1", "tegra-i2c-slave.0", NULL), CLK_DUPLICATE("i2c2", "tegra-i2c-slave.1", NULL), CLK_DUPLICATE("i2c3", "tegra-i2c-slave.2", NULL), CLK_DUPLICATE("i2c4", "tegra-i2c-slave.3", NULL), CLK_DUPLICATE("i2c5", "tegra-i2c-slave.4", NULL), CLK_DUPLICATE("sbc1", "spi_slave_tegra.0", NULL), CLK_DUPLICATE("sbc2", "spi_slave_tegra.1", NULL), CLK_DUPLICATE("sbc3", "spi_slave_tegra.2", NULL), 
CLK_DUPLICATE("sbc4", "spi_slave_tegra.3", NULL),
CLK_DUPLICATE("sbc5", "spi_slave_tegra.4", NULL),
CLK_DUPLICATE("sbc6", "spi_slave_tegra.5", NULL),
CLK_DUPLICATE("twd", "smp_twd", NULL),
CLK_DUPLICATE("vcp", "nvavp", "vcp"),
};

/*
 * Clocks registered directly by pointer: fixed sources, PLLs and their
 * output dividers.  These have no peripheral clock-enable bit, so they
 * are not part of the PERIPH_CLK table above.
 */
struct clk *tegra_ptr_clks[] = {
	&tegra_clk_32k,
	&tegra_clk_m,
	&tegra_clk_m_div2,
	&tegra_clk_m_div4,
	&tegra_pll_ref,
	&tegra_pll_m,
	&tegra_pll_m_out1,
	&tegra_pll_c,
	&tegra_pll_c_out1,
	&tegra_pll_p,
	&tegra_pll_p_out1,
	&tegra_pll_p_out2,
	&tegra_pll_p_out3,
	&tegra_pll_p_out4,
	&tegra_pll_a,
	&tegra_pll_a_out0,
	&tegra_pll_d,
	&tegra_pll_d_out0,
	&tegra_pll_d2,
	&tegra_pll_d2_out0,
	&tegra_pll_u,
	&tegra_pll_x,
	&tegra_pll_x_out0,
	&tegra_pll_e,
	&tegra_clk_cclk_g,
	&tegra_cml0_clk,
	&tegra_cml1_clk,
	&tegra_pciex_clk,
	&tegra_clk_sclk,
	&tegra_clk_blink,
	&tegra30_clk_twd,
};

/*
 * Initialize a single clock and register it with the common clkdev
 * lookup machinery.  Clocks without an explicit dev_id/con_id are
 * registered under their own name as the connection id.
 */
static void tegra30_init_one_clock(struct clk *c)
{
	clk_init(c);
	INIT_LIST_HEAD(&c->shared_bus_list);
	if (!c->lookup.dev_id && !c->lookup.con_id)
		c->lookup.con_id = c->name;
	c->lookup.clk = c;
	clkdev_add(&c->lookup);
}

/*
 * Register every Tegra30 clock: the pointer clocks (PLLs etc.) first,
 * then the peripheral clock table, then alias (duplicate) lookups, and
 * finally the audio/sync/output clocks.  An unknown name in the
 * duplicates table is logged and skipped rather than treated as fatal.
 */
void __init tegra30_init_clocks(void)
{
	int i;
	struct clk *c;

	for (i = 0; i < ARRAY_SIZE(tegra_ptr_clks); i++)
		tegra30_init_one_clock(tegra_ptr_clks[i]);

	for (i = 0; i < ARRAY_SIZE(tegra_list_clks); i++)
		tegra30_init_one_clock(&tegra_list_clks[i]);

	for (i = 0; i < ARRAY_SIZE(tegra_clk_duplicates); i++) {
		c = tegra_get_clock_by_name(tegra_clk_duplicates[i].name);
		if (!c) {
			pr_err("%s: Unknown duplicate clock %s\n", __func__,
				tegra_clk_duplicates[i].name);
			continue;
		}

		tegra_clk_duplicates[i].lookup.clk = c;
		clkdev_add(&tegra_clk_duplicates[i].lookup);
	}

	for (i = 0; i < ARRAY_SIZE(tegra_sync_source_list); i++)
		tegra30_init_one_clock(&tegra_sync_source_list[i]);

	for (i = 0; i < ARRAY_SIZE(tegra_clk_audio_list); i++)
		tegra30_init_one_clock(&tegra_clk_audio_list[i]);

	for (i = 0; i < ARRAY_SIZE(tegra_clk_audio_2x_list); i++)
		tegra30_init_one_clock(&tegra_clk_audio_2x_list[i]);

	init_clk_out_mux();
	for (i = 0; i < ARRAY_SIZE(tegra_clk_out_list); i++)
		tegra30_init_one_clock(&tegra_clk_out_list[i]);
}
gpl-2.0
Biktorgj/Tizen_b2_Kernel
drivers/dma/fsldma.c
4936
37545
/* * Freescale MPC85xx, MPC83xx DMA Engine support * * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved. * * Author: * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 * Ebony Zhu <ebony.zhu@freescale.com>, May 2007 * * Description: * DMA engine driver for Freescale MPC8540 DMA controller, which is * also fit for MPC8560, MPC8555, MPC8548, MPC8641, and etc. * The support for MPC8349 DMA controller is also added. * * This driver instructs the DMA controller to issue the PCI Read Multiple * command for PCI read operations, instead of using the default PCI Read Line * command. Please be aware that this setting may result in read pre-fetching * on some platforms. * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/dmaengine.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/of_platform.h> #include "dmaengine.h" #include "fsldma.h" #define chan_dbg(chan, fmt, arg...) \ dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg) #define chan_err(chan, fmt, arg...) 
\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

/* shared message for every descriptor-allocation failure path */
static const char msg_ld_oom[] = "No free memory for link descriptor";

/*
 * Register Helpers
 */

/* write the channel status register */
static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}

/* read the channel status register */
static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}

/*
 * program the current descriptor address register; FSL_DMA_SNEN is
 * or'd into the address so descriptor fetches are snooped
 */
static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

/* read the current descriptor address, stripping the snoop-enable bit */
static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

/* read the byte count register -- presumably bytes remaining in the
 * current transfer; TODO confirm against the MPC85xx reference manual */
static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}

/*
 * Descriptor Helpers
 *
 * These convert between CPU byte order and the controller's expected
 * byte order via the CPU_TO_DMA/DMA_TO_CPU macros.
 */

/* store the transfer byte count into a hardware link descriptor */
static void set_desc_cnt(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

/* read the transfer byte count back from a software descriptor */
static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	return DMA_TO_CPU(chan, desc->hw.count, 32);
}

/*
 * store the source address into a hardware link descriptor; on 85xx
 * parts the snoop-read attribute lives in the upper 32 bits
 */
static void set_desc_src(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

/* read the source address back, masking out the 85xx snoop attribute */
static dma_addr_t get_desc_src(struct fsldma_chan *chan,
			       struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
}

/*
 * store the destination address into a hardware link descriptor; on
 * 85xx parts the snoop-write attribute lives in the upper 32 bits
 */
static void set_desc_dst(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

/* read the destination address back, masking out the snoop attribute */
static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
			       struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
}

/*
 * link a hardware descriptor to its successor; on 83xx parts the
 * next-link pointer also carries the snoop-enable bit
 */
static void set_desc_next(struct fsldma_chan *chan,
			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

/*
 * mark a descriptor as the end of a link chain by setting the EOL bit
 * in its next-link pointer (read-modify-write in DMA byte order)
 */
static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

/*
 * DMA Engine Hardware Control Helpers
 */

/* reset the channel and program the IP-specific interrupt/mode bits */
static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	DMA_OUT(chan, &chan->regs->mr, 0, 32);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to below modes:
		 * EIE - Error interrupt enable
		 * EOLNIE - End of links interrupt enable
		 * BWC - Bandwidth sharing among channels
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
				| FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to below modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}

/* idle means: channel not busy (CB clear) or halted (CH set) */
static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

/*
 * Start the DMA controller
 *
 * Preconditions:
 * - the CDAR register must point to the start descriptor
 * - the MRn[CS] bit must be cleared
 */
static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		/* external pause: clear BCR and enable pause mode */
		DMA_OUT(chan, &chan->regs->bcr, 0, 32);
		mode |= FSL_DMA_MR_EMP_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMP_EN;
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT) {
		/* hardware starts the transfer via the DREQ# pin */
		mode |= FSL_DMA_MR_EMS_EN;
	} else {
		mode &=
~FSL_DMA_MR_EMS_EN;
		/* software start: CS kicks off the transfer immediately */
		mode |= FSL_DMA_MR_CS;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/* stop the channel, aborting any in-flight transfer on 85xx parts */
static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	/* read the mode register */
	mode = DMA_IN(chan, &chan->regs->mr, 32);

	/*
	 * The 85xx controller supports channel abort, which will stop
	 * the current transfer. On 83xx, this bit is the transfer error
	 * mask bit, which should not be changed.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		/* pulse CA: write it set, then clear it for the write below */
		mode |= FSL_DMA_MR_CA;
		DMA_OUT(chan, &chan->regs->mr, mode, 32);

		mode &= ~FSL_DMA_MR_CA;
	}

	/* stop the DMA controller */
	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	/* wait for the DMA controller to become idle (up to ~1ms) */
	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;

		udelay(10);
	}

	if (!dma_is_idle(chan))
		chan_err(chan, "DMA halt timeout!\n");
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 for disable loop
 *
 * The set source address hold transfer size. The source
 * address hold or loop transfer size is when the DMA transfer
 * data from source address (SA), if the loop size is 4, the DMA will
 * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA,
 * SA + 1 ... and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		/* SAHTS field lives at bits 14-15 of the mode register */
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 for disable loop
 *
 * The set destination address hold transfer size. The destination
 * address hold or loop transfer size is when the DMA transfer
 * data to destination address (TA), if the loop size is 4, the DMA will
 * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
 * TA + 1 ... and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		/* DAHTS field lives at bits 16-17 of the mode register */
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	/* BWC field: log2 of the request size, bits 24-27 */
	mode |= (__ilog2(size) << 24) & 0x0f000000;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
*
 * If enable the external start, the channel can be started by an
 * external DMA start pin. So the dma_start() does not start the
 * transfer immediately. The DMA channel will wait for the
 * control pin asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

/*
 * Append a transaction's descriptor chain to the channel's pending
 * list, linking the hardware descriptors so the controller walks
 * straight from the old chain into the new one.
 * LOCKING: caller must hold chan->desc_lock.
 */
static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

	if (list_empty(&chan->ld_pending))
		goto out_splice;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 *
	 * This will un-set the EOL bit of the existing transaction, and the
	 * last link in this transaction will become the EOL descriptor.
	 */
	set_desc_next(chan, &tail->hw, desc->async_tx.phys);

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}

/*
 * dmaengine .tx_submit hook: assign cookies to the transaction's
 * descriptors and queue it on the pending list.  Returns the cookie of
 * the last descriptor in the chain.
 */
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 *
	 * NOTE(review): cookie would be uninitialized if tx_list were
	 * empty; the prep routines always queue at least one descriptor,
	 * but confirm before relying on this.
	 */
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The descriptor allocated. NULL for failed.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	/* GFP_ATOMIC: may be called from the submit path under a lock */
	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		chan_dbg(chan, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

#ifdef FSL_DMA_LD_DEBUG
	chan_dbg(chan, "LD %p allocated\n", desc);
#endif

	return desc;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * We need the descriptor to be aligned to 32bytes
	 * for meeting FSL DMA specification requirement.
*/ chan->desc_pool = dma_pool_create(chan->name, chan->dev, sizeof(struct fsl_desc_sw), __alignof__(struct fsl_desc_sw), 0); if (!chan->desc_pool) { chan_err(chan, "unable to allocate descriptor pool\n"); return -ENOMEM; } /* there is at least one descriptor free to be allocated */ return 1; } /** * fsldma_free_desc_list - Free all descriptors in a queue * @chan: Freescae DMA channel * @list: the list to free * * LOCKING: must hold chan->desc_lock */ static void fsldma_free_desc_list(struct fsldma_chan *chan, struct list_head *list) { struct fsl_desc_sw *desc, *_desc; list_for_each_entry_safe(desc, _desc, list, node) { list_del(&desc->node); #ifdef FSL_DMA_LD_DEBUG chan_dbg(chan, "LD %p free\n", desc); #endif dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); } } static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan, struct list_head *list) { struct fsl_desc_sw *desc, *_desc; list_for_each_entry_safe_reverse(desc, _desc, list, node) { list_del(&desc->node); #ifdef FSL_DMA_LD_DEBUG chan_dbg(chan, "LD %p free\n", desc); #endif dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); } } /** * fsl_dma_free_chan_resources - Free all resources of the channel. 
* @chan : Freescale DMA channel */ static void fsl_dma_free_chan_resources(struct dma_chan *dchan) { struct fsldma_chan *chan = to_fsl_chan(dchan); unsigned long flags; chan_dbg(chan, "free all channel resources\n"); spin_lock_irqsave(&chan->desc_lock, flags); fsldma_free_desc_list(chan, &chan->ld_pending); fsldma_free_desc_list(chan, &chan->ld_running); spin_unlock_irqrestore(&chan->desc_lock, flags); dma_pool_destroy(chan->desc_pool); chan->desc_pool = NULL; } static struct dma_async_tx_descriptor * fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags) { struct fsldma_chan *chan; struct fsl_desc_sw *new; if (!dchan) return NULL; chan = to_fsl_chan(dchan); new = fsl_dma_alloc_descriptor(chan); if (!new) { chan_err(chan, "%s\n", msg_ld_oom); return NULL; } new->async_tx.cookie = -EBUSY; new->async_tx.flags = flags; /* Insert the link descriptor to the LD ring */ list_add_tail(&new->node, &new->tx_list); /* Set End-of-link to the last link descriptor of new list */ set_ld_eol(chan, new); return &new->async_tx; } static struct dma_async_tx_descriptor * fsl_dma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src, size_t len, unsigned long flags) { struct fsldma_chan *chan; struct fsl_desc_sw *first = NULL, *prev = NULL, *new; size_t copy; if (!dchan) return NULL; if (!len) return NULL; chan = to_fsl_chan(dchan); do { /* Allocate the link descriptor from DMA pool */ new = fsl_dma_alloc_descriptor(chan); if (!new) { chan_err(chan, "%s\n", msg_ld_oom); goto fail; } copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); set_desc_cnt(chan, &new->hw, copy); set_desc_src(chan, &new->hw, dma_src); set_desc_dst(chan, &new->hw, dma_dst); if (!first) first = new; else set_desc_next(chan, &prev->hw, new->async_tx.phys); new->async_tx.cookie = 0; async_tx_ack(&new->async_tx); prev = new; len -= copy; dma_src += copy; dma_dst += copy; /* Insert the link descriptor to the LD ring */ list_add_tail(&new->node, &first->tx_list); } while (len); 
new->async_tx.flags = flags; /* client is in control of this ack */ new->async_tx.cookie = -EBUSY; /* Set End-of-link to the last link descriptor of new list */ set_ld_eol(chan, new); return &first->async_tx; fail: if (!first) return NULL; fsldma_free_desc_list_reverse(chan, &first->tx_list); return NULL; } static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan, struct scatterlist *dst_sg, unsigned int dst_nents, struct scatterlist *src_sg, unsigned int src_nents, unsigned long flags) { struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; struct fsldma_chan *chan = to_fsl_chan(dchan); size_t dst_avail, src_avail; dma_addr_t dst, src; size_t len; /* basic sanity checks */ if (dst_nents == 0 || src_nents == 0) return NULL; if (dst_sg == NULL || src_sg == NULL) return NULL; /* * TODO: should we check that both scatterlists have the same * TODO: number of bytes in total? Is that really an error? */ /* get prepared for the loop */ dst_avail = sg_dma_len(dst_sg); src_avail = sg_dma_len(src_sg); /* run until we are out of scatterlist entries */ while (true) { /* create the largest transaction possible */ len = min_t(size_t, src_avail, dst_avail); len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT); if (len == 0) goto fetch; dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail; src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail; /* allocate and populate the descriptor */ new = fsl_dma_alloc_descriptor(chan); if (!new) { chan_err(chan, "%s\n", msg_ld_oom); goto fail; } set_desc_cnt(chan, &new->hw, len); set_desc_src(chan, &new->hw, src); set_desc_dst(chan, &new->hw, dst); if (!first) first = new; else set_desc_next(chan, &prev->hw, new->async_tx.phys); new->async_tx.cookie = 0; async_tx_ack(&new->async_tx); prev = new; /* Insert the link descriptor to the LD ring */ list_add_tail(&new->node, &first->tx_list); /* update metadata */ dst_avail -= len; src_avail -= len; fetch: /* fetch the next dst scatterlist entry */ if (dst_avail 
== 0) { /* no more entries: we're done */ if (dst_nents == 0) break; /* fetch the next entry: if there are no more: done */ dst_sg = sg_next(dst_sg); if (dst_sg == NULL) break; dst_nents--; dst_avail = sg_dma_len(dst_sg); } /* fetch the next src scatterlist entry */ if (src_avail == 0) { /* no more entries: we're done */ if (src_nents == 0) break; /* fetch the next entry: if there are no more: done */ src_sg = sg_next(src_sg); if (src_sg == NULL) break; src_nents--; src_avail = sg_dma_len(src_sg); } } new->async_tx.flags = flags; /* client is in control of this ack */ new->async_tx.cookie = -EBUSY; /* Set End-of-link to the last link descriptor of new list */ set_ld_eol(chan, new); return &first->async_tx; fail: if (!first) return NULL; fsldma_free_desc_list_reverse(chan, &first->tx_list); return NULL; } /** * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction * @chan: DMA channel * @sgl: scatterlist to transfer to/from * @sg_len: number of entries in @scatterlist * @direction: DMA direction * @flags: DMAEngine flags * @context: transaction context (ignored) * * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the * DMA_SLAVE API, this gets the device-specific information from the * chan->private variable. */ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags, void *context) { /* * This operation is not supported on the Freescale DMA controller * * However, we need to provide the function pointer to allow the * device_control() method to work. 
*/ return NULL; } static int fsl_dma_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, unsigned long arg) { struct dma_slave_config *config; struct fsldma_chan *chan; unsigned long flags; int size; if (!dchan) return -EINVAL; chan = to_fsl_chan(dchan); switch (cmd) { case DMA_TERMINATE_ALL: spin_lock_irqsave(&chan->desc_lock, flags); /* Halt the DMA engine */ dma_halt(chan); /* Remove and free all of the descriptors in the LD queue */ fsldma_free_desc_list(chan, &chan->ld_pending); fsldma_free_desc_list(chan, &chan->ld_running); chan->idle = true; spin_unlock_irqrestore(&chan->desc_lock, flags); return 0; case DMA_SLAVE_CONFIG: config = (struct dma_slave_config *)arg; /* make sure the channel supports setting burst size */ if (!chan->set_request_count) return -ENXIO; /* we set the controller burst size depending on direction */ if (config->direction == DMA_MEM_TO_DEV) size = config->dst_addr_width * config->dst_maxburst; else size = config->src_addr_width * config->src_maxburst; chan->set_request_count(chan, size); return 0; case FSLDMA_EXTERNAL_START: /* make sure the channel supports external start */ if (!chan->toggle_ext_start) return -ENXIO; chan->toggle_ext_start(chan, arg); return 0; default: return -ENXIO; } return 0; } /** * fsldma_cleanup_descriptor - cleanup and free a single link descriptor * @chan: Freescale DMA channel * @desc: descriptor to cleanup and free * * This function is used on a descriptor which has been executed by the DMA * controller. It will run any callbacks, submit any dependencies, and then * free the descriptor. 
*/ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, struct fsl_desc_sw *desc) { struct dma_async_tx_descriptor *txd = &desc->async_tx; struct device *dev = chan->common.device->dev; dma_addr_t src = get_desc_src(chan, desc); dma_addr_t dst = get_desc_dst(chan, desc); u32 len = get_desc_cnt(chan, desc); /* Run the link descriptor callback function */ if (txd->callback) { #ifdef FSL_DMA_LD_DEBUG chan_dbg(chan, "LD %p callback\n", desc); #endif txd->callback(txd->callback_param); } /* Run any dependencies */ dma_run_dependencies(txd); /* Unmap the dst buffer, if requested */ if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE); else dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE); } /* Unmap the src buffer, if requested */ if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) dma_unmap_single(dev, src, len, DMA_TO_DEVICE); else dma_unmap_page(dev, src, len, DMA_TO_DEVICE); } #ifdef FSL_DMA_LD_DEBUG chan_dbg(chan, "LD %p free\n", desc); #endif dma_pool_free(chan->desc_pool, desc, txd->phys); } /** * fsl_chan_xfer_ld_queue - transfer any pending transactions * @chan : Freescale DMA channel * * HARDWARE STATE: idle * LOCKING: must hold chan->desc_lock */ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) { struct fsl_desc_sw *desc; /* * If the list of pending descriptors is empty, then we * don't need to do any work at all */ if (list_empty(&chan->ld_pending)) { chan_dbg(chan, "no pending LDs\n"); return; } /* * The DMA controller is not idle, which means that the interrupt * handler will start any queued transactions when it runs after * this transaction finishes */ if (!chan->idle) { chan_dbg(chan, "DMA controller still busy\n"); return; } /* * If there are some link descriptors which have not been * transferred, we need to start the controller */ /* * Move all elements from the queue of pending transactions * onto 
the list of running transactions */ chan_dbg(chan, "idle, starting controller\n"); desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); list_splice_tail_init(&chan->ld_pending, &chan->ld_running); /* * The 85xx DMA controller doesn't clear the channel start bit * automatically at the end of a transfer. Therefore we must clear * it in software before starting the transfer. */ if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { u32 mode; mode = DMA_IN(chan, &chan->regs->mr, 32); mode &= ~FSL_DMA_MR_CS; DMA_OUT(chan, &chan->regs->mr, mode, 32); } /* * Program the descriptor's address into the DMA controller, * then start the DMA transaction */ set_cdar(chan, desc->async_tx.phys); get_cdar(chan); dma_start(chan); chan->idle = false; } /** * fsl_dma_memcpy_issue_pending - Issue the DMA start command * @chan : Freescale DMA channel */ static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) { struct fsldma_chan *chan = to_fsl_chan(dchan); unsigned long flags; spin_lock_irqsave(&chan->desc_lock, flags); fsl_chan_xfer_ld_queue(chan); spin_unlock_irqrestore(&chan->desc_lock, flags); } /** * fsl_tx_status - Determine the DMA status * @chan : Freescale DMA channel */ static enum dma_status fsl_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct fsldma_chan *chan = to_fsl_chan(dchan); enum dma_status ret; unsigned long flags; spin_lock_irqsave(&chan->desc_lock, flags); ret = dma_cookie_status(dchan, cookie, txstate); spin_unlock_irqrestore(&chan->desc_lock, flags); return ret; } /*----------------------------------------------------------------------------*/ /* Interrupt Handling */ /*----------------------------------------------------------------------------*/ static irqreturn_t fsldma_chan_irq(int irq, void *data) { struct fsldma_chan *chan = data; u32 stat; /* save and clear the status register */ stat = get_sr(chan); set_sr(chan, stat); chan_dbg(chan, "irq: stat = 0x%x\n", stat); /* check that this 
was really our device */ stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); if (!stat) return IRQ_NONE; if (stat & FSL_DMA_SR_TE) chan_err(chan, "Transfer Error!\n"); /* * Programming Error * The DMA_INTERRUPT async_tx is a NULL transfer, which will * triger a PE interrupt. */ if (stat & FSL_DMA_SR_PE) { chan_dbg(chan, "irq: Programming Error INT\n"); stat &= ~FSL_DMA_SR_PE; if (get_bcr(chan) != 0) chan_err(chan, "Programming Error!\n"); } /* * For MPC8349, EOCDI event need to update cookie * and start the next transfer if it exist. */ if (stat & FSL_DMA_SR_EOCDI) { chan_dbg(chan, "irq: End-of-Chain link INT\n"); stat &= ~FSL_DMA_SR_EOCDI; } /* * If it current transfer is the end-of-transfer, * we should clear the Channel Start bit for * prepare next transfer. */ if (stat & FSL_DMA_SR_EOLNI) { chan_dbg(chan, "irq: End-of-link INT\n"); stat &= ~FSL_DMA_SR_EOLNI; } /* check that the DMA controller is really idle */ if (!dma_is_idle(chan)) chan_err(chan, "irq: controller not idle!\n"); /* check that we handled all of the bits */ if (stat) chan_err(chan, "irq: unhandled sr 0x%08x\n", stat); /* * Schedule the tasklet to handle all cleanup of the current * transaction. It will start a new transaction if there is * one pending. 
*/ tasklet_schedule(&chan->tasklet); chan_dbg(chan, "irq: Exit\n"); return IRQ_HANDLED; } static void dma_do_tasklet(unsigned long data) { struct fsldma_chan *chan = (struct fsldma_chan *)data; struct fsl_desc_sw *desc, *_desc; LIST_HEAD(ld_cleanup); unsigned long flags; chan_dbg(chan, "tasklet entry\n"); spin_lock_irqsave(&chan->desc_lock, flags); /* update the cookie if we have some descriptors to cleanup */ if (!list_empty(&chan->ld_running)) { dma_cookie_t cookie; desc = to_fsl_desc(chan->ld_running.prev); cookie = desc->async_tx.cookie; dma_cookie_complete(&desc->async_tx); chan_dbg(chan, "completed_cookie=%d\n", cookie); } /* * move the descriptors to a temporary list so we can drop the lock * during the entire cleanup operation */ list_splice_tail_init(&chan->ld_running, &ld_cleanup); /* the hardware is now idle and ready for more */ chan->idle = true; /* * Start any pending transactions automatically * * In the ideal case, we keep the DMA controller busy while we go * ahead and free the descriptors below. */ fsl_chan_xfer_ld_queue(chan); spin_unlock_irqrestore(&chan->desc_lock, flags); /* Run the callback for each descriptor, in order */ list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) { /* Remove from the list of transactions */ list_del(&desc->node); /* Run all cleanup for this descriptor */ fsldma_cleanup_descriptor(chan, desc); } chan_dbg(chan, "tasklet exit\n"); } static irqreturn_t fsldma_ctrl_irq(int irq, void *data) { struct fsldma_device *fdev = data; struct fsldma_chan *chan; unsigned int handled = 0; u32 gsr, mask; int i; gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? 
in_be32(fdev->regs) : in_le32(fdev->regs); mask = 0xff000000; dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr); for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { chan = fdev->chan[i]; if (!chan) continue; if (gsr & mask) { dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id); fsldma_chan_irq(irq, chan); handled++; } gsr &= ~mask; mask >>= 8; } return IRQ_RETVAL(handled); } static void fsldma_free_irqs(struct fsldma_device *fdev) { struct fsldma_chan *chan; int i; if (fdev->irq != NO_IRQ) { dev_dbg(fdev->dev, "free per-controller IRQ\n"); free_irq(fdev->irq, fdev); return; } for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { chan = fdev->chan[i]; if (chan && chan->irq != NO_IRQ) { chan_dbg(chan, "free per-channel IRQ\n"); free_irq(chan->irq, chan); } } } static int fsldma_request_irqs(struct fsldma_device *fdev) { struct fsldma_chan *chan; int ret; int i; /* if we have a per-controller IRQ, use that */ if (fdev->irq != NO_IRQ) { dev_dbg(fdev->dev, "request per-controller IRQ\n"); ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED, "fsldma-controller", fdev); return ret; } /* no per-controller IRQ, use the per-channel IRQs */ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { chan = fdev->chan[i]; if (!chan) continue; if (chan->irq == NO_IRQ) { chan_err(chan, "interrupts property missing in device tree\n"); ret = -ENODEV; goto out_unwind; } chan_dbg(chan, "request per-channel IRQ\n"); ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED, "fsldma-chan", chan); if (ret) { chan_err(chan, "unable to request per-channel IRQ\n"); goto out_unwind; } } return 0; out_unwind: for (/* none */; i >= 0; i--) { chan = fdev->chan[i]; if (!chan) continue; if (chan->irq == NO_IRQ) continue; free_irq(chan->irq, chan); } return ret; } /*----------------------------------------------------------------------------*/ /* OpenFirmware Subsystem */ /*----------------------------------------------------------------------------*/ static int __devinit fsl_dma_chan_probe(struct 
fsldma_device *fdev, struct device_node *node, u32 feature, const char *compatible) { struct fsldma_chan *chan; struct resource res; int err; /* alloc channel */ chan = kzalloc(sizeof(*chan), GFP_KERNEL); if (!chan) { dev_err(fdev->dev, "no free memory for DMA channels!\n"); err = -ENOMEM; goto out_return; } /* ioremap registers for use */ chan->regs = of_iomap(node, 0); if (!chan->regs) { dev_err(fdev->dev, "unable to ioremap registers\n"); err = -ENOMEM; goto out_free_chan; } err = of_address_to_resource(node, 0, &res); if (err) { dev_err(fdev->dev, "unable to find 'reg' property\n"); goto out_iounmap_regs; } chan->feature = feature; if (!fdev->feature) fdev->feature = chan->feature; /* * If the DMA device's feature is different than the feature * of its channels, report the bug */ WARN_ON(fdev->feature != chan->feature); chan->dev = fdev->dev; chan->id = ((res.start - 0x100) & 0xfff) >> 7; if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { dev_err(fdev->dev, "too many channels for device\n"); err = -EINVAL; goto out_iounmap_regs; } fdev->chan[chan->id] = chan; tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id); /* Initialize the channel */ dma_init(chan); /* Clear cdar registers */ set_cdar(chan, 0); switch (chan->feature & FSL_DMA_IP_MASK) { case FSL_DMA_IP_85XX: chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; case FSL_DMA_IP_83XX: chan->toggle_ext_start = fsl_chan_toggle_ext_start; chan->set_src_loop_size = fsl_chan_set_src_loop_size; chan->set_dst_loop_size = fsl_chan_set_dst_loop_size; chan->set_request_count = fsl_chan_set_request_count; } spin_lock_init(&chan->desc_lock); INIT_LIST_HEAD(&chan->ld_pending); INIT_LIST_HEAD(&chan->ld_running); chan->idle = true; chan->common.device = &fdev->common; dma_cookie_init(&chan->common); /* find the IRQ line, if it exists in the device tree */ chan->irq = irq_of_parse_and_map(node, 0); /* Add the channel to DMA device channel list */ 
list_add_tail(&chan->common.device_node, &fdev->common.channels); fdev->common.chancnt++; dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible, chan->irq != NO_IRQ ? chan->irq : fdev->irq); return 0; out_iounmap_regs: iounmap(chan->regs); out_free_chan: kfree(chan); out_return: return err; } static void fsl_dma_chan_remove(struct fsldma_chan *chan) { irq_dispose_mapping(chan->irq); list_del(&chan->common.device_node); iounmap(chan->regs); kfree(chan); } static int __devinit fsldma_of_probe(struct platform_device *op) { struct fsldma_device *fdev; struct device_node *child; int err; fdev = kzalloc(sizeof(*fdev), GFP_KERNEL); if (!fdev) { dev_err(&op->dev, "No enough memory for 'priv'\n"); err = -ENOMEM; goto out_return; } fdev->dev = &op->dev; INIT_LIST_HEAD(&fdev->common.channels); /* ioremap the registers for use */ fdev->regs = of_iomap(op->dev.of_node, 0); if (!fdev->regs) { dev_err(&op->dev, "unable to ioremap registers\n"); err = -ENOMEM; goto out_free_fdev; } /* map the channel IRQ if it exists, but don't hookup the handler yet */ fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0); dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); dma_cap_set(DMA_SG, fdev->common.cap_mask); dma_cap_set(DMA_SLAVE, fdev->common.cap_mask); fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; fdev->common.device_prep_dma_sg = fsl_dma_prep_sg; fdev->common.device_tx_status = fsl_tx_status; fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; fdev->common.device_control = fsl_dma_device_control; fdev->common.dev = &op->dev; dma_set_mask(&(op->dev), DMA_BIT_MASK(36)); dev_set_drvdata(&op->dev, fdev); /* * We cannot use 
of_platform_bus_probe() because there is no * of_platform_bus_remove(). Instead, we manually instantiate every DMA * channel object. */ for_each_child_of_node(op->dev.of_node, child) { if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) { fsl_dma_chan_probe(fdev, child, FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN, "fsl,eloplus-dma-channel"); } if (of_device_is_compatible(child, "fsl,elo-dma-channel")) { fsl_dma_chan_probe(fdev, child, FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN, "fsl,elo-dma-channel"); } } /* * Hookup the IRQ handler(s) * * If we have a per-controller interrupt, we prefer that to the * per-channel interrupts to reduce the number of shared interrupt * handlers on the same IRQ line */ err = fsldma_request_irqs(fdev); if (err) { dev_err(fdev->dev, "unable to request IRQs\n"); goto out_free_fdev; } dma_async_device_register(&fdev->common); return 0; out_free_fdev: irq_dispose_mapping(fdev->irq); kfree(fdev); out_return: return err; } static int fsldma_of_remove(struct platform_device *op) { struct fsldma_device *fdev; unsigned int i; fdev = dev_get_drvdata(&op->dev); dma_async_device_unregister(&fdev->common); fsldma_free_irqs(fdev); for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { if (fdev->chan[i]) fsl_dma_chan_remove(fdev->chan[i]); } iounmap(fdev->regs); dev_set_drvdata(&op->dev, NULL); kfree(fdev); return 0; } static const struct of_device_id fsldma_of_ids[] = { { .compatible = "fsl,eloplus-dma", }, { .compatible = "fsl,elo-dma", }, {} }; static struct platform_driver fsldma_of_driver = { .driver = { .name = "fsl-elo-dma", .owner = THIS_MODULE, .of_match_table = fsldma_of_ids, }, .probe = fsldma_of_probe, .remove = fsldma_of_remove, }; /*----------------------------------------------------------------------------*/ /* Module Init / Exit */ /*----------------------------------------------------------------------------*/ static __init int fsldma_init(void) { pr_info("Freescale Elo / Elo Plus DMA driver\n"); return 
platform_driver_register(&fsldma_of_driver); } static void __exit fsldma_exit(void) { platform_driver_unregister(&fsldma_of_driver); } subsys_initcall(fsldma_init); module_exit(fsldma_exit); MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver"); MODULE_LICENSE("GPL");
gpl-2.0
srfarias/srfarias-kernel
drivers/acpi/acpica/hwgpe.c
4936
13499
/****************************************************************************** * * Module Name: hwgpe - Low level GPE enable/disable/clear functions * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acevents.h" #define _COMPONENT ACPI_HARDWARE ACPI_MODULE_NAME("hwgpe") #if (!ACPI_REDUCED_HARDWARE) /* Entire module */ /* Local prototypes */ static acpi_status acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, struct acpi_gpe_block_info *gpe_block, void *context); /****************************************************************************** * * FUNCTION: acpi_hw_get_gpe_register_bit * * PARAMETERS: gpe_event_info - Info block for the GPE * gpe_register_info - Info block for the GPE register * * RETURN: Register mask with a one in the GPE bit position * * DESCRIPTION: Compute the register mask for this GPE. One bit is set in the * correct position for the input GPE. * ******************************************************************************/ u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info, struct acpi_gpe_register_info *gpe_register_info) { return (u32)1 << (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number); } /****************************************************************************** * * FUNCTION: acpi_hw_low_set_gpe * * PARAMETERS: gpe_event_info - Info block for the GPE to be disabled * action - Enable or disable * * RETURN: Status * * DESCRIPTION: Enable or disable a single GPE in the parent enable register. 
* ******************************************************************************/ acpi_status acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action) { struct acpi_gpe_register_info *gpe_register_info; acpi_status status; u32 enable_mask; u32 register_bit; ACPI_FUNCTION_ENTRY(); /* Get the info block for the entire GPE register */ gpe_register_info = gpe_event_info->register_info; if (!gpe_register_info) { return (AE_NOT_EXIST); } /* Get current value of the enable register that contains this GPE */ status = acpi_hw_read(&enable_mask, &gpe_register_info->enable_address); if (ACPI_FAILURE(status)) { return (status); } /* Set or clear just the bit that corresponds to this GPE */ register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info); switch (action) { case ACPI_GPE_CONDITIONAL_ENABLE: /* Only enable if the enable_for_run bit is set */ if (!(register_bit & gpe_register_info->enable_for_run)) { return (AE_BAD_PARAMETER); } /*lint -fallthrough */ case ACPI_GPE_ENABLE: ACPI_SET_BIT(enable_mask, register_bit); break; case ACPI_GPE_DISABLE: ACPI_CLEAR_BIT(enable_mask, register_bit); break; default: ACPI_ERROR((AE_INFO, "Invalid GPE Action, %u\n", action)); return (AE_BAD_PARAMETER); } /* Write the updated enable mask */ status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address); return (status); } /****************************************************************************** * * FUNCTION: acpi_hw_clear_gpe * * PARAMETERS: gpe_event_info - Info block for the GPE to be cleared * * RETURN: Status * * DESCRIPTION: Clear the status bit for a single GPE. 
* ******************************************************************************/ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info) { struct acpi_gpe_register_info *gpe_register_info; acpi_status status; u32 register_bit; ACPI_FUNCTION_ENTRY(); /* Get the info block for the entire GPE register */ gpe_register_info = gpe_event_info->register_info; if (!gpe_register_info) { return (AE_NOT_EXIST); } /* * Write a one to the appropriate bit in the status register to * clear this GPE. */ register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info); status = acpi_hw_write(register_bit, &gpe_register_info->status_address); return (status); } /****************************************************************************** * * FUNCTION: acpi_hw_get_gpe_status * * PARAMETERS: gpe_event_info - Info block for the GPE to queried * event_status - Where the GPE status is returned * * RETURN: Status * * DESCRIPTION: Return the status of a single GPE. * ******************************************************************************/ acpi_status acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info, acpi_event_status * event_status) { u32 in_byte; u32 register_bit; struct acpi_gpe_register_info *gpe_register_info; acpi_event_status local_event_status = 0; acpi_status status; ACPI_FUNCTION_ENTRY(); if (!event_status) { return (AE_BAD_PARAMETER); } /* Get the info block for the entire GPE register */ gpe_register_info = gpe_event_info->register_info; /* Get the register bitmask for this GPE */ register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info); /* GPE currently enabled? (enabled for runtime?) */ if (register_bit & gpe_register_info->enable_for_run) { local_event_status |= ACPI_EVENT_FLAG_ENABLED; } /* GPE enabled for wake? */ if (register_bit & gpe_register_info->enable_for_wake) { local_event_status |= ACPI_EVENT_FLAG_WAKE_ENABLED; } /* GPE currently active (status bit == 1)? 
*/ status = acpi_hw_read(&in_byte, &gpe_register_info->status_address); if (ACPI_FAILURE(status)) { return (status); } if (register_bit & in_byte) { local_event_status |= ACPI_EVENT_FLAG_SET; } /* Set return value */ (*event_status) = local_event_status; return (AE_OK); } /****************************************************************************** * * FUNCTION: acpi_hw_disable_gpe_block * * PARAMETERS: gpe_xrupt_info - GPE Interrupt info * gpe_block - Gpe Block info * * RETURN: Status * * DESCRIPTION: Disable all GPEs within a single GPE block * ******************************************************************************/ acpi_status acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, struct acpi_gpe_block_info *gpe_block, void *context) { u32 i; acpi_status status; /* Examine each GPE Register within the block */ for (i = 0; i < gpe_block->register_count; i++) { /* Disable all GPEs in this register */ status = acpi_hw_write(0x00, &gpe_block->register_info[i].enable_address); if (ACPI_FAILURE(status)) { return (status); } } return (AE_OK); } /****************************************************************************** * * FUNCTION: acpi_hw_clear_gpe_block * * PARAMETERS: gpe_xrupt_info - GPE Interrupt info * gpe_block - Gpe Block info * * RETURN: Status * * DESCRIPTION: Clear status bits for all GPEs within a single GPE block * ******************************************************************************/ acpi_status acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, struct acpi_gpe_block_info *gpe_block, void *context) { u32 i; acpi_status status; /* Examine each GPE Register within the block */ for (i = 0; i < gpe_block->register_count; i++) { /* Clear status on all GPEs in this register */ status = acpi_hw_write(0xFF, &gpe_block->register_info[i].status_address); if (ACPI_FAILURE(status)) { return (status); } } return (AE_OK); } /****************************************************************************** * * 
FUNCTION: acpi_hw_enable_runtime_gpe_block * * PARAMETERS: gpe_xrupt_info - GPE Interrupt info * gpe_block - Gpe Block info * * RETURN: Status * * DESCRIPTION: Enable all "runtime" GPEs within a single GPE block. Includes * combination wake/run GPEs. * ******************************************************************************/ acpi_status acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, struct acpi_gpe_block_info *gpe_block, void *context) { u32 i; acpi_status status; /* NOTE: assumes that all GPEs are currently disabled */ /* Examine each GPE Register within the block */ for (i = 0; i < gpe_block->register_count; i++) { if (!gpe_block->register_info[i].enable_for_run) { continue; } /* Enable all "runtime" GPEs in this register */ status = acpi_hw_write(gpe_block->register_info[i].enable_for_run, &gpe_block->register_info[i].enable_address); if (ACPI_FAILURE(status)) { return (status); } } return (AE_OK); } /****************************************************************************** * * FUNCTION: acpi_hw_enable_wakeup_gpe_block * * PARAMETERS: gpe_xrupt_info - GPE Interrupt info * gpe_block - Gpe Block info * * RETURN: Status * * DESCRIPTION: Enable all "wake" GPEs within a single GPE block. Includes * combination wake/run GPEs. 
* ******************************************************************************/ static acpi_status acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, struct acpi_gpe_block_info *gpe_block, void *context) { u32 i; acpi_status status; /* Examine each GPE Register within the block */ for (i = 0; i < gpe_block->register_count; i++) { if (!gpe_block->register_info[i].enable_for_wake) { continue; } /* Enable all "wake" GPEs in this register */ status = acpi_hw_write(gpe_block->register_info[i].enable_for_wake, &gpe_block->register_info[i].enable_address); if (ACPI_FAILURE(status)) { return (status); } } return (AE_OK); } /****************************************************************************** * * FUNCTION: acpi_hw_disable_all_gpes * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Disable and clear all GPEs in all GPE blocks * ******************************************************************************/ acpi_status acpi_hw_disable_all_gpes(void) { acpi_status status; ACPI_FUNCTION_TRACE(hw_disable_all_gpes); status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL); status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block, NULL); return_ACPI_STATUS(status); } /****************************************************************************** * * FUNCTION: acpi_hw_enable_all_runtime_gpes * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks * ******************************************************************************/ acpi_status acpi_hw_enable_all_runtime_gpes(void) { acpi_status status; ACPI_FUNCTION_TRACE(hw_enable_all_runtime_gpes); status = acpi_ev_walk_gpe_list(acpi_hw_enable_runtime_gpe_block, NULL); return_ACPI_STATUS(status); } /****************************************************************************** * * FUNCTION: acpi_hw_enable_all_wakeup_gpes * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Enable all "wakeup" GPEs, in all GPE blocks * 
******************************************************************************/ acpi_status acpi_hw_enable_all_wakeup_gpes(void) { acpi_status status; ACPI_FUNCTION_TRACE(hw_enable_all_wakeup_gpes); status = acpi_ev_walk_gpe_list(acpi_hw_enable_wakeup_gpe_block, NULL); return_ACPI_STATUS(status); } #endif /* !ACPI_REDUCED_HARDWARE */
gpl-2.0
FG6Q-Dev/android_kernel_quanta_fg6q
drivers/mtd/nand/cmx270_nand.c
4936
5642
/* * linux/drivers/mtd/nand/cmx270-nand.c * * Copyright (C) 2006 Compulab, Ltd. * Mike Rapoport <mike@compulab.co.il> * * Derived from drivers/mtd/nand/h1910.c * Copyright (C) 2002 Marius Gröger (mag@sysgo.de) * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de) * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Overview: * This is a device driver for the NAND flash device found on the * CM-X270 board. */ #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <linux/slab.h> #include <linux/gpio.h> #include <linux/module.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/mach-types.h> #include <mach/pxa2xx-regs.h> #define GPIO_NAND_CS (11) #define GPIO_NAND_RB (89) /* MTD structure for CM-X270 board */ static struct mtd_info *cmx270_nand_mtd; /* remaped IO address of the device */ static void __iomem *cmx270_nand_io; /* * Define static partitions for flash device */ static struct mtd_partition partition_info[] = { [0] = { .name = "cmx270-0", .offset = 0, .size = MTDPART_SIZ_FULL } }; #define NUM_PARTITIONS (ARRAY_SIZE(partition_info)) static u_char cmx270_read_byte(struct mtd_info *mtd) { struct nand_chip *this = mtd->priv; return (readl(this->IO_ADDR_R) >> 16); } static void cmx270_write_buf(struct mtd_info *mtd, const u_char *buf, int len) { int i; struct nand_chip *this = mtd->priv; for (i=0; i<len; i++) writel((*buf++ << 16), this->IO_ADDR_W); } static void cmx270_read_buf(struct mtd_info *mtd, u_char *buf, int len) { int i; struct nand_chip *this = mtd->priv; for (i=0; i<len; i++) *buf++ = readl(this->IO_ADDR_R) >> 16; } static int cmx270_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) { int i; struct nand_chip *this = mtd->priv; for (i=0; i<len; i++) if (buf[i] != (u_char)(readl(this->IO_ADDR_R) >> 16)) return -EFAULT; return 0; } static inline void nand_cs_on(void) { 
gpio_set_value(GPIO_NAND_CS, 0); } static void nand_cs_off(void) { dsb(); gpio_set_value(GPIO_NAND_CS, 1); } /* * hardware specific access to control-lines */ static void cmx270_hwcontrol(struct mtd_info *mtd, int dat, unsigned int ctrl) { struct nand_chip* this = mtd->priv; unsigned int nandaddr = (unsigned int)this->IO_ADDR_W; dsb(); if (ctrl & NAND_CTRL_CHANGE) { if ( ctrl & NAND_ALE ) nandaddr |= (1 << 3); else nandaddr &= ~(1 << 3); if ( ctrl & NAND_CLE ) nandaddr |= (1 << 2); else nandaddr &= ~(1 << 2); if ( ctrl & NAND_NCE ) nand_cs_on(); else nand_cs_off(); } dsb(); this->IO_ADDR_W = (void __iomem*)nandaddr; if (dat != NAND_CMD_NONE) writel((dat << 16), this->IO_ADDR_W); dsb(); } /* * read device ready pin */ static int cmx270_device_ready(struct mtd_info *mtd) { dsb(); return (gpio_get_value(GPIO_NAND_RB)); } /* * Main initialization routine */ static int __init cmx270_init(void) { struct nand_chip *this; int ret; if (!(machine_is_armcore() && cpu_is_pxa27x())) return -ENODEV; ret = gpio_request(GPIO_NAND_CS, "NAND CS"); if (ret) { pr_warning("CM-X270: failed to request NAND CS gpio\n"); return ret; } gpio_direction_output(GPIO_NAND_CS, 1); ret = gpio_request(GPIO_NAND_RB, "NAND R/B"); if (ret) { pr_warning("CM-X270: failed to request NAND R/B gpio\n"); goto err_gpio_request; } gpio_direction_input(GPIO_NAND_RB); /* Allocate memory for MTD device structure and private data */ cmx270_nand_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); if (!cmx270_nand_mtd) { pr_debug("Unable to allocate CM-X270 NAND MTD device structure.\n"); ret = -ENOMEM; goto err_kzalloc; } cmx270_nand_io = ioremap(PXA_CS1_PHYS, 12); if (!cmx270_nand_io) { pr_debug("Unable to ioremap NAND device\n"); ret = -EINVAL; goto err_ioremap; } /* Get pointer to private data */ this = (struct nand_chip *)(&cmx270_nand_mtd[1]); /* Link the private data with the MTD structure */ cmx270_nand_mtd->owner = THIS_MODULE; cmx270_nand_mtd->priv = this; /* insert callbacks */ 
this->IO_ADDR_R = cmx270_nand_io; this->IO_ADDR_W = cmx270_nand_io; this->cmd_ctrl = cmx270_hwcontrol; this->dev_ready = cmx270_device_ready; /* 15 us command delay time */ this->chip_delay = 20; this->ecc.mode = NAND_ECC_SOFT; /* read/write functions */ this->read_byte = cmx270_read_byte; this->read_buf = cmx270_read_buf; this->write_buf = cmx270_write_buf; this->verify_buf = cmx270_verify_buf; /* Scan to find existence of the device */ if (nand_scan (cmx270_nand_mtd, 1)) { pr_notice("No NAND device\n"); ret = -ENXIO; goto err_scan; } /* Register the partitions */ ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, NULL, partition_info, NUM_PARTITIONS); if (ret) goto err_scan; /* Return happy */ return 0; err_scan: iounmap(cmx270_nand_io); err_ioremap: kfree(cmx270_nand_mtd); err_kzalloc: gpio_free(GPIO_NAND_RB); err_gpio_request: gpio_free(GPIO_NAND_CS); return ret; } module_init(cmx270_init); /* * Clean up routine */ static void __exit cmx270_cleanup(void) { /* Release resources, unregister device */ nand_release(cmx270_nand_mtd); gpio_free(GPIO_NAND_RB); gpio_free(GPIO_NAND_CS); iounmap(cmx270_nand_io); /* Free the MTD device structure */ kfree (cmx270_nand_mtd); } module_exit(cmx270_cleanup); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>"); MODULE_DESCRIPTION("NAND flash driver for Compulab CM-X270 Module");
gpl-2.0
cristianomatos/crKernel-mako
drivers/staging/vt6656/wpactl.c
4936
23925
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * * File: wpactl.c * * Purpose: handle wpa supplicant ioctl input/out functions * * Author: Lyndon Chen * * Date: July 28, 2006 * * Functions: * * Revision History: * */ #include "wpactl.h" #include "key.h" #include "mac.h" #include "device.h" #include "wmgr.h" #include "iocmd.h" #include "iowpa.h" #include "control.h" #include "rndis.h" #include "rf.h" /*--------------------- Static Definitions -------------------------*/ #define VIAWGET_WPA_MAX_BUF_SIZE 1024 static const int frequency_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484 }; /*--------------------- Static Classes ----------------------------*/ /*--------------------- Static Variables --------------------------*/ static int msglevel = MSG_LEVEL_INFO; /*--------------------- Static Functions --------------------------*/ /*--------------------- Export Variables --------------------------*/ static void wpadev_setup(struct net_device *dev) { dev->type = ARPHRD_IEEE80211; dev->hard_header_len = ETH_HLEN; dev->mtu = 2048; dev->addr_len = ETH_ALEN; dev->tx_queue_len = 1000; memset(dev->broadcast, 0xFF, ETH_ALEN); dev->flags = IFF_BROADCAST | IFF_MULTICAST; } /* * Description: * register 
netdev for wpa supplicant deamon * * Parameters: * In: * pDevice - * enable - * Out: * * Return Value: * */ static int wpa_init_wpadev(PSDevice pDevice) { PSDevice wpadev_priv; struct net_device *dev = pDevice->dev; int ret = 0; pDevice->wpadev = alloc_netdev(sizeof(PSDevice), "vntwpa", wpadev_setup); if (pDevice->wpadev == NULL) return -ENOMEM; wpadev_priv = netdev_priv(pDevice->wpadev); *wpadev_priv = *pDevice; memcpy(pDevice->wpadev->dev_addr, dev->dev_addr, ETH_ALEN); pDevice->wpadev->base_addr = dev->base_addr; pDevice->wpadev->irq = dev->irq; pDevice->wpadev->mem_start = dev->mem_start; pDevice->wpadev->mem_end = dev->mem_end; ret = register_netdev(pDevice->wpadev); if (ret) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: register_netdev(WPA) failed!\n", dev->name); free_netdev(pDevice->wpadev); return -1; } if (pDevice->skb == NULL) { pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz); if (pDevice->skb == NULL) return -ENOMEM; } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Registered netdev %s for WPA management\n", dev->name, pDevice->wpadev->name); return 0; } /* * Description: * unregister net_device (wpadev) * * Parameters: * In: * pDevice - * Out: * * Return Value: * */ static int wpa_release_wpadev(PSDevice pDevice) { if (pDevice->skb) { dev_kfree_skb(pDevice->skb); pDevice->skb = NULL; } if (pDevice->wpadev) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n", pDevice->dev->name, pDevice->wpadev->name); unregister_netdev(pDevice->wpadev); free_netdev(pDevice->wpadev); pDevice->wpadev = NULL; } return 0; } /* * Description: * Set enable/disable dev for wpa supplicant deamon * * Parameters: * In: * pDevice - * val - * Out: * * Return Value: * */ int wpa_set_wpadev(PSDevice pDevice, int val) { if (val) return wpa_init_wpadev(pDevice); return wpa_release_wpadev(pDevice); } /* * Description: * Set WPA algorithm & keys * * Parameters: * In: * pDevice - * param - * Out: * * Return Value: * */ int wpa_set_keys(PSDevice pDevice, void *ctx, BOOL 
fcpfkernel) { struct viawget_wpa_param *param = ctx; PSMgmtObject pMgmt = &pDevice->sMgmtObj; DWORD dwKeyIndex = 0; BYTE abyKey[MAX_KEY_LEN]; BYTE abySeq[MAX_KEY_LEN]; QWORD KeyRSC; BYTE byKeyDecMode = KEY_CTL_WEP; int ret = 0; int uu; int ii; if (param->u.wpa_key.alg_name > WPA_ALG_CCMP) return -EINVAL; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "param->u.wpa_key.alg_name = %d \n", param->u.wpa_key.alg_name); if (param->u.wpa_key.alg_name == WPA_ALG_NONE) { pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled; pDevice->bEncryptionEnable = FALSE; pDevice->byKeyIndex = 0; pDevice->bTransmitKey = FALSE; for (uu=0; uu<MAX_KEY_TABLE; uu++) { MACvDisableKeyEntry(pDevice, uu); } return ret; } if (param->u.wpa_key.key && param->u.wpa_key.key_len > sizeof(abyKey)) return -EINVAL; spin_unlock_irq(&pDevice->lock); if (param->u.wpa_key.key && fcpfkernel) { memcpy(&abyKey[0], param->u.wpa_key.key, param->u.wpa_key.key_len); } else { if (param->u.wpa_key.key && copy_from_user(&abyKey[0], param->u.wpa_key.key, param->u.wpa_key.key_len)) { spin_lock_irq(&pDevice->lock); return -EINVAL; } } spin_lock_irq(&pDevice->lock); dwKeyIndex = (DWORD)(param->u.wpa_key.key_index); if (param->u.wpa_key.alg_name == WPA_ALG_WEP) { if (dwKeyIndex > 3) { return -EINVAL; } else { if (param->u.wpa_key.set_tx) { pDevice->byKeyIndex = (BYTE)dwKeyIndex; pDevice->bTransmitKey = TRUE; dwKeyIndex |= (1 << 31); } KeybSetDefaultKey( pDevice, &(pDevice->sKey), dwKeyIndex & ~(BIT30 | USE_KEYRSC), param->u.wpa_key.key_len, NULL, abyKey, KEY_CTL_WEP ); } pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled; pDevice->bEncryptionEnable = TRUE; return ret; } if (param->u.wpa_key.seq && param->u.wpa_key.seq_len > sizeof(abySeq)) return -EINVAL; spin_unlock_irq(&pDevice->lock); if (param->u.wpa_key.seq && fcpfkernel) { memcpy(&abySeq[0], param->u.wpa_key.seq, param->u.wpa_key.seq_len); } else { if (param->u.wpa_key.seq && copy_from_user(&abySeq[0], param->u.wpa_key.seq, param->u.wpa_key.seq_len)) { 
spin_lock_irq(&pDevice->lock); return -EINVAL; } } spin_lock_irq(&pDevice->lock); if (param->u.wpa_key.seq_len > 0) { for (ii = 0 ; ii < param->u.wpa_key.seq_len ; ii++) { if (ii < 4) LODWORD(KeyRSC) |= (abySeq[ii] << (ii * 8)); else HIDWORD(KeyRSC) |= (abySeq[ii] << ((ii-4) * 8)); } dwKeyIndex |= 1 << 29; } if (param->u.wpa_key.key_index >= MAX_GROUP_KEY) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return dwKeyIndex > 3\n"); return -EINVAL; } if (param->u.wpa_key.alg_name == WPA_ALG_TKIP) { pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled; } if (param->u.wpa_key.alg_name == WPA_ALG_CCMP) { pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled; } if (param->u.wpa_key.set_tx) dwKeyIndex |= (1 << 31); if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) byKeyDecMode = KEY_CTL_CCMP; else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) byKeyDecMode = KEY_CTL_TKIP; else byKeyDecMode = KEY_CTL_WEP; // Fix HCT test that set 256 bits KEY and Ndis802_11Encryption3Enabled if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) { if (param->u.wpa_key.key_len == MAX_KEY_LEN) byKeyDecMode = KEY_CTL_TKIP; else if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN) byKeyDecMode = KEY_CTL_WEP; else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN) byKeyDecMode = KEY_CTL_WEP; } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) { if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN) byKeyDecMode = KEY_CTL_WEP; else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN) byKeyDecMode = KEY_CTL_WEP; } // Check TKIP key length if ((byKeyDecMode == KEY_CTL_TKIP) && (param->u.wpa_key.key_len != MAX_KEY_LEN)) { // TKIP Key must be 256 bits DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return- TKIP Key must be 256 bits!\n"); return -EINVAL; } // Check AES key length if ((byKeyDecMode == KEY_CTL_CCMP) && (param->u.wpa_key.key_len != AES_KEY_LEN)) { // AES Key must be 128 bits DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return - AES Key must be 128 bits\n"); 
return -EINVAL; } if (is_broadcast_ether_addr(&param->addr[0]) || (param->addr == NULL)) { /* if broadcast, set the key as every key entry's group key */ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Groupe Key Assign.\n"); if ((KeybSetAllGroupKey(pDevice, &(pDevice->sKey), dwKeyIndex, param->u.wpa_key.key_len, (PQWORD) &(KeyRSC), (PBYTE)abyKey, byKeyDecMode ) == TRUE) && (KeybSetDefaultKey(pDevice, &(pDevice->sKey), dwKeyIndex, param->u.wpa_key.key_len, (PQWORD) &(KeyRSC), (PBYTE)abyKey, byKeyDecMode ) == TRUE) ) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "GROUP Key Assign.\n"); } else { return -EINVAL; } } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Assign.\n"); // BSSID not 0xffffffffffff // Pairwise Key can't be WEP if (byKeyDecMode == KEY_CTL_WEP) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key can't be WEP\n"); return -EINVAL; } dwKeyIndex |= (1 << 30); // set pairwise key if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) { //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA - WMAC_CONFIG_IBSS_STA\n")); return -EINVAL; } if (KeybSetKey(pDevice, &(pDevice->sKey), &param->addr[0], dwKeyIndex, param->u.wpa_key.key_len, (PQWORD) &(KeyRSC), (PBYTE)abyKey, byKeyDecMode ) == TRUE) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Set\n"); } else { // Key Table Full if (!compare_ether_addr(&param->addr[0], pDevice->abyBSSID)) { //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA -Key Table Full.2\n")); return -EINVAL; } else { // Save Key and configure just before associate/reassociate to BSSID // we do not implement now return -EINVAL; } } } // BSSID not 0xffffffffffff if ((ret == 0) && ((param->u.wpa_key.set_tx) != 0)) { pDevice->byKeyIndex = (BYTE)param->u.wpa_key.key_index; pDevice->bTransmitKey = TRUE; } pDevice->bEncryptionEnable = TRUE; return ret; } /* * Description: * enable wpa auth & mode * * Parameters: * In: * pDevice - * param - * Out: * * Return Value: * */ static int wpa_set_wpa(PSDevice pDevice, struct viawget_wpa_param *param) { PSMgmtObject pMgmt = 
&pDevice->sMgmtObj; int ret = 0; pMgmt->eAuthenMode = WMAC_AUTH_OPEN; pMgmt->bShareKeyAlgorithm = FALSE; return ret; } /* * Description: * set disassociate * * Parameters: * In: * pDevice - * param - * Out: * * Return Value: * */ static int wpa_set_disassociate(PSDevice pDevice, struct viawget_wpa_param *param) { PSMgmtObject pMgmt = &pDevice->sMgmtObj; int ret = 0; spin_lock_irq(&pDevice->lock); if (pDevice->bLinkPass) { if (!memcmp(param->addr, pMgmt->abyCurrBSSID, 6)) bScheduleCommand((void *)pDevice, WLAN_CMD_DISASSOCIATE, NULL); } spin_unlock_irq(&pDevice->lock); return ret; } /* * Description: * enable scan process * * Parameters: * In: * pDevice - * param - * Out: * * Return Value: * */ static int wpa_set_scan(PSDevice pDevice, struct viawget_wpa_param *param) { int ret = 0; /**set ap_scan=1&&scan_ssid=1 under hidden ssid mode**/ PSMgmtObject pMgmt = &pDevice->sMgmtObj; PWLAN_IE_SSID pItemSSID; printk("wpa_set_scan-->desired [ssid=%s,ssid_len=%d]\n", param->u.scan_req.ssid,param->u.scan_req.ssid_len); // Set the SSID memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1); pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID; pItemSSID->byElementID = WLAN_EID_SSID; memcpy(pItemSSID->abySSID, param->u.scan_req.ssid, param->u.scan_req.ssid_len); pItemSSID->len = param->u.scan_req.ssid_len; spin_lock_irq(&pDevice->lock); BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass); bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID); spin_unlock_irq(&pDevice->lock); return ret; } /* * Description: * get bssid * * Parameters: * In: * pDevice - * param - * Out: * * Return Value: * */ static int wpa_get_bssid(PSDevice pDevice, struct viawget_wpa_param *param) { PSMgmtObject pMgmt = &pDevice->sMgmtObj; int ret = 0; memcpy(param->u.wpa_associate.bssid, pMgmt->abyCurrBSSID, 6); return ret; } /* * Description: * get bssid * * Parameters: * In: * pDevice - * param - * Out: * * Return Value: * */ static int wpa_get_ssid(PSDevice pDevice, 
struct viawget_wpa_param *param) { PSMgmtObject pMgmt = &pDevice->sMgmtObj; PWLAN_IE_SSID pItemSSID; int ret = 0; pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID; memcpy(param->u.wpa_associate.ssid, pItemSSID->abySSID, pItemSSID->len); param->u.wpa_associate.ssid_len = pItemSSID->len; return ret; } /* * Description: * get scan results * * Parameters: * In: * pDevice - * param - * Out: * * Return Value: * */ static int wpa_get_scan(PSDevice pDevice, struct viawget_wpa_param *param) { struct viawget_scan_result *scan_buf; PSMgmtObject pMgmt = &pDevice->sMgmtObj; PWLAN_IE_SSID pItemSSID; PKnownBSS pBSS; PBYTE pBuf; int ret = 0; u16 count = 0; u16 ii; u16 jj; long ldBm; //James //add //******mike:bubble sort by stronger RSSI*****// PBYTE ptempBSS; ptempBSS = kmalloc(sizeof(KnownBSS), GFP_ATOMIC); if (ptempBSS == NULL) { printk("bubble sort kmalloc memory fail@@@\n"); ret = -ENOMEM; return ret; } for (ii = 0; ii < MAX_BSS_NUM; ii++) { for (jj = 0; jj < MAX_BSS_NUM - ii - 1; jj++) { if ((pMgmt->sBSSList[jj].bActive != TRUE) || ((pMgmt->sBSSList[jj].uRSSI > pMgmt->sBSSList[jj + 1].uRSSI) && (pMgmt->sBSSList[jj + 1].bActive != FALSE))) { memcpy(ptempBSS,&pMgmt->sBSSList[jj], sizeof(KnownBSS)); memcpy(&pMgmt->sBSSList[jj], &pMgmt->sBSSList[jj + 1], sizeof(KnownBSS)); memcpy(&pMgmt->sBSSList[jj + 1], ptempBSS, sizeof(KnownBSS)); } } } kfree(ptempBSS); count = 0; pBSS = &(pMgmt->sBSSList[0]); for (ii = 0; ii < MAX_BSS_NUM; ii++) { pBSS = &(pMgmt->sBSSList[ii]); if (!pBSS->bActive) continue; count++; } pBuf = kcalloc(count, sizeof(struct viawget_scan_result), GFP_ATOMIC); if (pBuf == NULL) { ret = -ENOMEM; return ret; } scan_buf = (struct viawget_scan_result *)pBuf; pBSS = &(pMgmt->sBSSList[0]); for (ii = 0, jj = 0; ii < MAX_BSS_NUM; ii++) { pBSS = &(pMgmt->sBSSList[ii]); if (pBSS->bActive) { if (jj >= count) break; memcpy(scan_buf->bssid, pBSS->abyBSSID, WLAN_BSSID_LEN); pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID; memcpy(scan_buf->ssid, pItemSSID->abySSID, pItemSSID->len); 
scan_buf->ssid_len = pItemSSID->len; scan_buf->freq = frequency_list[pBSS->uChannel-1]; scan_buf->caps = pBSS->wCapInfo; // DavidWang for sharemode RFvRSSITodBm(pDevice, (BYTE)(pBSS->uRSSI), &ldBm); if (-ldBm < 50) scan_buf->qual = 100; else if (-ldBm > 90) scan_buf->qual = 0; else scan_buf->qual=(40-(-ldBm-50))*100/40; //James //scan_buf->caps = pBSS->wCapInfo; //scan_buf->qual = scan_buf->noise = 0; scan_buf->level = ldBm; //scan_buf->maxrate = if (pBSS->wWPALen != 0) { scan_buf->wpa_ie_len = pBSS->wWPALen; memcpy(scan_buf->wpa_ie, pBSS->byWPAIE, pBSS->wWPALen); } if (pBSS->wRSNLen != 0) { scan_buf->rsn_ie_len = pBSS->wRSNLen; memcpy(scan_buf->rsn_ie, pBSS->byRSNIE, pBSS->wRSNLen); } scan_buf = (struct viawget_scan_result *)((PBYTE)scan_buf + sizeof(struct viawget_scan_result)); jj ++; } } if (jj < count) count = jj; if (copy_to_user(param->u.scan_results.buf, pBuf, sizeof(struct viawget_scan_result) * count)) ret = -EFAULT; param->u.scan_results.scan_count = count; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " param->u.scan_results.scan_count = %d\n", count); kfree(pBuf); return ret; } /* * Description: * set associate with AP * * Parameters: * In: * pDevice - * param - * Out: * * Return Value: * */ static int wpa_set_associate(PSDevice pDevice, struct viawget_wpa_param *param) { PSMgmtObject pMgmt = &pDevice->sMgmtObj; PWLAN_IE_SSID pItemSSID; BYTE abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; BYTE abyWPAIE[64]; int ret = 0; BOOL bwepEnabled=FALSE; // set key type & algorithm DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pairwise_suite = %d\n", param->u.wpa_associate.pairwise_suite); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "group_suite = %d\n", param->u.wpa_associate.group_suite); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "key_mgmt_suite = %d\n", param->u.wpa_associate.key_mgmt_suite); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "auth_alg = %d\n", param->u.wpa_associate.auth_alg); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "mode = %d\n", param->u.wpa_associate.mode); DBG_PRT(MSG_LEVEL_DEBUG, 
KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Roaming dBm = %d\n", param->u.wpa_associate.roam_dbm); // Davidwang if (param->u.wpa_associate.wpa_ie) { if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE)) return -EINVAL; if (copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len)) return -EFAULT; } if (param->u.wpa_associate.mode == 1) pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA; else pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA; // set bssid if (memcmp(param->u.wpa_associate.bssid, &abyNullAddr[0], 6) != 0) memcpy(pMgmt->abyDesireBSSID, param->u.wpa_associate.bssid, 6); // set ssid memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1); pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID; pItemSSID->byElementID = WLAN_EID_SSID; pItemSSID->len = param->u.wpa_associate.ssid_len; memcpy(pItemSSID->abySSID, param->u.wpa_associate.ssid, pItemSSID->len); if (param->u.wpa_associate.wpa_ie_len == 0) { if (param->u.wpa_associate.auth_alg & AUTH_ALG_SHARED_KEY) pMgmt->eAuthenMode = WMAC_AUTH_SHAREKEY; else pMgmt->eAuthenMode = WMAC_AUTH_OPEN; } else if (abyWPAIE[0] == RSN_INFO_ELEM) { if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_PSK) pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK; else pMgmt->eAuthenMode = WMAC_AUTH_WPA2; } else { if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_WPA_NONE) pMgmt->eAuthenMode = WMAC_AUTH_WPANONE; else if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_PSK) pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK; else pMgmt->eAuthenMode = WMAC_AUTH_WPA; } switch (param->u.wpa_associate.pairwise_suite) { case CIPHER_CCMP: pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled; break; case CIPHER_TKIP: pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled; break; case CIPHER_WEP40: case CIPHER_WEP104: pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled; bwepEnabled = TRUE; break; case CIPHER_NONE: if (param->u.wpa_associate.group_suite 
== CIPHER_CCMP) pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled; else pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled; break; default: pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled; } pMgmt->Roam_dbm = param->u.wpa_associate.roam_dbm; if (pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) { // @wep-sharekey pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled; pMgmt->bShareKeyAlgorithm = TRUE; } else if (pMgmt->eAuthenMode == WMAC_AUTH_OPEN) { if(bwepEnabled==TRUE) { //@open-wep pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled; } else { // @only open pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled; } } // mike save old encryption status pDevice->eOldEncryptionStatus = pDevice->eEncryptionStatus; if (pDevice->eEncryptionStatus != Ndis802_11EncryptionDisabled) pDevice->bEncryptionEnable = TRUE; else pDevice->bEncryptionEnable = FALSE; if ((pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) || ((pMgmt->eAuthenMode == WMAC_AUTH_OPEN) && (bwepEnabled==TRUE))) { // mike re-comment:open-wep && sharekey-wep needn't do initial key!! 
} else { KeyvInitTable(pDevice,&pDevice->sKey); } spin_lock_irq(&pDevice->lock); pDevice->bLinkPass = FALSE; ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_SLOW); memset(pMgmt->abyCurrBSSID, 0, 6); pMgmt->eCurrState = WMAC_STATE_IDLE; netif_stop_queue(pDevice->dev); /******* search if ap_scan=2, which is associating request in hidden ssid mode ****/ { PKnownBSS pCurr = NULL; pCurr = BSSpSearchBSSList(pDevice, pMgmt->abyDesireBSSID, pMgmt->abyDesireSSID, pDevice->eConfigPHYMode ); if (pCurr == NULL){ printk("wpa_set_associate---->hidden mode site survey before associate.......\n"); bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID); } } /****************************************************************/ bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL); spin_unlock_irq(&pDevice->lock); return ret; } /* * Description: * wpa_ioctl main function supported for wpa supplicant * * Parameters: * In: * pDevice - * iw_point - * Out: * * Return Value: * */ int wpa_ioctl(PSDevice pDevice, struct iw_point *p) { struct viawget_wpa_param *param; int ret = 0; int wpa_ioctl = 0; if (p->length < sizeof(struct viawget_wpa_param) || p->length > VIAWGET_WPA_MAX_BUF_SIZE || !p->pointer) return -EINVAL; param = kmalloc((int)p->length, GFP_KERNEL); if (param == NULL) return -ENOMEM; if (copy_from_user(param, p->pointer, p->length)) { ret = -EFAULT; goto out; } switch (param->cmd) { case VIAWGET_SET_WPA: ret = wpa_set_wpa(pDevice, param); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_WPA \n"); break; case VIAWGET_SET_KEY: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_KEY \n"); spin_lock_irq(&pDevice->lock); ret = wpa_set_keys(pDevice, param, FALSE); spin_unlock_irq(&pDevice->lock); break; case VIAWGET_SET_SCAN: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_SCAN \n"); ret = wpa_set_scan(pDevice, param); break; case VIAWGET_GET_SCAN: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SCAN\n"); ret = 
wpa_get_scan(pDevice, param); wpa_ioctl = 1; break; case VIAWGET_GET_SSID: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SSID \n"); ret = wpa_get_ssid(pDevice, param); wpa_ioctl = 1; break; case VIAWGET_GET_BSSID: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_BSSID \n"); ret = wpa_get_bssid(pDevice, param); wpa_ioctl = 1; break; case VIAWGET_SET_ASSOCIATE: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_ASSOCIATE \n"); ret = wpa_set_associate(pDevice, param); break; case VIAWGET_SET_DISASSOCIATE: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DISASSOCIATE \n"); ret = wpa_set_disassociate(pDevice, param); break; case VIAWGET_SET_DROP_UNENCRYPT: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DROP_UNENCRYPT \n"); break; case VIAWGET_SET_DEAUTHENTICATE: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DEAUTHENTICATE \n"); break; default: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ioctl: unknown cmd=%d\n", param->cmd); kfree(param); return -EOPNOTSUPP; } if ((ret == 0) && wpa_ioctl) { if (copy_to_user(p->pointer, param, p->length)) { ret = -EFAULT; goto out; } } out: kfree(param); return ret; }
gpl-2.0
Droid-Concepts/lge_kernel_mako
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
5704
3259
/************************************************************************** * * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. 
* **************************************************************************/ #include "drmP.h" #include "vmwgfx_drv.h" int vmw_mmap(struct file *filp, struct vm_area_struct *vma) { struct drm_file *file_priv; struct vmw_private *dev_priv; if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) { DRM_ERROR("Illegal attempt to mmap old fifo space.\n"); return -EINVAL; } file_priv = filp->private_data; dev_priv = vmw_priv(file_priv->minor->dev); return ttm_bo_mmap(filp, vma, &dev_priv->bdev); } static int vmw_ttm_mem_global_init(struct drm_global_reference *ref) { DRM_INFO("global init.\n"); return ttm_mem_global_init(ref->object); } static void vmw_ttm_mem_global_release(struct drm_global_reference *ref) { ttm_mem_global_release(ref->object); } int vmw_ttm_global_init(struct vmw_private *dev_priv) { struct drm_global_reference *global_ref; int ret; global_ref = &dev_priv->mem_global_ref; global_ref->global_type = DRM_GLOBAL_TTM_MEM; global_ref->size = sizeof(struct ttm_mem_global); global_ref->init = &vmw_ttm_mem_global_init; global_ref->release = &vmw_ttm_mem_global_release; ret = drm_global_item_ref(global_ref); if (unlikely(ret != 0)) { DRM_ERROR("Failed setting up TTM memory accounting.\n"); return ret; } dev_priv->bo_global_ref.mem_glob = dev_priv->mem_global_ref.object; global_ref = &dev_priv->bo_global_ref.ref; global_ref->global_type = DRM_GLOBAL_TTM_BO; global_ref->size = sizeof(struct ttm_bo_global); global_ref->init = &ttm_bo_global_init; global_ref->release = &ttm_bo_global_release; ret = drm_global_item_ref(global_ref); if (unlikely(ret != 0)) { DRM_ERROR("Failed setting up TTM buffer objects.\n"); goto out_no_bo; } return 0; out_no_bo: drm_global_item_unref(&dev_priv->mem_global_ref); return ret; } void vmw_ttm_global_release(struct vmw_private *dev_priv) { drm_global_item_unref(&dev_priv->bo_global_ref.ref); drm_global_item_unref(&dev_priv->mem_global_ref); }
gpl-2.0
SomethingExplosive/android_kernel_asus_flo
drivers/parisc/eisa_eeprom.c
10568
2980
/* * EISA "eeprom" support routines * * Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/fs.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/eisa_eeprom.h> #define EISA_EEPROM_MINOR 241 static loff_t eisa_eeprom_llseek(struct file *file, loff_t offset, int origin ) { switch (origin) { case 0: /* nothing to do */ break; case 1: offset += file->f_pos; break; case 2: offset += HPEE_MAX_LENGTH; break; } return (offset >= 0 && offset < HPEE_MAX_LENGTH) ? (file->f_pos = offset) : -EINVAL; } static ssize_t eisa_eeprom_read(struct file * file, char __user *buf, size_t count, loff_t *ppos ) { unsigned char *tmp; ssize_t ret; int i; if (*ppos < 0 || *ppos >= HPEE_MAX_LENGTH) return 0; count = *ppos + count < HPEE_MAX_LENGTH ? 
count : HPEE_MAX_LENGTH - *ppos; tmp = kmalloc(count, GFP_KERNEL); if (tmp) { for (i = 0; i < count; i++) tmp[i] = readb(eisa_eeprom_addr+(*ppos)++); if (copy_to_user (buf, tmp, count)) ret = -EFAULT; else ret = count; kfree (tmp); } else ret = -ENOMEM; return ret; } static int eisa_eeprom_open(struct inode *inode, struct file *file) { if (file->f_mode & FMODE_WRITE) return -EINVAL; return 0; } static int eisa_eeprom_release(struct inode *inode, struct file *file) { return 0; } /* * The various file operations we support. */ static const struct file_operations eisa_eeprom_fops = { .owner = THIS_MODULE, .llseek = eisa_eeprom_llseek, .read = eisa_eeprom_read, .open = eisa_eeprom_open, .release = eisa_eeprom_release, }; static struct miscdevice eisa_eeprom_dev = { EISA_EEPROM_MINOR, "eisa_eeprom", &eisa_eeprom_fops }; static int __init eisa_eeprom_init(void) { int retval; if (!eisa_eeprom_addr) return -ENODEV; retval = misc_register(&eisa_eeprom_dev); if (retval < 0) { printk(KERN_ERR "EISA EEPROM: cannot register misc device.\n"); return retval; } printk(KERN_INFO "EISA EEPROM at 0x%p\n", eisa_eeprom_addr); return 0; } MODULE_LICENSE("GPL"); module_init(eisa_eeprom_init);
gpl-2.0
jamison904/Galaxy_Note_3
drivers/parisc/eisa_eeprom.c
10568
2980
/* * EISA "eeprom" support routines * * Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/fs.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/eisa_eeprom.h> #define EISA_EEPROM_MINOR 241 static loff_t eisa_eeprom_llseek(struct file *file, loff_t offset, int origin ) { switch (origin) { case 0: /* nothing to do */ break; case 1: offset += file->f_pos; break; case 2: offset += HPEE_MAX_LENGTH; break; } return (offset >= 0 && offset < HPEE_MAX_LENGTH) ? (file->f_pos = offset) : -EINVAL; } static ssize_t eisa_eeprom_read(struct file * file, char __user *buf, size_t count, loff_t *ppos ) { unsigned char *tmp; ssize_t ret; int i; if (*ppos < 0 || *ppos >= HPEE_MAX_LENGTH) return 0; count = *ppos + count < HPEE_MAX_LENGTH ? 
count : HPEE_MAX_LENGTH - *ppos; tmp = kmalloc(count, GFP_KERNEL); if (tmp) { for (i = 0; i < count; i++) tmp[i] = readb(eisa_eeprom_addr+(*ppos)++); if (copy_to_user (buf, tmp, count)) ret = -EFAULT; else ret = count; kfree (tmp); } else ret = -ENOMEM; return ret; } static int eisa_eeprom_open(struct inode *inode, struct file *file) { if (file->f_mode & FMODE_WRITE) return -EINVAL; return 0; } static int eisa_eeprom_release(struct inode *inode, struct file *file) { return 0; } /* * The various file operations we support. */ static const struct file_operations eisa_eeprom_fops = { .owner = THIS_MODULE, .llseek = eisa_eeprom_llseek, .read = eisa_eeprom_read, .open = eisa_eeprom_open, .release = eisa_eeprom_release, }; static struct miscdevice eisa_eeprom_dev = { EISA_EEPROM_MINOR, "eisa_eeprom", &eisa_eeprom_fops }; static int __init eisa_eeprom_init(void) { int retval; if (!eisa_eeprom_addr) return -ENODEV; retval = misc_register(&eisa_eeprom_dev); if (retval < 0) { printk(KERN_ERR "EISA EEPROM: cannot register misc device.\n"); return retval; } printk(KERN_INFO "EISA EEPROM at 0x%p\n", eisa_eeprom_addr); return 0; } MODULE_LICENSE("GPL"); module_init(eisa_eeprom_init);
gpl-2.0
lg-devs/android_kernel_lge_msm8974
drivers/felica/felica_rws.c
73
6999
/* * felica_rws.c * */ /* * INCLUDE FILES FOR MODULE */ #include "felica_rws.h" #include "felica_gpio.h" #include "felica_test.h" /* * DEFINE */ enum{ RWS_AVAILABLE = 0, RWS_NOT_AVAILABLE, }; /* Debug intent */ /* #define FELICA_INTENT "my.andr.u5/.FeliCaTest" */ #define FELICA_INTENT "com.felicanetworks.mfc/com.felicanetworks.adhoc.AdhocReceiver" /* * INTERNAL DEFINITION */ static int isopen = 0; // 0 : No open 1 : Open /* * FUNCTION PROTOTYPE */ static void felica_int_low_work(struct work_struct *data); static DECLARE_DELAYED_WORK(felica_int_work, felica_int_low_work); /* * FUNCTION DEFINITION */ static int invoke_felica_apk(void) { char *argv[] = { "/system/bin/sh","/system/bin/am", "start", "-n", FELICA_INTENT, "--activity-clear-top", NULL }; //static char *envp[] = {FELICA_LD_LIBRARY_PATH,FELICA_BOOTCLASSPATH,FELICA_PATH,NULL }; static char *envp[] = {FELICA_PATH, NULL }; int rc = 0; FELICA_DEBUG_MSG_HIGH("[FELICA_RWS] invoke felica app... \n"); rc = call_usermodehelper( argv[0], argv, envp, UMH_WAIT_EXEC ); FELICA_DEBUG_MSG_HIGH("[FELICA_RWS] felica app result : %d \n", rc); return rc; } static void felica_int_low_work(struct work_struct *data) { int rc = 0; lock_felica_wake_lock(); disable_irq_nosync(gpio_to_irq(felica_get_int_gpio_num())); usermodehelper_enable(); FELICA_DEBUG_MSG_LOW("[FELICA_RWS] felica_int_low_work - start \n"); rc = invoke_felica_apk(); if(rc) { FELICA_DEBUG_MSG_HIGH("[FELICA_RWS] Error - invoke app \n"); unlock_felica_wake_lock(); } FELICA_DEBUG_MSG_LOW("[FELICA_RWS] felica_int_low_work - end \n"); enable_irq(gpio_to_irq(felica_get_int_gpio_num())); } static irqreturn_t felica_int_low_isr(int irq, void *dev_id) { schedule_delayed_work(&felica_int_work,0); return IRQ_HANDLED; } /* * Description : MFC calls this function using close method(void open()) of FileOutputStream class * When this fuction is excuted, set PON to Low. 
* Input : None * Output : Success : 0 Fail : Other */ static int felica_rws_open (struct inode *inode, struct file *fp) { int rc = 0; if(1 == isopen) { FELICA_DEBUG_MSG_HIGH("[FELICA_RWS] felica_rws_open - already open \n"); return -1; } else { FELICA_DEBUG_MSG_LOW("[FELICA_RWS] felica_rws_open - start \n"); isopen = 1; } rc = felica_gpio_open(felica_get_int_gpio_num(), GPIO_DIRECTION_IN, GPIO_HIGH_VALUE); FELICA_DEBUG_MSG_LOW("[FELICA_RWS] felica_rws_open - end \n"); #ifdef FELICA_FN_DEVICE_TEST FELICA_DEBUG_MSG_LOW("[FELICA_RWS] felica_rws_open - result(%d) \n",result_open_rws); return result_open_rws; #else return rc; #endif } /* * Description: MFC calls this function using read method(int read()) of FileInputStream class * Input: None * Output: INT low : RWS not available INT high : available */ static ssize_t felica_rws_read(struct file *fp, char *buf, size_t count, loff_t *pos) { int rc = 0; int getvalue = GPIO_HIGH_VALUE; /* Default status*/ FELICA_DEBUG_MSG_LOW("[FELICA_RWS] felica_rws_read - start \n"); /* Check error */ if(NULL == fp) { FELICA_DEBUG_MSG_HIGH("[FELICA_RWS] ERROR fp is NULL \n"); return -1; } if(NULL == buf) { FELICA_DEBUG_MSG_HIGH("[FELICA_RWS] ERROR buf is NULL \n"); return -1; } if(1 != count) { FELICA_DEBUG_MSG_HIGH("[FELICA_RWS] ERROR count(%d) \n",count); return -1; } if(NULL == pos) { FELICA_DEBUG_MSG_HIGH("[FELICA_RWS] ERROR pos is NULL \n"); return -1; } /* Get GPIO value */ getvalue = felica_gpio_read(felica_get_int_gpio_num()); if((GPIO_LOW_VALUE != getvalue)&&(GPIO_HIGH_VALUE != getvalue)) { FELICA_DEBUG_MSG_HIGH("[FELICA_RFS] ERROR - getvalue is out of range \n"); return -1; } /* Change GPIO value to RWS value */ getvalue = getvalue ? 
RWS_AVAILABLE : RWS_NOT_AVAILABLE; FELICA_DEBUG_MSG_MED("[FELICA_RWS] RWS status : %d \n", getvalue); /* Copy value to user memory */ rc = copy_to_user((void *)buf, (void *)&getvalue, count); if(rc) { FELICA_DEBUG_MSG_HIGH("[FELICA_RWS] ERROR - copy_to_user \n"); return rc; } FELICA_DEBUG_MSG_LOW("[FELICA_RWS] felica_rfs_read - end \n"); #ifdef FELICA_FN_DEVICE_TEST FELICA_DEBUG_MSG_LOW("[FELICA_RWS] felica_rfs_read - result(%d) \n",result_read_rws); if(result_read_rws != -1) result_read_rws = count; return result_read_rws; #else return count; #endif } /* * Description : MFC calls this function using close method(void close()) of FileOutputStream class * When this fuction is excuted, set PON to Low. * Input : None * Output : Success : 0 Fail : Other */ static int felica_rws_release (struct inode *inode, struct file *fp) { FELICA_DEBUG_MSG_LOW("[FELICA_RWS] felica_rws_release - start \n"); if(0 == isopen) { FELICA_DEBUG_MSG_HIGH("[FELICA_RWS] felica_rws_release - not open \n"); return -1; } else { isopen = 0; FELICA_DEBUG_MSG_LOW("[FELICA_RWS] felica_rws_release - end \n"); } #ifdef FELICA_FN_DEVICE_TEST FELICA_DEBUG_MSG_LOW("[FELICA_RWS] felica_rws_release - result(%d) \n",result_close_rws); return result_close_rws; #else return 0; #endif } /* * STRUCT DEFINITION */ static struct file_operations felica_rws_fops = { .owner = THIS_MODULE, .open = felica_rws_open, .read = felica_rws_read, .release = felica_rws_release, }; static struct miscdevice felica_rws_device = { .minor = MINOR_NUM_FELICA_RWS, .name = FELICA_RWS_NAME, .fops = &felica_rws_fops, }; static int felica_rws_init(void) { int rc = 0; FELICA_DEBUG_MSG_LOW("[FELICA_RWS] felica_rws_init - start \n"); /* register the device file */ rc = misc_register(&felica_rws_device); if (rc) { FELICA_DEBUG_MSG_HIGH("[FELICA_RWS] FAIL!! 
can not register felica_int \n"); return rc; } rc= request_irq(gpio_to_irq(felica_get_int_gpio_num()), felica_int_low_isr, IRQF_TRIGGER_FALLING|IRQF_NO_SUSPEND, FELICA_RWS_NAME, NULL); if (rc) { FELICA_DEBUG_MSG_HIGH("[FELICA_RWS] FAIL!! can not request_irq = %d \n",rc); return rc; } /* wake up a device from sleep mode by coming up this interrupts */ irq_set_irq_wake(gpio_to_irq(felica_get_int_gpio_num()),1); FELICA_DEBUG_MSG_LOW("[FELICA_RWS] felica_rws_init - end \n"); return 0; } static void felica_rws_exit(void) { FELICA_DEBUG_MSG_LOW("[FELICA_INT] felica_rws_exit - start \n"); free_irq(gpio_to_irq(felica_get_int_gpio_num()), NULL); misc_deregister(&felica_rws_device); FELICA_DEBUG_MSG_LOW("[FELICA_INT] felica_rws_exit - end \n"); } module_init(felica_rws_init); module_exit(felica_rws_exit); MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
mythos234/SimplBacon-CM12-S
drivers/gpu/msm/kgsl_mmu.c
73
23978
/* Copyright (c) 2002,2007-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/export.h> #include <linux/types.h> #include <linux/device.h> #include <linux/spinlock.h> #include <linux/genalloc.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/iommu.h> #include <mach/iommu.h> #include <mach/socinfo.h> #include <linux/types.h> #include "kgsl.h" #include "kgsl_mmu.h" #include "kgsl_gpummu.h" #include "kgsl_device.h" #include "kgsl_sharedmem.h" #include "adreno.h" static enum kgsl_mmutype kgsl_mmu_type; static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable); static int kgsl_cleanup_pt(struct kgsl_pagetable *pt) { int i; struct kgsl_device *device; for (i = 0; i < KGSL_DEVICE_MAX; i++) { device = kgsl_driver.devp[i]; if (device) device->ftbl->cleanup_pt(device, pt); } /* Only the 3d device needs mmu specific pt entries */ device = kgsl_driver.devp[KGSL_DEVICE_3D0]; if (device->mmu.mmu_ops->mmu_cleanup_pt != NULL) device->mmu.mmu_ops->mmu_cleanup_pt(&device->mmu, pt); return 0; } static int kgsl_setup_pt(struct kgsl_pagetable *pt) { int i = 0; int status = 0; struct kgsl_device *device; for (i = 0; i < KGSL_DEVICE_MAX; i++) { device = kgsl_driver.devp[i]; if (device) { status = device->ftbl->setup_pt(device, pt); if (status) goto error_pt; } } /* Only the 3d device needs mmu specific pt entries */ device = kgsl_driver.devp[KGSL_DEVICE_3D0]; if (device->mmu.mmu_ops->mmu_setup_pt != NULL) { status = device->mmu.mmu_ops->mmu_setup_pt(&device->mmu, pt); if (status) { i = 
KGSL_DEVICE_MAX - 1; goto error_pt; } } return status; error_pt: while (i >= 0) { struct kgsl_device *device = kgsl_driver.devp[i]; if (device) device->ftbl->cleanup_pt(device, pt); i--; } return status; } static void kgsl_destroy_pagetable(struct kref *kref) { struct kgsl_pagetable *pagetable = container_of(kref, struct kgsl_pagetable, refcount); unsigned long flags; spin_lock_irqsave(&kgsl_driver.ptlock, flags); list_del(&pagetable->list); spin_unlock_irqrestore(&kgsl_driver.ptlock, flags); pagetable_remove_sysfs_objects(pagetable); kgsl_cleanup_pt(pagetable); if (pagetable->kgsl_pool) gen_pool_destroy(pagetable->kgsl_pool); if (pagetable->pool) gen_pool_destroy(pagetable->pool); pagetable->pt_ops->mmu_destroy_pagetable(pagetable); kfree(pagetable); } static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable) { if (pagetable) kref_put(&pagetable->refcount, kgsl_destroy_pagetable); } static struct kgsl_pagetable * kgsl_get_pagetable(unsigned long name) { struct kgsl_pagetable *pt, *ret = NULL; unsigned long flags; spin_lock_irqsave(&kgsl_driver.ptlock, flags); list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) { if (name == pt->name && kref_get_unless_zero(&pt->refcount)) { ret = pt; break; } } spin_unlock_irqrestore(&kgsl_driver.ptlock, flags); return ret; } static struct kgsl_pagetable * _get_pt_from_kobj(struct kobject *kobj) { unsigned int ptname; if (!kobj) return NULL; if (kstrtou32(kobj->name, 0, &ptname)) return NULL; return kgsl_get_pagetable(ptname); } static ssize_t sysfs_show_entries(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct kgsl_pagetable *pt; int ret = 0; pt = _get_pt_from_kobj(kobj); if (pt) ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.entries); kgsl_put_pagetable(pt); return ret; } static ssize_t sysfs_show_mapped(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct kgsl_pagetable *pt; int ret = 0; pt = _get_pt_from_kobj(kobj); if (pt) ret += snprintf(buf, PAGE_SIZE, "%d\n", 
pt->stats.mapped); kgsl_put_pagetable(pt); return ret; } static ssize_t sysfs_show_va_range(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct kgsl_pagetable *pt; int ret = 0; pt = _get_pt_from_kobj(kobj); if (pt) { ret += snprintf(buf, PAGE_SIZE, "0x%x\n", kgsl_mmu_get_ptsize(pt->mmu)); } kgsl_put_pagetable(pt); return ret; } static ssize_t sysfs_show_max_mapped(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct kgsl_pagetable *pt; int ret = 0; pt = _get_pt_from_kobj(kobj); if (pt) ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_mapped); kgsl_put_pagetable(pt); return ret; } static ssize_t sysfs_show_max_entries(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct kgsl_pagetable *pt; int ret = 0; pt = _get_pt_from_kobj(kobj); if (pt) ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_entries); kgsl_put_pagetable(pt); return ret; } static struct kobj_attribute attr_entries = { .attr = { .name = "entries", .mode = 0444 }, .show = sysfs_show_entries, .store = NULL, }; static struct kobj_attribute attr_mapped = { .attr = { .name = "mapped", .mode = 0444 }, .show = sysfs_show_mapped, .store = NULL, }; static struct kobj_attribute attr_va_range = { .attr = { .name = "va_range", .mode = 0444 }, .show = sysfs_show_va_range, .store = NULL, }; static struct kobj_attribute attr_max_mapped = { .attr = { .name = "max_mapped", .mode = 0444 }, .show = sysfs_show_max_mapped, .store = NULL, }; static struct kobj_attribute attr_max_entries = { .attr = { .name = "max_entries", .mode = 0444 }, .show = sysfs_show_max_entries, .store = NULL, }; static struct attribute *pagetable_attrs[] = { &attr_entries.attr, &attr_mapped.attr, &attr_va_range.attr, &attr_max_mapped.attr, &attr_max_entries.attr, NULL, }; static struct attribute_group pagetable_attr_group = { .attrs = pagetable_attrs, }; static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable) { if (pagetable->kobj) sysfs_remove_group(pagetable->kobj, 
&pagetable_attr_group); kobject_put(pagetable->kobj); } static int pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable) { char ptname[16]; int ret = -ENOMEM; snprintf(ptname, sizeof(ptname), "%d", pagetable->name); pagetable->kobj = kobject_create_and_add(ptname, kgsl_driver.ptkobj); if (pagetable->kobj == NULL) goto err; ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group); err: if (ret) { if (pagetable->kobj) kobject_put(pagetable->kobj); pagetable->kobj = NULL; } return ret; } int kgsl_mmu_get_ptname_from_ptbase(struct kgsl_mmu *mmu, phys_addr_t pt_base) { struct kgsl_pagetable *pt; int ptid = -1; if (!mmu->mmu_ops || !mmu->mmu_ops->mmu_pt_equal) return KGSL_MMU_GLOBAL_PT; spin_lock(&kgsl_driver.ptlock); list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) { if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) { ptid = (int) pt->name; break; } } spin_unlock(&kgsl_driver.ptlock); return ptid; } EXPORT_SYMBOL(kgsl_mmu_get_ptname_from_ptbase); unsigned int kgsl_mmu_log_fault_addr(struct kgsl_mmu *mmu, phys_addr_t pt_base, unsigned int addr) { struct kgsl_pagetable *pt; unsigned int ret = 0; if (!mmu->mmu_ops || !mmu->mmu_ops->mmu_pt_equal) return 0; spin_lock(&kgsl_driver.ptlock); list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) { if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) { if ((addr & ~(PAGE_SIZE-1)) == pt->fault_addr) { ret = 1; break; } else { pt->fault_addr = (addr & ~(PAGE_SIZE-1)); ret = 0; break; } } } spin_unlock(&kgsl_driver.ptlock); return ret; } EXPORT_SYMBOL(kgsl_mmu_log_fault_addr); int kgsl_mmu_init(struct kgsl_device *device) { int status = 0; struct kgsl_mmu *mmu = &device->mmu; mmu->device = device; status = kgsl_allocate_contiguous(device, &mmu->setstate_memory, PAGE_SIZE); if (status) return status; /* Mark the setstate memory as read only */ mmu->setstate_memory.flags |= KGSL_MEMFLAGS_GPUREADONLY; kgsl_sharedmem_set(device, &mmu->setstate_memory, 0, 0, mmu->setstate_memory.size); if (KGSL_MMU_TYPE_GPU == 
kgsl_mmu_type) mmu->mmu_ops = &gpummu_ops; else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) { mmu->mmu_ops = &iommu_ops; status = mmu->mmu_ops->mmu_init(mmu); } if (status) kgsl_sharedmem_free(&mmu->setstate_memory); return status; } EXPORT_SYMBOL(kgsl_mmu_init); int kgsl_mmu_start(struct kgsl_device *device) { struct kgsl_mmu *mmu = &device->mmu; if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) { kgsl_regwrite(device, MH_MMU_CONFIG, 0); /* Setup gpuaddr of global mappings */ if (!mmu->setstate_memory.gpuaddr) kgsl_setup_pt(NULL); return 0; } else { return mmu->mmu_ops->mmu_start(mmu); } } EXPORT_SYMBOL(kgsl_mmu_start); static void mh_axi_error(struct kgsl_device *device, const char* type) { unsigned int reg, gpu_err, phys_err; phys_addr_t pt_base; kgsl_regread(device, MH_AXI_ERROR, &reg); pt_base = kgsl_mmu_get_current_ptbase(&device->mmu); /* * Read gpu virtual and physical addresses that * caused the error from the debug data. */ kgsl_regwrite(device, MH_DEBUG_CTRL, 44); kgsl_regread(device, MH_DEBUG_DATA, &gpu_err); kgsl_regwrite(device, MH_DEBUG_CTRL, 45); kgsl_regread(device, MH_DEBUG_DATA, &phys_err); KGSL_MEM_CRIT(device, "axi %s error: %08x pt %pa gpu %08x phys %08x\n", type, reg, &pt_base, gpu_err, phys_err); } void kgsl_mh_intrcallback(struct kgsl_device *device) { unsigned int status = 0; kgsl_regread(device, MH_INTERRUPT_STATUS, &status); if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR) mh_axi_error(device, "read"); if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR) mh_axi_error(device, "write"); if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT) device->mmu.mmu_ops->mmu_pagefault(&device->mmu); status &= KGSL_MMU_INT_MASK; kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status); } EXPORT_SYMBOL(kgsl_mh_intrcallback); static struct kgsl_pagetable * kgsl_mmu_createpagetableobject(struct kgsl_mmu *mmu, unsigned int name) { int status = 0; struct kgsl_pagetable *pagetable = NULL; unsigned long flags; unsigned int ptsize; pagetable = kzalloc(sizeof(struct kgsl_pagetable), 
GFP_KERNEL); if (pagetable == NULL) { KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(struct kgsl_pagetable)); return NULL; } kref_init(&pagetable->refcount); spin_lock_init(&pagetable->lock); ptsize = kgsl_mmu_get_ptsize(mmu); pagetable->mmu = mmu; pagetable->name = name; pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(ptsize); pagetable->fault_addr = 0xFFFFFFFF; /* * create a separate kgsl pool for IOMMU, global mappings can be mapped * just once from this pool of the defaultpagetable */ if ((KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) && ((KGSL_MMU_GLOBAL_PT == name) || (KGSL_MMU_PRIV_BANK_TABLE_NAME == name))) { pagetable->kgsl_pool = gen_pool_create(ilog2(SZ_8K), -1); if (pagetable->kgsl_pool == NULL) { KGSL_CORE_ERR("gen_pool_create(%d) failed\n", ilog2(SZ_8K)); goto err_alloc; } if (gen_pool_add(pagetable->kgsl_pool, KGSL_IOMMU_GLOBAL_MEM_BASE, KGSL_IOMMU_GLOBAL_MEM_SIZE, -1)) { KGSL_CORE_ERR("gen_pool_add failed\n"); goto err_kgsl_pool; } } pagetable->pool = gen_pool_create(PAGE_SHIFT, -1); if (pagetable->pool == NULL) { KGSL_CORE_ERR("gen_pool_create(%d) failed\n", PAGE_SHIFT); goto err_kgsl_pool; } if (gen_pool_add(pagetable->pool, kgsl_mmu_get_base_addr(mmu), ptsize, -1)) { KGSL_CORE_ERR("gen_pool_add failed\n"); goto err_pool; } if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type) pagetable->pt_ops = &gpummu_pt_ops; else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) pagetable->pt_ops = &iommu_pt_ops; pagetable->priv = pagetable->pt_ops->mmu_create_pagetable(); if (!pagetable->priv) goto err_pool; status = kgsl_setup_pt(pagetable); if (status) goto err_mmu_create; spin_lock_irqsave(&kgsl_driver.ptlock, flags); list_add(&pagetable->list, &kgsl_driver.pagetable_list); spin_unlock_irqrestore(&kgsl_driver.ptlock, flags); /* Create the sysfs entries */ pagetable_add_sysfs_objects(pagetable); return pagetable; err_mmu_create: pagetable->pt_ops->mmu_destroy_pagetable(pagetable); err_pool: gen_pool_destroy(pagetable->pool); err_kgsl_pool: if (pagetable->kgsl_pool) 
gen_pool_destroy(pagetable->kgsl_pool); err_alloc: kfree(pagetable); return NULL; } struct kgsl_pagetable *kgsl_mmu_getpagetable(struct kgsl_mmu *mmu, unsigned long name) { struct kgsl_pagetable *pt; if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) return (void *)(-1); if (!kgsl_mmu_is_perprocess(mmu)) name = KGSL_MMU_GLOBAL_PT; pt = kgsl_get_pagetable(name); if (pt == NULL) pt = kgsl_mmu_createpagetableobject(mmu, name); return pt; } void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable) { kgsl_put_pagetable(pagetable); } EXPORT_SYMBOL(kgsl_mmu_putpagetable); int kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id, uint32_t flags) { struct kgsl_device *device = mmu->device; if (!(flags & (KGSL_MMUFLAGS_TLBFLUSH | KGSL_MMUFLAGS_PTUPDATE))) return 0; if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) return 0; else if (device->ftbl->setstate) return device->ftbl->setstate(device, context_id, flags); else if (mmu->mmu_ops->mmu_device_setstate) return mmu->mmu_ops->mmu_device_setstate(mmu, flags); return 0; } EXPORT_SYMBOL(kgsl_setstate); void kgsl_mh_start(struct kgsl_device *device) { struct kgsl_mh *mh = &device->mh; /* force mmu off to for now*/ kgsl_regwrite(device, MH_MMU_CONFIG, 0); /* define physical memory range accessible by the core */ kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base); kgsl_regwrite(device, MH_MMU_MPU_END, mh->mpu_base + mh->mpu_range); kgsl_regwrite(device, MH_ARBITER_CONFIG, mh->mharb); if (mh->mh_intf_cfg1 != 0) kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG1, mh->mh_intf_cfg1); if (mh->mh_intf_cfg2 != 0) kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG2, mh->mh_intf_cfg2); /* * Interrupts are enabled on a per-device level when * kgsl_pwrctrl_irq() is called */ } EXPORT_SYMBOL(kgsl_mh_start); /** * kgsl_mmu_get_gpuaddr - Assign a memdesc with a gpuadddr from the gen pool * @pagetable - pagetable whose pool is to be used * @memdesc - memdesc to which gpuaddr is assigned * * returns - 0 on success else error code */ int 
kgsl_mmu_get_gpuaddr(struct kgsl_pagetable *pagetable, struct kgsl_memdesc *memdesc) { struct gen_pool *pool = NULL; int size; int page_align = ilog2(PAGE_SIZE); if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) { if (memdesc->sglen == 1) { memdesc->gpuaddr = sg_dma_address(memdesc->sg); if (!memdesc->gpuaddr) memdesc->gpuaddr = sg_phys(memdesc->sg); if (!memdesc->gpuaddr) { KGSL_CORE_ERR("Unable to get a valid physical " "address for memdesc\n"); return -EINVAL; } return 0; } else { KGSL_CORE_ERR("Memory is not contigious " "(sglen = %d)\n", memdesc->sglen); return -EINVAL; } } /* Add space for the guard page when allocating the mmu VA. */ size = memdesc->size; if (kgsl_memdesc_has_guard_page(memdesc)) size += PAGE_SIZE; pool = pagetable->pool; if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) { /* Allocate aligned virtual addresses for iommu. This allows * more efficient pagetable entries if the physical memory * is also aligned. Don't do this for GPUMMU, because * the address space is so small. */ if (kgsl_memdesc_get_align(memdesc) > 0) page_align = kgsl_memdesc_get_align(memdesc); if (kgsl_memdesc_is_global(memdesc)) { /* * Only the default pagetable has a kgsl_pool, and * it is responsible for creating the mapping for * each global buffer. The mapping will be reused * in all other pagetables and it must already exist * when we're creating other pagetables which do not * have a kgsl_pool. */ pool = pagetable->kgsl_pool; if (pool == NULL && memdesc->gpuaddr == 0) { KGSL_CORE_ERR( "No address for global mapping into pt %d\n", pagetable->name); return -EINVAL; } } else if (kgsl_memdesc_use_cpu_map(memdesc)) { if (memdesc->gpuaddr == 0) return -EINVAL; pool = NULL; } } if (pool) { memdesc->gpuaddr = gen_pool_alloc_aligned(pool, size, page_align); if (memdesc->gpuaddr == 0) { KGSL_CORE_ERR("gen_pool_alloc(%d) failed, pool: %s\n", size, (pool == pagetable->kgsl_pool) ? 
"kgsl_pool" : "general_pool"); KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n", pagetable->name, pagetable->stats.mapped, pagetable->stats.entries); return -ENOMEM; } } return 0; } EXPORT_SYMBOL(kgsl_mmu_get_gpuaddr); int kgsl_mmu_map(struct kgsl_pagetable *pagetable, struct kgsl_memdesc *memdesc) { int ret = 0; int size; unsigned int protflags = kgsl_memdesc_protflags(memdesc); if (!memdesc->gpuaddr) return -EINVAL; /* Only global mappings should be mapped multiple times */ if (!kgsl_memdesc_is_global(memdesc) && (KGSL_MEMDESC_MAPPED & memdesc->priv)) return -EINVAL; if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE) return 0; /* Add space for the guard page when allocating the mmu VA. */ size = memdesc->size; if (kgsl_memdesc_has_guard_page(memdesc)) size += PAGE_SIZE; if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype()) spin_lock(&pagetable->lock); ret = pagetable->pt_ops->mmu_map(pagetable, memdesc, protflags, &pagetable->tlb_flags); if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) spin_lock(&pagetable->lock); if (ret) goto done; /* Keep track of the statistics for the sysfs files */ KGSL_STATS_ADD(1, pagetable->stats.entries, pagetable->stats.max_entries); KGSL_STATS_ADD(size, pagetable->stats.mapped, pagetable->stats.max_mapped); spin_unlock(&pagetable->lock); memdesc->priv |= KGSL_MEMDESC_MAPPED; return 0; done: spin_unlock(&pagetable->lock); return ret; } EXPORT_SYMBOL(kgsl_mmu_map); /** * kgsl_mmu_put_gpuaddr - Free a gpuaddress from memory pool * @pagetable - pagetable whose pool memory is freed from * @memdesc - memdesc whose gpuaddress is freed * * returns - 0 on success else error code */ int kgsl_mmu_put_gpuaddr(struct kgsl_pagetable *pagetable, struct kgsl_memdesc *memdesc) { struct gen_pool *pool; int size; if (memdesc->size == 0 || memdesc->gpuaddr == 0) return 0; if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) goto done; /* Add space for the guard page when freeing the mmu VA. 
*/ size = memdesc->size; if (kgsl_memdesc_has_guard_page(memdesc)) size += PAGE_SIZE; pool = pagetable->pool; if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) { if (kgsl_memdesc_is_global(memdesc)) pool = pagetable->kgsl_pool; else if (kgsl_memdesc_use_cpu_map(memdesc)) pool = NULL; } if (pool) gen_pool_free(pool, memdesc->gpuaddr, size); /* * Don't clear the gpuaddr on global mappings because they * may be in use by other pagetables */ done: if (!kgsl_memdesc_is_global(memdesc)) memdesc->gpuaddr = 0; return 0; } EXPORT_SYMBOL(kgsl_mmu_put_gpuaddr); int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable, struct kgsl_memdesc *memdesc) { int size; unsigned int start_addr = 0; unsigned int end_addr = 0; if (memdesc->size == 0 || memdesc->gpuaddr == 0 || !(KGSL_MEMDESC_MAPPED & memdesc->priv)) return -EINVAL; if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) return 0; /* Add space for the guard page when freeing the mmu VA. */ size = memdesc->size; if (kgsl_memdesc_has_guard_page(memdesc)) size += PAGE_SIZE; start_addr = memdesc->gpuaddr; end_addr = (memdesc->gpuaddr + size); if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype()) spin_lock(&pagetable->lock); pagetable->pt_ops->mmu_unmap(pagetable, memdesc, &pagetable->tlb_flags); /* If buffer is unmapped 0 fault addr */ if ((pagetable->fault_addr >= start_addr) && (pagetable->fault_addr < end_addr)) pagetable->fault_addr = 0; if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) spin_lock(&pagetable->lock); /* Remove the statistics */ pagetable->stats.entries--; pagetable->stats.mapped -= size; spin_unlock(&pagetable->lock); if (!kgsl_memdesc_is_global(memdesc)) memdesc->priv &= ~KGSL_MEMDESC_MAPPED; return 0; } EXPORT_SYMBOL(kgsl_mmu_unmap); int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable, struct kgsl_memdesc *memdesc) { int result = -EINVAL; unsigned int gpuaddr = 0; if (memdesc == NULL) { KGSL_CORE_ERR("invalid memdesc\n"); goto error; } /* Not all global mappings are needed for all MMU types */ if (!memdesc->size) return 
0; gpuaddr = memdesc->gpuaddr; memdesc->priv |= KGSL_MEMDESC_GLOBAL; result = kgsl_mmu_get_gpuaddr(pagetable, memdesc); if (result) goto error; result = kgsl_mmu_map(pagetable, memdesc); if (result) goto error_put_gpuaddr; /*global mappings must have the same gpu address in all pagetables*/ if (gpuaddr && gpuaddr != memdesc->gpuaddr) { KGSL_CORE_ERR("pt %p addr mismatch phys %pa gpu 0x%0x 0x%08x", pagetable, &memdesc->physaddr, gpuaddr, memdesc->gpuaddr); goto error_unmap; } return result; error_unmap: kgsl_mmu_unmap(pagetable, memdesc); error_put_gpuaddr: kgsl_mmu_put_gpuaddr(pagetable, memdesc); error: return result; } EXPORT_SYMBOL(kgsl_mmu_map_global); int kgsl_mmu_close(struct kgsl_device *device) { struct kgsl_mmu *mmu = &device->mmu; kgsl_sharedmem_free(&mmu->setstate_memory); if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) return 0; else return mmu->mmu_ops->mmu_close(mmu); } EXPORT_SYMBOL(kgsl_mmu_close); int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt, enum kgsl_deviceid id) { unsigned int result = 0; if (pt == NULL) return 0; spin_lock(&pt->lock); if (pt->tlb_flags & (1<<id)) { result = KGSL_MMUFLAGS_TLBFLUSH; pt->tlb_flags &= ~(1<<id); } spin_unlock(&pt->lock); return result; } EXPORT_SYMBOL(kgsl_mmu_pt_get_flags); void kgsl_mmu_ptpool_destroy(void *ptpool) { if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type) kgsl_gpummu_ptpool_destroy(ptpool); ptpool = 0; } EXPORT_SYMBOL(kgsl_mmu_ptpool_destroy); void *kgsl_mmu_ptpool_init(int entries) { if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type) return kgsl_gpummu_ptpool_init(entries); else return (void *)(-1); } EXPORT_SYMBOL(kgsl_mmu_ptpool_init); int kgsl_mmu_enabled(void) { if (KGSL_MMU_TYPE_NONE != kgsl_mmu_type) return 1; else return 0; } EXPORT_SYMBOL(kgsl_mmu_enabled); enum kgsl_mmutype kgsl_mmu_get_mmutype(void) { return kgsl_mmu_type; } EXPORT_SYMBOL(kgsl_mmu_get_mmutype); void kgsl_mmu_set_mmutype(char *mmutype) { /* Set the default MMU - GPU on <=8960 and nothing on >= 8064 */ kgsl_mmu_type = cpu_is_apq8064() ? 
KGSL_MMU_TYPE_NONE : KGSL_MMU_TYPE_GPU; /* Use the IOMMU if it is found */ if (iommu_present(&platform_bus_type)) kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU; if (mmutype && !strncmp(mmutype, "gpummu", 6)) kgsl_mmu_type = KGSL_MMU_TYPE_GPU; if (iommu_present(&platform_bus_type) && mmutype && !strncmp(mmutype, "iommu", 5)) kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU; if (mmutype && !strncmp(mmutype, "nommu", 5)) kgsl_mmu_type = KGSL_MMU_TYPE_NONE; } EXPORT_SYMBOL(kgsl_mmu_set_mmutype); int kgsl_mmu_gpuaddr_in_range(struct kgsl_pagetable *pt, unsigned int gpuaddr) { if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) return 1; if (gpuaddr >= kgsl_mmu_get_base_addr(pt->mmu) && gpuaddr < kgsl_mmu_get_base_addr(pt->mmu) + kgsl_mmu_get_ptsize(pt->mmu)) return 1; if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU && kgsl_mmu_is_perprocess(pt->mmu)) return (gpuaddr > 0 && gpuaddr < TASK_SIZE); return 0; } EXPORT_SYMBOL(kgsl_mmu_gpuaddr_in_range);
gpl-2.0
lenovo-k900/android_external_busybox
e2fsprogs/old_e2fsprogs/e2p/hashstr.c
329
1233
/* vi: set sw=4 ts=4: */ /* * feature.c --- convert between features and strings * * Copyright (C) 1999 Theodore Ts'o <tytso@mit.edu> * * This file can be redistributed under the terms of the GNU Library General * Public License * */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include <errno.h> #include "e2p.h" struct hash { int num; const char *string; }; static const struct hash hash_list[] = { { EXT2_HASH_LEGACY, "legacy" }, { EXT2_HASH_HALF_MD4, "half_md4" }, { EXT2_HASH_TEA, "tea" }, { 0, 0 }, }; const char *e2p_hash2string(int num) { const struct hash *p; static char buf[20]; for (p = hash_list; p->string; p++) { if (num == p->num) return p->string; } sprintf(buf, "HASHALG_%d", num); return buf; } /* * Returns the hash algorithm, or -1 on error */ int e2p_string2hash(char *string) { const struct hash *p; char *eptr; int num; for (p = hash_list; p->string; p++) { if (!strcasecmp(string, p->string)) { return p->num; } } if (strncasecmp(string, "HASHALG_", 8)) return -1; if (string[8] == 0) return -1; num = strtol(string+8, &eptr, 10); if (num > 255 || num < 0) return -1; if (*eptr) return -1; return num; }
gpl-2.0
rcrobles/linux-stable-4.3
fs/xfs/xfs_filestream.c
585
11289
/* * Copyright (c) 2006-2007 Silicon Graphics, Inc. * Copyright (c) 2014 Christoph Hellwig. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_sb.h" #include "xfs_mount.h" #include "xfs_inode.h" #include "xfs_bmap.h" #include "xfs_bmap_util.h" #include "xfs_alloc.h" #include "xfs_mru_cache.h" #include "xfs_filestream.h" #include "xfs_trace.h" struct xfs_fstrm_item { struct xfs_mru_cache_elem mru; struct xfs_inode *ip; xfs_agnumber_t ag; /* AG in use for this directory */ }; enum xfs_fstrm_alloc { XFS_PICK_USERDATA = 1, XFS_PICK_LOWSPACE = 2, }; /* * Allocation group filestream associations are tracked with per-ag atomic * counters. These counters allow xfs_filestream_pick_ag() to tell whether a * particular AG already has active filestreams associated with it. The mount * point's m_peraglock is used to protect these counters from per-ag array * re-allocation during a growfs operation. When xfs_growfs_data_private() is * about to reallocate the array, it calls xfs_filestream_flush() with the * m_peraglock held in write mode. * * Since xfs_mru_cache_flush() guarantees that all the free functions for all * the cache elements have finished executing before it returns, it's safe for * the free functions to use the atomic counters without m_peraglock protection. 
* This allows the implementation of xfs_fstrm_free_func() to be agnostic about * whether it was called with the m_peraglock held in read mode, write mode or * not held at all. The race condition this addresses is the following: * * - The work queue scheduler fires and pulls a filestream directory cache * element off the LRU end of the cache for deletion, then gets pre-empted. * - A growfs operation grabs the m_peraglock in write mode, flushes all the * remaining items from the cache and reallocates the mount point's per-ag * array, resetting all the counters to zero. * - The work queue thread resumes and calls the free function for the element * it started cleaning up earlier. In the process it decrements the * filestreams counter for an AG that now has no references. * * With a shrinkfs feature, the above scenario could panic the system. * * All other uses of the following macros should be protected by either the * m_peraglock held in read mode, or the cache's internal locking exposed by the * interval between a call to xfs_mru_cache_lookup() and a call to * xfs_mru_cache_done(). In addition, the m_peraglock must be held in read mode * when new elements are added to the cache. * * Combined, these locking rules ensure that no associations will ever exist in * the cache that reference per-ag array elements that have since been * reallocated. 
*/ int xfs_filestream_peek_ag( xfs_mount_t *mp, xfs_agnumber_t agno) { struct xfs_perag *pag; int ret; pag = xfs_perag_get(mp, agno); ret = atomic_read(&pag->pagf_fstrms); xfs_perag_put(pag); return ret; } static int xfs_filestream_get_ag( xfs_mount_t *mp, xfs_agnumber_t agno) { struct xfs_perag *pag; int ret; pag = xfs_perag_get(mp, agno); ret = atomic_inc_return(&pag->pagf_fstrms); xfs_perag_put(pag); return ret; } static void xfs_filestream_put_ag( xfs_mount_t *mp, xfs_agnumber_t agno) { struct xfs_perag *pag; pag = xfs_perag_get(mp, agno); atomic_dec(&pag->pagf_fstrms); xfs_perag_put(pag); } static void xfs_fstrm_free_func( struct xfs_mru_cache_elem *mru) { struct xfs_fstrm_item *item = container_of(mru, struct xfs_fstrm_item, mru); xfs_filestream_put_ag(item->ip->i_mount, item->ag); trace_xfs_filestream_free(item->ip, item->ag); kmem_free(item); } /* * Scan the AGs starting at startag looking for an AG that isn't in use and has * at least minlen blocks free. */ static int xfs_filestream_pick_ag( struct xfs_inode *ip, xfs_agnumber_t startag, xfs_agnumber_t *agp, int flags, xfs_extlen_t minlen) { struct xfs_mount *mp = ip->i_mount; struct xfs_fstrm_item *item; struct xfs_perag *pag; xfs_extlen_t longest, free = 0, minfree, maxfree = 0; xfs_agnumber_t ag, max_ag = NULLAGNUMBER; int err, trylock, nscan; ASSERT(S_ISDIR(ip->i_d.di_mode)); /* 2% of an AG's blocks must be free for it to be chosen. */ minfree = mp->m_sb.sb_agblocks / 50; ag = startag; *agp = NULLAGNUMBER; /* For the first pass, don't sleep trying to init the per-AG. */ trylock = XFS_ALLOC_FLAG_TRYLOCK; for (nscan = 0; 1; nscan++) { trace_xfs_filestream_scan(ip, ag); pag = xfs_perag_get(mp, ag); if (!pag->pagf_init) { err = xfs_alloc_pagf_init(mp, NULL, ag, trylock); if (err && !trylock) { xfs_perag_put(pag); return err; } } /* Might fail sometimes during the 1st pass with trylock set. */ if (!pag->pagf_init) goto next_ag; /* Keep track of the AG with the most free blocks. 
*/ if (pag->pagf_freeblks > maxfree) { maxfree = pag->pagf_freeblks; max_ag = ag; } /* * The AG reference count does two things: it enforces mutual * exclusion when examining the suitability of an AG in this * loop, and it guards against two filestreams being established * in the same AG as each other. */ if (xfs_filestream_get_ag(mp, ag) > 1) { xfs_filestream_put_ag(mp, ag); goto next_ag; } longest = xfs_alloc_longest_free_extent(mp, pag, xfs_alloc_min_freelist(mp, pag)); if (((minlen && longest >= minlen) || (!minlen && pag->pagf_freeblks >= minfree)) && (!pag->pagf_metadata || !(flags & XFS_PICK_USERDATA) || (flags & XFS_PICK_LOWSPACE))) { /* Break out, retaining the reference on the AG. */ free = pag->pagf_freeblks; xfs_perag_put(pag); *agp = ag; break; } /* Drop the reference on this AG, it's not usable. */ xfs_filestream_put_ag(mp, ag); next_ag: xfs_perag_put(pag); /* Move to the next AG, wrapping to AG 0 if necessary. */ if (++ag >= mp->m_sb.sb_agcount) ag = 0; /* If a full pass of the AGs hasn't been done yet, continue. */ if (ag != startag) continue; /* Allow sleeping in xfs_alloc_pagf_init() on the 2nd pass. */ if (trylock != 0) { trylock = 0; continue; } /* Finally, if lowspace wasn't set, set it for the 3rd pass. */ if (!(flags & XFS_PICK_LOWSPACE)) { flags |= XFS_PICK_LOWSPACE; continue; } /* * Take the AG with the most free space, regardless of whether * it's already in use by another filestream. 
*/ if (max_ag != NULLAGNUMBER) { xfs_filestream_get_ag(mp, max_ag); free = maxfree; *agp = max_ag; break; } /* take AG 0 if none matched */ trace_xfs_filestream_pick(ip, *agp, free, nscan); *agp = 0; return 0; } trace_xfs_filestream_pick(ip, *agp, free, nscan); if (*agp == NULLAGNUMBER) return 0; err = -ENOMEM; item = kmem_alloc(sizeof(*item), KM_MAYFAIL); if (!item) goto out_put_ag; item->ag = *agp; item->ip = ip; err = xfs_mru_cache_insert(mp->m_filestream, ip->i_ino, &item->mru); if (err) { if (err == -EEXIST) err = 0; goto out_free_item; } return 0; out_free_item: kmem_free(item); out_put_ag: xfs_filestream_put_ag(mp, *agp); return err; } static struct xfs_inode * xfs_filestream_get_parent( struct xfs_inode *ip) { struct inode *inode = VFS_I(ip), *dir = NULL; struct dentry *dentry, *parent; dentry = d_find_alias(inode); if (!dentry) goto out; parent = dget_parent(dentry); if (!parent) goto out_dput; dir = igrab(d_inode(parent)); dput(parent); out_dput: dput(dentry); out: return dir ? XFS_I(dir) : NULL; } /* * Find the right allocation group for a file, either by finding an * existing file stream or creating a new one. * * Returns NULLAGNUMBER in case of an error. */ xfs_agnumber_t xfs_filestream_lookup_ag( struct xfs_inode *ip) { struct xfs_mount *mp = ip->i_mount; struct xfs_inode *pip = NULL; xfs_agnumber_t startag, ag = NULLAGNUMBER; struct xfs_mru_cache_elem *mru; ASSERT(S_ISREG(ip->i_d.di_mode)); pip = xfs_filestream_get_parent(ip); if (!pip) return NULLAGNUMBER; mru = xfs_mru_cache_lookup(mp->m_filestream, pip->i_ino); if (mru) { ag = container_of(mru, struct xfs_fstrm_item, mru)->ag; xfs_mru_cache_done(mp->m_filestream); trace_xfs_filestream_lookup(ip, ag); goto out; } /* * Set the starting AG using the rotor for inode32, otherwise * use the directory inode's AG. 
*/ if (mp->m_flags & XFS_MOUNT_32BITINODES) { xfs_agnumber_t rotorstep = xfs_rotorstep; startag = (mp->m_agfrotor / rotorstep) % mp->m_sb.sb_agcount; mp->m_agfrotor = (mp->m_agfrotor + 1) % (mp->m_sb.sb_agcount * rotorstep); } else startag = XFS_INO_TO_AGNO(mp, pip->i_ino); if (xfs_filestream_pick_ag(pip, startag, &ag, 0, 0)) ag = NULLAGNUMBER; out: IRELE(pip); return ag; } /* * Pick a new allocation group for the current file and its file stream. * * This is called when the allocator can't find a suitable extent in the * current AG, and we have to move the stream into a new AG with more space. */ int xfs_filestream_new_ag( struct xfs_bmalloca *ap, xfs_agnumber_t *agp) { struct xfs_inode *ip = ap->ip, *pip; struct xfs_mount *mp = ip->i_mount; xfs_extlen_t minlen = ap->length; xfs_agnumber_t startag = 0; int flags, err = 0; struct xfs_mru_cache_elem *mru; *agp = NULLAGNUMBER; pip = xfs_filestream_get_parent(ip); if (!pip) goto exit; mru = xfs_mru_cache_remove(mp->m_filestream, pip->i_ino); if (mru) { struct xfs_fstrm_item *item = container_of(mru, struct xfs_fstrm_item, mru); startag = (item->ag + 1) % mp->m_sb.sb_agcount; } flags = (ap->userdata ? XFS_PICK_USERDATA : 0) | (ap->flist->xbf_low ? XFS_PICK_LOWSPACE : 0); err = xfs_filestream_pick_ag(pip, startag, agp, flags, minlen); /* * Only free the item here so we skip over the old AG earlier. */ if (mru) xfs_fstrm_free_func(mru); IRELE(pip); exit: if (*agp == NULLAGNUMBER) *agp = 0; return err; } void xfs_filestream_deassociate( struct xfs_inode *ip) { xfs_mru_cache_delete(ip->i_mount->m_filestream, ip->i_ino); } int xfs_filestream_mount( xfs_mount_t *mp) { /* * The filestream timer tunable is currently fixed within the range of * one second to four minutes, with five seconds being the default. The * group count is somewhat arbitrary, but it'd be nice to adhere to the * timer tunable to within about 10 percent. This requires at least 10 * groups. 
*/ return xfs_mru_cache_create(&mp->m_filestream, xfs_fstrm_centisecs * 10, 10, xfs_fstrm_free_func); } void xfs_filestream_unmount( xfs_mount_t *mp) { xfs_mru_cache_destroy(mp->m_filestream); }
gpl-2.0
nmacs/linux-2.6.34.14-atlas
net/netlabel/netlabel_mgmt.c
1097
20541
/* * NetLabel Management Support * * This file defines the management functions for the NetLabel system. The * NetLabel system manages static and dynamic label mappings for network * protocols such as CIPSO and RIPSO. * * Author: Paul Moore <paul.moore@hp.com> * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/types.h> #include <linux/socket.h> #include <linux/string.h> #include <linux/skbuff.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/slab.h> #include <net/sock.h> #include <net/netlink.h> #include <net/genetlink.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <asm/atomic.h> #include "netlabel_domainhash.h" #include "netlabel_user.h" #include "netlabel_mgmt.h" /* NetLabel configured protocol counter */ atomic_t netlabel_mgmt_protocount = ATOMIC_INIT(0); /* Argument struct for netlbl_domhsh_walk() */ struct netlbl_domhsh_walk_arg { struct netlink_callback *nl_cb; struct sk_buff *skb; u32 seq; }; /* NetLabel Generic NETLINK CIPSOv4 family */ static struct genl_family netlbl_mgmt_gnl_family = { .id = GENL_ID_GENERATE, .hdrsize = 0, .name = NETLBL_NLTYPE_MGMT_NAME, .version = NETLBL_PROTO_VERSION, .maxattr = NLBL_MGMT_A_MAX, }; /* NetLabel Netlink attribute 
policy */ static const struct nla_policy netlbl_mgmt_genl_policy[NLBL_MGMT_A_MAX + 1] = { [NLBL_MGMT_A_DOMAIN] = { .type = NLA_NUL_STRING }, [NLBL_MGMT_A_PROTOCOL] = { .type = NLA_U32 }, [NLBL_MGMT_A_VERSION] = { .type = NLA_U32 }, [NLBL_MGMT_A_CV4DOI] = { .type = NLA_U32 }, }; /* * Helper Functions */ /** * netlbl_mgmt_add - Handle an ADD message * @info: the Generic NETLINK info block * @audit_info: NetLabel audit information * * Description: * Helper function for the ADD and ADDDEF messages to add the domain mappings * from the message to the hash table. See netlabel.h for a description of the * message format. Returns zero on success, negative values on failure. * */ static int netlbl_mgmt_add_common(struct genl_info *info, struct netlbl_audit *audit_info) { int ret_val = -EINVAL; struct netlbl_dom_map *entry = NULL; struct netlbl_domaddr_map *addrmap = NULL; struct cipso_v4_doi *cipsov4 = NULL; u32 tmp_val; entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (entry == NULL) { ret_val = -ENOMEM; goto add_failure; } entry->type = nla_get_u32(info->attrs[NLBL_MGMT_A_PROTOCOL]); if (info->attrs[NLBL_MGMT_A_DOMAIN]) { size_t tmp_size = nla_len(info->attrs[NLBL_MGMT_A_DOMAIN]); entry->domain = kmalloc(tmp_size, GFP_KERNEL); if (entry->domain == NULL) { ret_val = -ENOMEM; goto add_failure; } nla_strlcpy(entry->domain, info->attrs[NLBL_MGMT_A_DOMAIN], tmp_size); } /* NOTE: internally we allow/use a entry->type value of * NETLBL_NLTYPE_ADDRSELECT but we don't currently allow users * to pass that as a protocol value because we need to know the * "real" protocol */ switch (entry->type) { case NETLBL_NLTYPE_UNLABELED: break; case NETLBL_NLTYPE_CIPSOV4: if (!info->attrs[NLBL_MGMT_A_CV4DOI]) goto add_failure; tmp_val = nla_get_u32(info->attrs[NLBL_MGMT_A_CV4DOI]); cipsov4 = cipso_v4_doi_getdef(tmp_val); if (cipsov4 == NULL) goto add_failure; entry->type_def.cipsov4 = cipsov4; break; default: goto add_failure; } if (info->attrs[NLBL_MGMT_A_IPV4ADDR]) { struct in_addr *addr; 
struct in_addr *mask; struct netlbl_domaddr4_map *map; addrmap = kzalloc(sizeof(*addrmap), GFP_KERNEL); if (addrmap == NULL) { ret_val = -ENOMEM; goto add_failure; } INIT_LIST_HEAD(&addrmap->list4); INIT_LIST_HEAD(&addrmap->list6); if (nla_len(info->attrs[NLBL_MGMT_A_IPV4ADDR]) != sizeof(struct in_addr)) { ret_val = -EINVAL; goto add_failure; } if (nla_len(info->attrs[NLBL_MGMT_A_IPV4MASK]) != sizeof(struct in_addr)) { ret_val = -EINVAL; goto add_failure; } addr = nla_data(info->attrs[NLBL_MGMT_A_IPV4ADDR]); mask = nla_data(info->attrs[NLBL_MGMT_A_IPV4MASK]); map = kzalloc(sizeof(*map), GFP_KERNEL); if (map == NULL) { ret_val = -ENOMEM; goto add_failure; } map->list.addr = addr->s_addr & mask->s_addr; map->list.mask = mask->s_addr; map->list.valid = 1; map->type = entry->type; if (cipsov4) map->type_def.cipsov4 = cipsov4; ret_val = netlbl_af4list_add(&map->list, &addrmap->list4); if (ret_val != 0) { kfree(map); goto add_failure; } entry->type = NETLBL_NLTYPE_ADDRSELECT; entry->type_def.addrsel = addrmap; #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) } else if (info->attrs[NLBL_MGMT_A_IPV6ADDR]) { struct in6_addr *addr; struct in6_addr *mask; struct netlbl_domaddr6_map *map; addrmap = kzalloc(sizeof(*addrmap), GFP_KERNEL); if (addrmap == NULL) { ret_val = -ENOMEM; goto add_failure; } INIT_LIST_HEAD(&addrmap->list4); INIT_LIST_HEAD(&addrmap->list6); if (nla_len(info->attrs[NLBL_MGMT_A_IPV6ADDR]) != sizeof(struct in6_addr)) { ret_val = -EINVAL; goto add_failure; } if (nla_len(info->attrs[NLBL_MGMT_A_IPV6MASK]) != sizeof(struct in6_addr)) { ret_val = -EINVAL; goto add_failure; } addr = nla_data(info->attrs[NLBL_MGMT_A_IPV6ADDR]); mask = nla_data(info->attrs[NLBL_MGMT_A_IPV6MASK]); map = kzalloc(sizeof(*map), GFP_KERNEL); if (map == NULL) { ret_val = -ENOMEM; goto add_failure; } ipv6_addr_copy(&map->list.addr, addr); map->list.addr.s6_addr32[0] &= mask->s6_addr32[0]; map->list.addr.s6_addr32[1] &= mask->s6_addr32[1]; map->list.addr.s6_addr32[2] &= 
mask->s6_addr32[2]; map->list.addr.s6_addr32[3] &= mask->s6_addr32[3]; ipv6_addr_copy(&map->list.mask, mask); map->list.valid = 1; map->type = entry->type; ret_val = netlbl_af6list_add(&map->list, &addrmap->list6); if (ret_val != 0) { kfree(map); goto add_failure; } entry->type = NETLBL_NLTYPE_ADDRSELECT; entry->type_def.addrsel = addrmap; #endif /* IPv6 */ } ret_val = netlbl_domhsh_add(entry, audit_info); if (ret_val != 0) goto add_failure; return 0; add_failure: if (cipsov4) cipso_v4_doi_putdef(cipsov4); if (entry) kfree(entry->domain); kfree(addrmap); kfree(entry); return ret_val; } /** * netlbl_mgmt_listentry - List a NetLabel/LSM domain map entry * @skb: the NETLINK buffer * @entry: the map entry * * Description: * This function is a helper function used by the LISTALL and LISTDEF command * handlers. The caller is responsibile for ensuring that the RCU read lock * is held. Returns zero on success, negative values on failure. * */ static int netlbl_mgmt_listentry(struct sk_buff *skb, struct netlbl_dom_map *entry) { int ret_val = 0; struct nlattr *nla_a; struct nlattr *nla_b; struct netlbl_af4list *iter4; #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) struct netlbl_af6list *iter6; #endif if (entry->domain != NULL) { ret_val = nla_put_string(skb, NLBL_MGMT_A_DOMAIN, entry->domain); if (ret_val != 0) return ret_val; } switch (entry->type) { case NETLBL_NLTYPE_ADDRSELECT: nla_a = nla_nest_start(skb, NLBL_MGMT_A_SELECTORLIST); if (nla_a == NULL) return -ENOMEM; netlbl_af4list_foreach_rcu(iter4, &entry->type_def.addrsel->list4) { struct netlbl_domaddr4_map *map4; struct in_addr addr_struct; nla_b = nla_nest_start(skb, NLBL_MGMT_A_ADDRSELECTOR); if (nla_b == NULL) return -ENOMEM; addr_struct.s_addr = iter4->addr; ret_val = nla_put(skb, NLBL_MGMT_A_IPV4ADDR, sizeof(struct in_addr), &addr_struct); if (ret_val != 0) return ret_val; addr_struct.s_addr = iter4->mask; ret_val = nla_put(skb, NLBL_MGMT_A_IPV4MASK, sizeof(struct in_addr), &addr_struct); if (ret_val != 
0) return ret_val; map4 = netlbl_domhsh_addr4_entry(iter4); ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL, map4->type); if (ret_val != 0) return ret_val; switch (map4->type) { case NETLBL_NLTYPE_CIPSOV4: ret_val = nla_put_u32(skb, NLBL_MGMT_A_CV4DOI, map4->type_def.cipsov4->doi); if (ret_val != 0) return ret_val; break; } nla_nest_end(skb, nla_b); } #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) netlbl_af6list_foreach_rcu(iter6, &entry->type_def.addrsel->list6) { struct netlbl_domaddr6_map *map6; nla_b = nla_nest_start(skb, NLBL_MGMT_A_ADDRSELECTOR); if (nla_b == NULL) return -ENOMEM; ret_val = nla_put(skb, NLBL_MGMT_A_IPV6ADDR, sizeof(struct in6_addr), &iter6->addr); if (ret_val != 0) return ret_val; ret_val = nla_put(skb, NLBL_MGMT_A_IPV6MASK, sizeof(struct in6_addr), &iter6->mask); if (ret_val != 0) return ret_val; map6 = netlbl_domhsh_addr6_entry(iter6); ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL, map6->type); if (ret_val != 0) return ret_val; nla_nest_end(skb, nla_b); } #endif /* IPv6 */ nla_nest_end(skb, nla_a); break; case NETLBL_NLTYPE_UNLABELED: ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL, entry->type); break; case NETLBL_NLTYPE_CIPSOV4: ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL, entry->type); if (ret_val != 0) return ret_val; ret_val = nla_put_u32(skb, NLBL_MGMT_A_CV4DOI, entry->type_def.cipsov4->doi); break; } return ret_val; } /* * NetLabel Command Handlers */ /** * netlbl_mgmt_add - Handle an ADD message * @skb: the NETLINK buffer * @info: the Generic NETLINK info block * * Description: * Process a user generated ADD message and add the domains from the message * to the hash table. See netlabel.h for a description of the message format. * Returns zero on success, negative values on failure. 
* */ static int netlbl_mgmt_add(struct sk_buff *skb, struct genl_info *info) { struct netlbl_audit audit_info; if ((!info->attrs[NLBL_MGMT_A_DOMAIN]) || (!info->attrs[NLBL_MGMT_A_PROTOCOL]) || (info->attrs[NLBL_MGMT_A_IPV4ADDR] && info->attrs[NLBL_MGMT_A_IPV6ADDR]) || (info->attrs[NLBL_MGMT_A_IPV4MASK] && info->attrs[NLBL_MGMT_A_IPV6MASK]) || ((info->attrs[NLBL_MGMT_A_IPV4ADDR] != NULL) ^ (info->attrs[NLBL_MGMT_A_IPV4MASK] != NULL)) || ((info->attrs[NLBL_MGMT_A_IPV6ADDR] != NULL) ^ (info->attrs[NLBL_MGMT_A_IPV6MASK] != NULL))) return -EINVAL; netlbl_netlink_auditinfo(skb, &audit_info); return netlbl_mgmt_add_common(info, &audit_info); } /** * netlbl_mgmt_remove - Handle a REMOVE message * @skb: the NETLINK buffer * @info: the Generic NETLINK info block * * Description: * Process a user generated REMOVE message and remove the specified domain * mappings. Returns zero on success, negative values on failure. * */ static int netlbl_mgmt_remove(struct sk_buff *skb, struct genl_info *info) { char *domain; struct netlbl_audit audit_info; if (!info->attrs[NLBL_MGMT_A_DOMAIN]) return -EINVAL; netlbl_netlink_auditinfo(skb, &audit_info); domain = nla_data(info->attrs[NLBL_MGMT_A_DOMAIN]); return netlbl_domhsh_remove(domain, &audit_info); } /** * netlbl_mgmt_listall_cb - netlbl_domhsh_walk() callback for LISTALL * @entry: the domain mapping hash table entry * @arg: the netlbl_domhsh_walk_arg structure * * Description: * This function is designed to be used as a callback to the * netlbl_domhsh_walk() function for use in generating a response for a LISTALL * message. Returns the size of the message on success, negative values on * failure. 
* */ static int netlbl_mgmt_listall_cb(struct netlbl_dom_map *entry, void *arg) { int ret_val = -ENOMEM; struct netlbl_domhsh_walk_arg *cb_arg = arg; void *data; data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).pid, cb_arg->seq, &netlbl_mgmt_gnl_family, NLM_F_MULTI, NLBL_MGMT_C_LISTALL); if (data == NULL) goto listall_cb_failure; ret_val = netlbl_mgmt_listentry(cb_arg->skb, entry); if (ret_val != 0) goto listall_cb_failure; cb_arg->seq++; return genlmsg_end(cb_arg->skb, data); listall_cb_failure: genlmsg_cancel(cb_arg->skb, data); return ret_val; } /** * netlbl_mgmt_listall - Handle a LISTALL message * @skb: the NETLINK buffer * @cb: the NETLINK callback * * Description: * Process a user generated LISTALL message and dumps the domain hash table in * a form suitable for use in a kernel generated LISTALL message. Returns zero * on success, negative values on failure. * */ static int netlbl_mgmt_listall(struct sk_buff *skb, struct netlink_callback *cb) { struct netlbl_domhsh_walk_arg cb_arg; u32 skip_bkt = cb->args[0]; u32 skip_chain = cb->args[1]; cb_arg.nl_cb = cb; cb_arg.skb = skb; cb_arg.seq = cb->nlh->nlmsg_seq; netlbl_domhsh_walk(&skip_bkt, &skip_chain, netlbl_mgmt_listall_cb, &cb_arg); cb->args[0] = skip_bkt; cb->args[1] = skip_chain; return skb->len; } /** * netlbl_mgmt_adddef - Handle an ADDDEF message * @skb: the NETLINK buffer * @info: the Generic NETLINK info block * * Description: * Process a user generated ADDDEF message and respond accordingly. Returns * zero on success, negative values on failure. 
* */ static int netlbl_mgmt_adddef(struct sk_buff *skb, struct genl_info *info) { struct netlbl_audit audit_info; if ((!info->attrs[NLBL_MGMT_A_PROTOCOL]) || (info->attrs[NLBL_MGMT_A_IPV4ADDR] && info->attrs[NLBL_MGMT_A_IPV6ADDR]) || (info->attrs[NLBL_MGMT_A_IPV4MASK] && info->attrs[NLBL_MGMT_A_IPV6MASK]) || ((info->attrs[NLBL_MGMT_A_IPV4ADDR] != NULL) ^ (info->attrs[NLBL_MGMT_A_IPV4MASK] != NULL)) || ((info->attrs[NLBL_MGMT_A_IPV6ADDR] != NULL) ^ (info->attrs[NLBL_MGMT_A_IPV6MASK] != NULL))) return -EINVAL; netlbl_netlink_auditinfo(skb, &audit_info); return netlbl_mgmt_add_common(info, &audit_info); } /** * netlbl_mgmt_removedef - Handle a REMOVEDEF message * @skb: the NETLINK buffer * @info: the Generic NETLINK info block * * Description: * Process a user generated REMOVEDEF message and remove the default domain * mapping. Returns zero on success, negative values on failure. * */ static int netlbl_mgmt_removedef(struct sk_buff *skb, struct genl_info *info) { struct netlbl_audit audit_info; netlbl_netlink_auditinfo(skb, &audit_info); return netlbl_domhsh_remove_default(&audit_info); } /** * netlbl_mgmt_listdef - Handle a LISTDEF message * @skb: the NETLINK buffer * @info: the Generic NETLINK info block * * Description: * Process a user generated LISTDEF message and dumps the default domain * mapping in a form suitable for use in a kernel generated LISTDEF message. * Returns zero on success, negative values on failure. 
* */ static int netlbl_mgmt_listdef(struct sk_buff *skb, struct genl_info *info) { int ret_val = -ENOMEM; struct sk_buff *ans_skb = NULL; void *data; struct netlbl_dom_map *entry; ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (ans_skb == NULL) return -ENOMEM; data = genlmsg_put_reply(ans_skb, info, &netlbl_mgmt_gnl_family, 0, NLBL_MGMT_C_LISTDEF); if (data == NULL) goto listdef_failure; rcu_read_lock(); entry = netlbl_domhsh_getentry(NULL); if (entry == NULL) { ret_val = -ENOENT; goto listdef_failure_lock; } ret_val = netlbl_mgmt_listentry(ans_skb, entry); rcu_read_unlock(); if (ret_val != 0) goto listdef_failure; genlmsg_end(ans_skb, data); return genlmsg_reply(ans_skb, info); listdef_failure_lock: rcu_read_unlock(); listdef_failure: kfree_skb(ans_skb); return ret_val; } /** * netlbl_mgmt_protocols_cb - Write an individual PROTOCOL message response * @skb: the skb to write to * @cb: the NETLINK callback * @protocol: the NetLabel protocol to use in the message * * Description: * This function is to be used in conjunction with netlbl_mgmt_protocols() to * answer a application's PROTOCOLS message. Returns the size of the message * on success, negative values on failure. * */ static int netlbl_mgmt_protocols_cb(struct sk_buff *skb, struct netlink_callback *cb, u32 protocol) { int ret_val = -ENOMEM; void *data; data = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, &netlbl_mgmt_gnl_family, NLM_F_MULTI, NLBL_MGMT_C_PROTOCOLS); if (data == NULL) goto protocols_cb_failure; ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL, protocol); if (ret_val != 0) goto protocols_cb_failure; return genlmsg_end(skb, data); protocols_cb_failure: genlmsg_cancel(skb, data); return ret_val; } /** * netlbl_mgmt_protocols - Handle a PROTOCOLS message * @skb: the NETLINK buffer * @cb: the NETLINK callback * * Description: * Process a user generated PROTOCOLS message and respond accordingly. 
* */ static int netlbl_mgmt_protocols(struct sk_buff *skb, struct netlink_callback *cb) { u32 protos_sent = cb->args[0]; if (protos_sent == 0) { if (netlbl_mgmt_protocols_cb(skb, cb, NETLBL_NLTYPE_UNLABELED) < 0) goto protocols_return; protos_sent++; } if (protos_sent == 1) { if (netlbl_mgmt_protocols_cb(skb, cb, NETLBL_NLTYPE_CIPSOV4) < 0) goto protocols_return; protos_sent++; } protocols_return: cb->args[0] = protos_sent; return skb->len; } /** * netlbl_mgmt_version - Handle a VERSION message * @skb: the NETLINK buffer * @info: the Generic NETLINK info block * * Description: * Process a user generated VERSION message and respond accordingly. Returns * zero on success, negative values on failure. * */ static int netlbl_mgmt_version(struct sk_buff *skb, struct genl_info *info) { int ret_val = -ENOMEM; struct sk_buff *ans_skb = NULL; void *data; ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (ans_skb == NULL) return -ENOMEM; data = genlmsg_put_reply(ans_skb, info, &netlbl_mgmt_gnl_family, 0, NLBL_MGMT_C_VERSION); if (data == NULL) goto version_failure; ret_val = nla_put_u32(ans_skb, NLBL_MGMT_A_VERSION, NETLBL_PROTO_VERSION); if (ret_val != 0) goto version_failure; genlmsg_end(ans_skb, data); return genlmsg_reply(ans_skb, info); version_failure: kfree_skb(ans_skb); return ret_val; } /* * NetLabel Generic NETLINK Command Definitions */ static struct genl_ops netlbl_mgmt_genl_ops[] = { { .cmd = NLBL_MGMT_C_ADD, .flags = GENL_ADMIN_PERM, .policy = netlbl_mgmt_genl_policy, .doit = netlbl_mgmt_add, .dumpit = NULL, }, { .cmd = NLBL_MGMT_C_REMOVE, .flags = GENL_ADMIN_PERM, .policy = netlbl_mgmt_genl_policy, .doit = netlbl_mgmt_remove, .dumpit = NULL, }, { .cmd = NLBL_MGMT_C_LISTALL, .flags = 0, .policy = netlbl_mgmt_genl_policy, .doit = NULL, .dumpit = netlbl_mgmt_listall, }, { .cmd = NLBL_MGMT_C_ADDDEF, .flags = GENL_ADMIN_PERM, .policy = netlbl_mgmt_genl_policy, .doit = netlbl_mgmt_adddef, .dumpit = NULL, }, { .cmd = NLBL_MGMT_C_REMOVEDEF, .flags = 
GENL_ADMIN_PERM, .policy = netlbl_mgmt_genl_policy, .doit = netlbl_mgmt_removedef, .dumpit = NULL, }, { .cmd = NLBL_MGMT_C_LISTDEF, .flags = 0, .policy = netlbl_mgmt_genl_policy, .doit = netlbl_mgmt_listdef, .dumpit = NULL, }, { .cmd = NLBL_MGMT_C_PROTOCOLS, .flags = 0, .policy = netlbl_mgmt_genl_policy, .doit = NULL, .dumpit = netlbl_mgmt_protocols, }, { .cmd = NLBL_MGMT_C_VERSION, .flags = 0, .policy = netlbl_mgmt_genl_policy, .doit = netlbl_mgmt_version, .dumpit = NULL, }, }; /* * NetLabel Generic NETLINK Protocol Functions */ /** * netlbl_mgmt_genl_init - Register the NetLabel management component * * Description: * Register the NetLabel management component with the Generic NETLINK * mechanism. Returns zero on success, negative values on failure. * */ int __init netlbl_mgmt_genl_init(void) { return genl_register_family_with_ops(&netlbl_mgmt_gnl_family, netlbl_mgmt_genl_ops, ARRAY_SIZE(netlbl_mgmt_genl_ops)); }
gpl-2.0
jackzzjack/linux
drivers/mfd/wm8350-core.c
1097
11540
/* * wm8350-core.c -- Device access for Wolfson WM8350 * * Copyright 2007, 2008 Wolfson Microelectronics PLC. * * Author: Liam Girdwood, Mark Brown * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/bug.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/regmap.h> #include <linux/workqueue.h> #include <linux/mfd/wm8350/core.h> #include <linux/mfd/wm8350/audio.h> #include <linux/mfd/wm8350/comparator.h> #include <linux/mfd/wm8350/gpio.h> #include <linux/mfd/wm8350/pmic.h> #include <linux/mfd/wm8350/rtc.h> #include <linux/mfd/wm8350/supply.h> #include <linux/mfd/wm8350/wdt.h> #define WM8350_CLOCK_CONTROL_1 0x28 #define WM8350_AIF_TEST 0x74 /* debug */ #define WM8350_BUS_DEBUG 0 #if WM8350_BUS_DEBUG #define dump(regs, src) do { \ int i_; \ u16 *src_ = src; \ printk(KERN_DEBUG); \ for (i_ = 0; i_ < regs; i_++) \ printk(" 0x%4.4x", *src_++); \ printk("\n"); \ } while (0); #else #define dump(bytes, src) #endif #define WM8350_LOCK_DEBUG 0 #if WM8350_LOCK_DEBUG #define ldbg(format, arg...) printk(format, ## arg) #else #define ldbg(format, arg...) 
#endif /* * WM8350 Device IO */ static DEFINE_MUTEX(reg_lock_mutex); /* * Safe read, modify, write methods */ int wm8350_clear_bits(struct wm8350 *wm8350, u16 reg, u16 mask) { return regmap_update_bits(wm8350->regmap, reg, mask, 0); } EXPORT_SYMBOL_GPL(wm8350_clear_bits); int wm8350_set_bits(struct wm8350 *wm8350, u16 reg, u16 mask) { return regmap_update_bits(wm8350->regmap, reg, mask, mask); } EXPORT_SYMBOL_GPL(wm8350_set_bits); u16 wm8350_reg_read(struct wm8350 *wm8350, int reg) { unsigned int data; int err; err = regmap_read(wm8350->regmap, reg, &data); if (err) dev_err(wm8350->dev, "read from reg R%d failed\n", reg); return data; } EXPORT_SYMBOL_GPL(wm8350_reg_read); int wm8350_reg_write(struct wm8350 *wm8350, int reg, u16 val) { int ret; ret = regmap_write(wm8350->regmap, reg, val); if (ret) dev_err(wm8350->dev, "write to reg R%d failed\n", reg); return ret; } EXPORT_SYMBOL_GPL(wm8350_reg_write); int wm8350_block_read(struct wm8350 *wm8350, int start_reg, int regs, u16 *dest) { int err = 0; err = regmap_bulk_read(wm8350->regmap, start_reg, dest, regs); if (err) dev_err(wm8350->dev, "block read starting from R%d failed\n", start_reg); return err; } EXPORT_SYMBOL_GPL(wm8350_block_read); int wm8350_block_write(struct wm8350 *wm8350, int start_reg, int regs, u16 *src) { int ret = 0; ret = regmap_bulk_write(wm8350->regmap, start_reg, src, regs); if (ret) dev_err(wm8350->dev, "block write starting at R%d failed\n", start_reg); return ret; } EXPORT_SYMBOL_GPL(wm8350_block_write); /** * wm8350_reg_lock() * * The WM8350 has a hardware lock which can be used to prevent writes to * some registers (generally those which can cause particularly serious * problems if misused). This function enables that lock. 
*/ int wm8350_reg_lock(struct wm8350 *wm8350) { int ret; mutex_lock(&reg_lock_mutex); ldbg(__func__); ret = wm8350_reg_write(wm8350, WM8350_SECURITY, WM8350_LOCK_KEY); if (ret) dev_err(wm8350->dev, "lock failed\n"); wm8350->unlocked = false; mutex_unlock(&reg_lock_mutex); return ret; } EXPORT_SYMBOL_GPL(wm8350_reg_lock); /** * wm8350_reg_unlock() * * The WM8350 has a hardware lock which can be used to prevent writes to * some registers (generally those which can cause particularly serious * problems if misused). This function disables that lock so updates * can be performed. For maximum safety this should be done only when * required. */ int wm8350_reg_unlock(struct wm8350 *wm8350) { int ret; mutex_lock(&reg_lock_mutex); ldbg(__func__); ret = wm8350_reg_write(wm8350, WM8350_SECURITY, WM8350_UNLOCK_KEY); if (ret) dev_err(wm8350->dev, "unlock failed\n"); wm8350->unlocked = true; mutex_unlock(&reg_lock_mutex); return ret; } EXPORT_SYMBOL_GPL(wm8350_reg_unlock); int wm8350_read_auxadc(struct wm8350 *wm8350, int channel, int scale, int vref) { u16 reg, result = 0; if (channel < WM8350_AUXADC_AUX1 || channel > WM8350_AUXADC_TEMP) return -EINVAL; if (channel >= WM8350_AUXADC_USB && channel <= WM8350_AUXADC_TEMP && (scale != 0 || vref != 0)) return -EINVAL; mutex_lock(&wm8350->auxadc_mutex); /* Turn on the ADC */ reg = wm8350_reg_read(wm8350, WM8350_POWER_MGMT_5); wm8350_reg_write(wm8350, WM8350_POWER_MGMT_5, reg | WM8350_AUXADC_ENA); if (scale || vref) { reg = scale << 13; reg |= vref << 12; wm8350_reg_write(wm8350, WM8350_AUX1_READBACK + channel, reg); } reg = wm8350_reg_read(wm8350, WM8350_DIGITISER_CONTROL_1); reg |= 1 << channel | WM8350_AUXADC_POLL; wm8350_reg_write(wm8350, WM8350_DIGITISER_CONTROL_1, reg); /* If a late IRQ left the completion signalled then consume * the completion. 
*/ try_wait_for_completion(&wm8350->auxadc_done); /* We ignore the result of the completion and just check for a * conversion result, allowing us to soldier on if the IRQ * infrastructure is not set up for the chip. */ wait_for_completion_timeout(&wm8350->auxadc_done, msecs_to_jiffies(5)); reg = wm8350_reg_read(wm8350, WM8350_DIGITISER_CONTROL_1); if (reg & WM8350_AUXADC_POLL) dev_err(wm8350->dev, "adc chn %d read timeout\n", channel); else result = wm8350_reg_read(wm8350, WM8350_AUX1_READBACK + channel); /* Turn off the ADC */ reg = wm8350_reg_read(wm8350, WM8350_POWER_MGMT_5); wm8350_reg_write(wm8350, WM8350_POWER_MGMT_5, reg & ~WM8350_AUXADC_ENA); mutex_unlock(&wm8350->auxadc_mutex); return result & WM8350_AUXADC_DATA1_MASK; } EXPORT_SYMBOL_GPL(wm8350_read_auxadc); static irqreturn_t wm8350_auxadc_irq(int irq, void *irq_data) { struct wm8350 *wm8350 = irq_data; complete(&wm8350->auxadc_done); return IRQ_HANDLED; } /* * Register a client device. This is non-fatal since there is no need to * fail the entire device init due to a single platform device failing. 
*/ static void wm8350_client_dev_register(struct wm8350 *wm8350, const char *name, struct platform_device **pdev) { int ret; *pdev = platform_device_alloc(name, -1); if (*pdev == NULL) { dev_err(wm8350->dev, "Failed to allocate %s\n", name); return; } (*pdev)->dev.parent = wm8350->dev; platform_set_drvdata(*pdev, wm8350); ret = platform_device_add(*pdev); if (ret != 0) { dev_err(wm8350->dev, "Failed to register %s: %d\n", name, ret); platform_device_put(*pdev); *pdev = NULL; } } int wm8350_device_init(struct wm8350 *wm8350, int irq, struct wm8350_platform_data *pdata) { int ret; unsigned int id1, id2, mask_rev; unsigned int cust_id, mode, chip_rev; dev_set_drvdata(wm8350->dev, wm8350); /* get WM8350 revision and config mode */ ret = regmap_read(wm8350->regmap, WM8350_RESET_ID, &id1); if (ret != 0) { dev_err(wm8350->dev, "Failed to read ID: %d\n", ret); goto err; } ret = regmap_read(wm8350->regmap, WM8350_ID, &id2); if (ret != 0) { dev_err(wm8350->dev, "Failed to read ID: %d\n", ret); goto err; } ret = regmap_read(wm8350->regmap, WM8350_REVISION, &mask_rev); if (ret != 0) { dev_err(wm8350->dev, "Failed to read revision: %d\n", ret); goto err; } if (id1 != 0x6143) { dev_err(wm8350->dev, "Device with ID %x is not a WM8350\n", id1); ret = -ENODEV; goto err; } mode = (id2 & WM8350_CONF_STS_MASK) >> 10; cust_id = id2 & WM8350_CUST_ID_MASK; chip_rev = (id2 & WM8350_CHIP_REV_MASK) >> 12; dev_info(wm8350->dev, "CONF_STS %d, CUST_ID %d, MASK_REV %d, CHIP_REV %d\n", mode, cust_id, mask_rev, chip_rev); if (cust_id != 0) { dev_err(wm8350->dev, "Unsupported CUST_ID\n"); ret = -ENODEV; goto err; } switch (mask_rev) { case 0: wm8350->pmic.max_dcdc = WM8350_DCDC_6; wm8350->pmic.max_isink = WM8350_ISINK_B; switch (chip_rev) { case WM8350_REV_E: dev_info(wm8350->dev, "WM8350 Rev E\n"); break; case WM8350_REV_F: dev_info(wm8350->dev, "WM8350 Rev F\n"); break; case WM8350_REV_G: dev_info(wm8350->dev, "WM8350 Rev G\n"); wm8350->power.rev_g_coeff = 1; break; case WM8350_REV_H: 
dev_info(wm8350->dev, "WM8350 Rev H\n"); wm8350->power.rev_g_coeff = 1; break; default: /* For safety we refuse to run on unknown hardware */ dev_err(wm8350->dev, "Unknown WM8350 CHIP_REV\n"); ret = -ENODEV; goto err; } break; case 1: wm8350->pmic.max_dcdc = WM8350_DCDC_4; wm8350->pmic.max_isink = WM8350_ISINK_A; switch (chip_rev) { case 0: dev_info(wm8350->dev, "WM8351 Rev A\n"); wm8350->power.rev_g_coeff = 1; break; case 1: dev_info(wm8350->dev, "WM8351 Rev B\n"); wm8350->power.rev_g_coeff = 1; break; default: dev_err(wm8350->dev, "Unknown WM8351 CHIP_REV\n"); ret = -ENODEV; goto err; } break; case 2: wm8350->pmic.max_dcdc = WM8350_DCDC_6; wm8350->pmic.max_isink = WM8350_ISINK_B; switch (chip_rev) { case 0: dev_info(wm8350->dev, "WM8352 Rev A\n"); wm8350->power.rev_g_coeff = 1; break; default: dev_err(wm8350->dev, "Unknown WM8352 CHIP_REV\n"); ret = -ENODEV; goto err; } break; default: dev_err(wm8350->dev, "Unknown MASK_REV\n"); ret = -ENODEV; goto err; } mutex_init(&wm8350->auxadc_mutex); init_completion(&wm8350->auxadc_done); ret = wm8350_irq_init(wm8350, irq, pdata); if (ret < 0) goto err; if (wm8350->irq_base) { ret = request_threaded_irq(wm8350->irq_base + WM8350_IRQ_AUXADC_DATARDY, NULL, wm8350_auxadc_irq, IRQF_ONESHOT, "auxadc", wm8350); if (ret < 0) dev_warn(wm8350->dev, "Failed to request AUXADC IRQ: %d\n", ret); } if (pdata && pdata->init) { ret = pdata->init(wm8350); if (ret != 0) { dev_err(wm8350->dev, "Platform init() failed: %d\n", ret); goto err_irq; } } wm8350_reg_write(wm8350, WM8350_SYSTEM_INTERRUPTS_MASK, 0x0); wm8350_client_dev_register(wm8350, "wm8350-codec", &(wm8350->codec.pdev)); wm8350_client_dev_register(wm8350, "wm8350-gpio", &(wm8350->gpio.pdev)); wm8350_client_dev_register(wm8350, "wm8350-hwmon", &(wm8350->hwmon.pdev)); wm8350_client_dev_register(wm8350, "wm8350-power", &(wm8350->power.pdev)); wm8350_client_dev_register(wm8350, "wm8350-rtc", &(wm8350->rtc.pdev)); wm8350_client_dev_register(wm8350, "wm8350-wdt", &(wm8350->wdt.pdev)); 
return 0; err_irq: wm8350_irq_exit(wm8350); err: return ret; } EXPORT_SYMBOL_GPL(wm8350_device_init); void wm8350_device_exit(struct wm8350 *wm8350) { int i; for (i = 0; i < ARRAY_SIZE(wm8350->pmic.led); i++) platform_device_unregister(wm8350->pmic.led[i].pdev); for (i = 0; i < ARRAY_SIZE(wm8350->pmic.pdev); i++) platform_device_unregister(wm8350->pmic.pdev[i]); platform_device_unregister(wm8350->wdt.pdev); platform_device_unregister(wm8350->rtc.pdev); platform_device_unregister(wm8350->power.pdev); platform_device_unregister(wm8350->hwmon.pdev); platform_device_unregister(wm8350->gpio.pdev); platform_device_unregister(wm8350->codec.pdev); if (wm8350->irq_base) free_irq(wm8350->irq_base + WM8350_IRQ_AUXADC_DATARDY, wm8350); wm8350_irq_exit(wm8350); } EXPORT_SYMBOL_GPL(wm8350_device_exit); MODULE_DESCRIPTION("WM8350 AudioPlus PMIC core driver"); MODULE_LICENSE("GPL");
gpl-2.0
hackndev/linux-hnd
arch/m68knommu/lib/muldi3.c
2121
2637
/* muldi3.c extracted from gcc-2.7.2.3/libgcc2.c and
 * gcc-2.7.2.3/longlong.h which is: */
/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

#define BITS_PER_UNIT	8
#define SI_TYPE_SIZE	32

#define __BITS4		(SI_TYPE_SIZE / 4)
#define __ll_B		(1L << (SI_TYPE_SIZE / 2))
#define __ll_lowpart(t)  ((USItype) (t) % __ll_B)
#define __ll_highpart(t) ((USItype) (t) / __ll_B)

typedef int		SItype		__attribute__ ((mode (SI)));
typedef unsigned int	USItype		__attribute__ ((mode (SI)));
typedef int		DItype		__attribute__ ((mode (DI)));
typedef int		word_type	__attribute__ ((mode (__word__)));

struct DIstruct { SItype high, low; };

typedef union {
	struct DIstruct s;
	DItype ll;
} DIunion;

/*
 * Widening 32 x 32 -> 64 bit unsigned multiply, built from four
 * 16 x 16 -> 32 bit partial products for targets that lack a native
 * widening multiply instruction.  The two result words are returned
 * through the halves of a DIunion.
 */
static DIunion umulsidi3(USItype u, USItype v)
{
	USItype p00, p01, p10, p11;
	USItype ul, uh, vl, vh;
	DIunion w;

	ul = __ll_lowpart(u);
	uh = __ll_highpart(u);
	vl = __ll_lowpart(v);
	vh = __ll_highpart(v);

	p00 = (USItype) ul * vl;
	p01 = (USItype) ul * vh;
	p10 = (USItype) uh * vl;
	p11 = (USItype) uh * vh;

	p01 += __ll_highpart(p00);	/* this can't give carry */
	p01 += p10;			/* but this indeed can */
	if (p01 < p10)			/* did we get it? */
		p11 += __ll_B;		/* yes, add it in the proper pos. */

	w.s.high = p11 + __ll_highpart(p01);
	w.s.low = __ll_lowpart(p01) * __ll_B + __ll_lowpart(p00);
	return w;
}

/*
 * 64 x 64 -> 64 bit signed multiply: the product of the two low words
 * gives the full double-width base, then each cross product only
 * contributes to the high word (everything above wraps mod 2^64).
 */
DItype __muldi3(DItype u, DItype v)
{
	DIunion a, b, w;

	a.ll = u;
	b.ll = v;

	w = umulsidi3(a.s.low, b.s.low);
	w.s.high += ((USItype) a.s.low * (USItype) b.s.high
		     + (USItype) a.s.high * (USItype) b.s.low);

	return w.ll;
}
gpl-2.0
rickyzhang82/odroid-linux
drivers/platform/x86/acerhdf.c
2377
20371
/*
 * acerhdf - A driver which monitors the temperature
 *           of the aspire one netbook, turns on/off the fan
 *           as soon as the upper/lower threshold is reached.
 *
 * (C) 2009 - Peter Feuerer     peter (a) piie.net
 *                              http://piie.net
 *     2009 Borislav Petkov <petkovbb@gmail.com>
 *
 * Inspired by and many thanks to:
 *  o acerfand   - Rachel Greenham
 *  o acer_ec.pl - Michael Kurz     michi.kurz (at) googlemail.com
 *               - Petr Tomasek     tomasek (#) etf,cuni,cz
 *               - Carlos Corbacho  cathectic (at) gmail.com
 *  o lkml       - Matthew Garrett
 *               - Borislav Petkov
 *               - Andreas Mohr
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#define pr_fmt(fmt) "acerhdf: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dmi.h>
#include <linux/acpi.h>
#include <linux/thermal.h>
#include <linux/platform_device.h>

/*
 * The driver is started with "kernel mode off" by default. That means, the BIOS
 * is still in control of the fan. In this mode the driver allows to read the
 * temperature of the cpu and a userspace tool may take over control of the fan.
 * If the driver is switched to "kernel mode" (e.g. via module parameter) the
 * driver is in full control of the fan. If you want the module to be started in
 * kernel mode by default, define the following:
 */
#undef START_IN_KERNEL_MODE

#define DRV_VER "0.5.24"

/*
 * According to the Atom N270 datasheet,
 * (http://download.intel.com/design/processor/datashts/320032.pdf) the
 * CPU's optimal operating limits denoted in junction temperature as
 * measured by the on-die thermal monitor are within 0 <= Tj <= 90. So,
 * assume 89°C is critical temperature.
 */
#define ACERHDF_TEMP_CRIT 89000
#define ACERHDF_FAN_OFF 0
#define ACERHDF_FAN_AUTO 1

/*
 * No matter what value the user puts into the fanon variable, turn on the fan
 * at 80 degree Celsius to prevent hardware damage
 */
#define ACERHDF_MAX_FANON 80000

/*
 * Maximum interval between two temperature checks is 15 seconds, as the die
 * can get hot really fast under heavy load (plus we shouldn't forget about
 * possible impact of _external_ aggressive sources such as heaters, sun etc.)
 */
#define ACERHDF_MAX_INTERVAL 15

#ifdef START_IN_KERNEL_MODE
static int kernelmode = 1;
#else
static int kernelmode;
#endif

static unsigned int interval = 10;	/* polling period, seconds */
static unsigned int fanon = 63000;	/* fan-on threshold, millicelsius */
static unsigned int fanoff = 58000;	/* fan-off threshold, millicelsius */
static unsigned int verbose;
static unsigned int fanstate = ACERHDF_FAN_AUTO;	/* last state we set */
static char force_bios[16];
static char force_product[16];
static unsigned int prev_interval;
static struct thermal_zone_device *thz_dev;
static struct thermal_cooling_device *cl_dev;
static struct platform_device *acerhdf_dev;

module_param(kernelmode, uint, 0);
MODULE_PARM_DESC(kernelmode, "Kernel mode fan control on / off");
module_param(interval, uint, 0600);
MODULE_PARM_DESC(interval, "Polling interval of temperature check");
module_param(fanon, uint, 0600);
MODULE_PARM_DESC(fanon, "Turn the fan on above this temperature");
module_param(fanoff, uint, 0600);
MODULE_PARM_DESC(fanoff, "Turn the fan off below this temperature");
module_param(verbose, uint, 0600);
MODULE_PARM_DESC(verbose, "Enable verbose dmesg output");
module_param_string(force_bios, force_bios, 16, 0);
MODULE_PARM_DESC(force_bios, "Force BIOS version and omit BIOS check");
module_param_string(force_product, force_product, 16, 0);
MODULE_PARM_DESC(force_product, "Force BIOS product and omit BIOS check");

/*
 * cmd_off: to switch the fan completely off and check if the fan is off
 * cmd_auto: to set the BIOS in control of the fan. The BIOS regulates then
 *	the fan speed depending on the temperature
 */
struct fancmd {
	u8 cmd_off;
	u8 cmd_auto;
};

/* BIOS settings */
struct bios_settings_t {
	const char *vendor;
	const char *product;
	const char *version;
	unsigned char fanreg;	/* EC register holding the fan state */
	unsigned char tempreg;	/* EC register holding the temperature */
	struct fancmd cmd;
};

/* Register addresses and values for different BIOS versions */
static const struct bios_settings_t bios_tbl[] = {
	/* AOA110 */
	{"Acer", "AOA110", "v0.3109", 0x55, 0x58, {0x1f, 0x00} },
	{"Acer", "AOA110", "v0.3114", 0x55, 0x58, {0x1f, 0x00} },
	{"Acer", "AOA110", "v0.3301", 0x55, 0x58, {0xaf, 0x00} },
	{"Acer", "AOA110", "v0.3304", 0x55, 0x58, {0xaf, 0x00} },
	{"Acer", "AOA110", "v0.3305", 0x55, 0x58, {0xaf, 0x00} },
	{"Acer", "AOA110", "v0.3307", 0x55, 0x58, {0xaf, 0x00} },
	{"Acer", "AOA110", "v0.3308", 0x55, 0x58, {0x21, 0x00} },
	{"Acer", "AOA110", "v0.3309", 0x55, 0x58, {0x21, 0x00} },
	{"Acer", "AOA110", "v0.3310", 0x55, 0x58, {0x21, 0x00} },
	/* AOA150 */
	{"Acer", "AOA150", "v0.3114", 0x55, 0x58, {0x1f, 0x00} },
	{"Acer", "AOA150", "v0.3301", 0x55, 0x58, {0x20, 0x00} },
	{"Acer", "AOA150", "v0.3304", 0x55, 0x58, {0x20, 0x00} },
	{"Acer", "AOA150", "v0.3305", 0x55, 0x58, {0x20, 0x00} },
	{"Acer", "AOA150", "v0.3307", 0x55, 0x58, {0x20, 0x00} },
	{"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x00} },
	{"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x00} },
	{"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x00} },
	/* Acer 1410 */
	{"Acer", "Aspire 1410", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1410", "v0.3113", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1410", "v0.3115", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1410", "v0.3117", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1410", "v0.3119", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1410", "v0.3120", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1410", "v1.3204", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1410", "v1.3303", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1410", "v1.3308", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1410", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
	/* Acer 1810xx */
	{"Acer", "Aspire 1810TZ", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1810T",  "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1810TZ", "v0.3113", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1810T",  "v0.3113", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1810TZ", "v0.3115", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1810T",  "v0.3115", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1810TZ", "v0.3117", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1810T",  "v0.3117", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1810TZ", "v0.3119", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1810T",  "v0.3119", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1810TZ", "v0.3120", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1810T",  "v0.3120", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1810TZ", "v1.3204", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1810T",  "v1.3204", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1810TZ", "v1.3303", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1810T",  "v1.3303", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1810TZ", "v1.3308", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1810T",  "v1.3308", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1810TZ", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
	{"Acer", "Aspire 1810T",  "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
	/* Acer 531 */
	{"Acer", "AO531h", "v0.3201", 0x55, 0x58, {0x20, 0x00} },
	/* Gateway */
	{"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x00} },
	{"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x00} },
	{"Gateway", "LT31",   "v1.3103", 0x55, 0x58, {0x9e, 0x00} },
	{"Gateway", "LT31",   "v1.3201", 0x55, 0x58, {0x9e, 0x00} },
	{"Gateway", "LT31",   "v1.3302", 0x55, 0x58, {0x9e, 0x00} },
	/* Packard Bell */
	{"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00} },
	{"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} },
	{"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x00} },
	{"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} },
	{"Packard Bell", "DOTMU",  "v1.3303", 0x55, 0x58, {0x9e, 0x00} },
	{"Packard Bell", "DOTMU",  "v0.3120", 0x55, 0x58, {0x9e, 0x00} },
	{"Packard Bell", "DOTMU",  "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
	{"Packard Bell", "DOTMU",  "v0.3113", 0x55, 0x58, {0x9e, 0x00} },
	{"Packard Bell", "DOTMU",  "v0.3115", 0x55, 0x58, {0x9e, 0x00} },
	{"Packard Bell", "DOTMU",  "v0.3117", 0x55, 0x58, {0x9e, 0x00} },
	{"Packard Bell", "DOTMU",  "v0.3119", 0x55, 0x58, {0x9e, 0x00} },
	{"Packard Bell", "DOTMU",  "v1.3204", 0x55, 0x58, {0x9e, 0x00} },
	{"Packard Bell", "DOTMA",  "v1.3201", 0x55, 0x58, {0x9e, 0x00} },
	{"Packard Bell", "DOTMA",  "v1.3302", 0x55, 0x58, {0x9e, 0x00} },
	/* pewpew-terminator */
	{"", "", "", 0, 0, {0, 0} }
};

/* table entry matched against DMI data at init; NULL until then */
static const struct bios_settings_t *bios_cfg __read_mostly;

/* Read the EC temperature register; result is in millicelsius. */
static int acerhdf_get_temp(int *temp)
{
	u8 read_temp;

	if (ec_read(bios_cfg->tempreg, &read_temp))
		return -EINVAL;

	*temp = read_temp * 1000;

	return 0;
}

/* Read the EC fan register; anything but cmd_off is reported as AUTO. */
static int acerhdf_get_fanstate(int *state)
{
	u8 fan;

	if (ec_read(bios_cfg->fanreg, &fan))
		return -EINVAL;

	if (fan != bios_cfg->cmd.cmd_off)
		*state = ACERHDF_FAN_AUTO;
	else
		*state = ACERHDF_FAN_OFF;

	return 0;
}

/* Write the EC fan command for @state; invalid states fall back to AUTO. */
static void acerhdf_change_fanstate(int state)
{
	unsigned char cmd;

	if (verbose)
		pr_notice("fan %s\n", (state == ACERHDF_FAN_OFF) ?
				"OFF" : "ON");

	if ((state != ACERHDF_FAN_OFF) && (state != ACERHDF_FAN_AUTO)) {
		pr_err("invalid fan state %d requested, setting to auto!\n",
		       state);
		state = ACERHDF_FAN_AUTO;
	}

	cmd = (state == ACERHDF_FAN_OFF) ? bios_cfg->cmd.cmd_off
					 : bios_cfg->cmd.cmd_auto;
	fanstate = state;

	ec_write(bios_cfg->fanreg, cmd);
}

/* Clamp sysfs-writable parameters and propagate an interval change. */
static void acerhdf_check_param(struct thermal_zone_device *thermal)
{
	if (fanon > ACERHDF_MAX_FANON) {
		pr_err("fanon temperature too high, set to %d\n",
		       ACERHDF_MAX_FANON);
		fanon = ACERHDF_MAX_FANON;
	}

	if (kernelmode && prev_interval != interval) {
		if (interval > ACERHDF_MAX_INTERVAL) {
			pr_err("interval too high, set to %d\n",
			       ACERHDF_MAX_INTERVAL);
			interval = ACERHDF_MAX_INTERVAL;
		}
		if (verbose)
			pr_notice("interval changed to: %d\n", interval);
		thermal->polling_delay = interval*1000;
		prev_interval = interval;
	}
}

/*
 * This is the thermal zone callback which does the delayed polling of the fan
 * state. We do check /sysfs-originating settings here in acerhdf_check_param()
 * as late as the polling interval is since we can't do that in the respective
 * accessors of the module parameters.
 */
static int acerhdf_get_ec_temp(struct thermal_zone_device *thermal,
			       unsigned long *t)
{
	int temp, err = 0;

	acerhdf_check_param(thermal);

	err = acerhdf_get_temp(&temp);
	if (err)
		return err;

	if (verbose)
		pr_notice("temp %d\n", temp);

	*t = temp;
	return 0;
}

static int acerhdf_bind(struct thermal_zone_device *thermal,
			struct thermal_cooling_device *cdev)
{
	/* if the cooling device is the one from acerhdf bind it */
	if (cdev != cl_dev)
		return 0;

	if (thermal_zone_bind_cooling_device(thermal, 0, cdev)) {
		pr_err("error binding cooling dev\n");
		return -EINVAL;
	}
	return 0;
}

static int acerhdf_unbind(struct thermal_zone_device *thermal,
			  struct thermal_cooling_device *cdev)
{
	if (cdev != cl_dev)
		return 0;

	if (thermal_zone_unbind_cooling_device(thermal, 0, cdev)) {
		pr_err("error unbinding cooling dev\n");
		return -EINVAL;
	}
	return 0;
}

/* Hand fan control back to the BIOS and stop the zone's polling. */
static inline void acerhdf_revert_to_bios_mode(void)
{
	acerhdf_change_fanstate(ACERHDF_FAN_AUTO);
	kernelmode = 0;
	if (thz_dev)
		thz_dev->polling_delay = 0;
	pr_notice("kernel mode fan control OFF\n");
}

/*
 * Take over fan control and kick off polling.
 * NOTE(review): unlike acerhdf_revert_to_bios_mode() this dereferences
 * thz_dev without a NULL check — presumably only reachable after the
 * thermal zone is registered; confirm against the callers.
 */
static inline void acerhdf_enable_kernelmode(void)
{
	kernelmode = 1;

	thz_dev->polling_delay = interval*1000;
	thermal_zone_device_update(thz_dev);
	pr_notice("kernel mode fan control ON\n");
}

static int acerhdf_get_mode(struct thermal_zone_device *thermal,
			    enum thermal_device_mode *mode)
{
	if (verbose)
		pr_notice("kernel mode fan control %d\n", kernelmode);

	*mode = (kernelmode) ? THERMAL_DEVICE_ENABLED
			     : THERMAL_DEVICE_DISABLED;

	return 0;
}

/*
 * set operation mode;
 * enabled: the thermal layer of the kernel takes care about
 *          the temperature and the fan.
 * disabled: the BIOS takes control of the fan.
 */
static int acerhdf_set_mode(struct thermal_zone_device *thermal,
			    enum thermal_device_mode mode)
{
	if (mode == THERMAL_DEVICE_DISABLED && kernelmode)
		acerhdf_revert_to_bios_mode();
	else if (mode == THERMAL_DEVICE_ENABLED && !kernelmode)
		acerhdf_enable_kernelmode();

	return 0;
}

/* NOTE(review): for trip != 0 the output is left untouched (only one
 * trip point is registered with the zone); returns 0 either way. */
static int acerhdf_get_trip_type(struct thermal_zone_device *thermal, int trip,
				 enum thermal_trip_type *type)
{
	if (trip == 0)
		*type = THERMAL_TRIP_ACTIVE;

	return 0;
}

/* Trip 0 fires at the user-settable fan-on threshold. */
static int acerhdf_get_trip_temp(struct thermal_zone_device *thermal, int trip,
				 unsigned long *temp)
{
	if (trip == 0)
		*temp = fanon;

	return 0;
}

static int acerhdf_get_crit_temp(struct thermal_zone_device *thermal,
				 unsigned long *temperature)
{
	*temperature = ACERHDF_TEMP_CRIT;
	return 0;
}

/* bind callback functions to thermalzone */
static struct thermal_zone_device_ops acerhdf_dev_ops = {
	.bind = acerhdf_bind,
	.unbind = acerhdf_unbind,
	.get_temp = acerhdf_get_ec_temp,
	.get_mode = acerhdf_get_mode,
	.set_mode = acerhdf_set_mode,
	.get_trip_type = acerhdf_get_trip_type,
	.get_trip_temp = acerhdf_get_trip_temp,
	.get_crit_temp = acerhdf_get_crit_temp,
};


/*
 * cooling device callback functions
 * get maximal fan cooling state
 */
static int acerhdf_get_max_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	*state = 1;

	return 0;
}

static int acerhdf_get_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	int err = 0, tmp;

	err = acerhdf_get_fanstate(&tmp);
	if (err)
		return err;

	*state = (tmp == ACERHDF_FAN_AUTO) ? 1 : 0;
	return 0;
}

/* change current fan state - is overwritten when running in kernel mode */
static int acerhdf_set_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long state)
{
	int cur_temp, cur_state, err = 0;

	if (!kernelmode)
		return 0;

	err = acerhdf_get_temp(&cur_temp);
	if (err) {
		pr_err("error reading temperature, hand off control to BIOS\n");
		goto err_out;
	}

	err = acerhdf_get_fanstate(&cur_state);
	if (err) {
		pr_err("error reading fan state, hand off control to BIOS\n");
		goto err_out;
	}

	if (state == 0) {
		/* turn fan off only if below fanoff temperature */
		if ((cur_state == ACERHDF_FAN_AUTO) &&
		    (cur_temp < fanoff))
			acerhdf_change_fanstate(ACERHDF_FAN_OFF);
	} else {
		if (cur_state == ACERHDF_FAN_OFF)
			acerhdf_change_fanstate(ACERHDF_FAN_AUTO);
	}
	return 0;

err_out:
	acerhdf_revert_to_bios_mode();
	return -EINVAL;
}

/* bind fan callbacks to fan device */
static struct thermal_cooling_device_ops acerhdf_cooling_ops = {
	.get_max_state = acerhdf_get_max_state,
	.get_cur_state = acerhdf_get_cur_state,
	.set_cur_state = acerhdf_set_cur_state,
};

/* suspend / resume functionality */
static int acerhdf_suspend(struct device *dev)
{
	/* give the fan back to the BIOS across suspend */
	if (kernelmode)
		acerhdf_change_fanstate(ACERHDF_FAN_AUTO);

	if (verbose)
		pr_notice("going suspend\n");

	return 0;
}

static int __devinit acerhdf_probe(struct platform_device *device)
{
	return 0;
}

static int acerhdf_remove(struct platform_device *device)
{
	return 0;
}

static const struct dev_pm_ops acerhdf_pm_ops = {
	.suspend = acerhdf_suspend,
	.freeze  = acerhdf_suspend,
};

static struct platform_driver acerhdf_driver = {
	.driver = {
		.name  = "acerhdf",
		.owner = THIS_MODULE,
		.pm    = &acerhdf_pm_ops,
	},
	.probe = acerhdf_probe,
	.remove = acerhdf_remove,
};

/* checks if str begins with start */
static int str_starts_with(const char *str, const char *start)
{
	unsigned long str_len = 0, start_len = 0;

	str_len = strlen(str);
	start_len = strlen(start);

	if (str_len >= start_len &&
			!strncmp(str, start, start_len))
		return 1;

	return 0;
}

/* check hardware */
static int acerhdf_check_hardware(void)
{
	char const *vendor, *version, *product;
	const struct bios_settings_t *bt = NULL;

	/* get BIOS data */
	vendor  = dmi_get_system_info(DMI_SYS_VENDOR);
	version = dmi_get_system_info(DMI_BIOS_VERSION);
	product = dmi_get_system_info(DMI_PRODUCT_NAME);

	if (!vendor || !version || !product) {
		pr_err("error getting hardware information\n");
		return -EINVAL;
	}

	pr_info("Acer Aspire One Fan driver, v.%s\n", DRV_VER);

	if (force_bios[0]) {
		version = force_bios;
		pr_info("forcing BIOS version: %s\n", version);
		kernelmode = 0;
	}

	if (force_product[0]) {
		product = force_product;
		pr_info("forcing BIOS product: %s\n", product);
		kernelmode = 0;
	}

	if (verbose)
		pr_info("BIOS info: %s %s, product: %s\n",
			vendor, version, product);

	/* search BIOS version and vendor in BIOS settings table */
	for (bt = bios_tbl; bt->vendor[0]; bt++) {
		/*
		 * check if actual hardware BIOS vendor, product and version
		 * IDs start with the strings of BIOS table entry
		 */
		if (str_starts_with(vendor, bt->vendor) &&
				str_starts_with(product, bt->product) &&
				str_starts_with(version, bt->version)) {
			bios_cfg = bt;
			break;
		}
	}

	if (!bios_cfg) {
		pr_err("unknown (unsupported) BIOS version %s/%s/%s, "
		       "please report, aborting!\n", vendor, product, version);
		return -EINVAL;
	}

	/*
	 * if started with kernel mode off, prevent the kernel from switching
	 * off the fan
	 */
	if (!kernelmode) {
		pr_notice("Fan control off, to enable do:\n");
		pr_notice("echo -n \"enabled\" > "
			"/sys/class/thermal/thermal_zone0/mode\n");
	}

	return 0;
}

/* Register the platform driver and a single platform device. */
static int acerhdf_register_platform(void)
{
	int err = 0;

	err = platform_driver_register(&acerhdf_driver);
	if (err)
		return err;

	acerhdf_dev = platform_device_alloc("acerhdf", -1);
	if (!acerhdf_dev) {
		err = -ENOMEM;
		goto err_device_alloc;
	}

	err = platform_device_add(acerhdf_dev);
	if (err)
		goto err_device_add;

	return 0;

err_device_add:
	platform_device_put(acerhdf_dev);
err_device_alloc:
	platform_driver_unregister(&acerhdf_driver);
	return err;
}

static void acerhdf_unregister_platform(void)
{
	platform_device_unregister(acerhdf_dev);
	platform_driver_unregister(&acerhdf_driver);
}

/* Register the cooling device and the (single-trip) thermal zone. */
static int acerhdf_register_thermal(void)
{
	cl_dev = thermal_cooling_device_register("acerhdf-fan", NULL,
						 &acerhdf_cooling_ops);

	if (IS_ERR(cl_dev))
		return -EINVAL;

	thz_dev = thermal_zone_device_register("acerhdf", 1, NULL,
					      &acerhdf_dev_ops, 0, 0, 0,
					      (kernelmode) ? interval*1000 : 0);
	if (IS_ERR(thz_dev))
		return -EINVAL;

	return 0;
}

static void acerhdf_unregister_thermal(void)
{
	if (cl_dev) {
		thermal_cooling_device_unregister(cl_dev);
		cl_dev = NULL;
	}

	if (thz_dev) {
		thermal_zone_device_unregister(thz_dev);
		thz_dev = NULL;
	}
}

static int __init acerhdf_init(void)
{
	int err = 0;

	err = acerhdf_check_hardware();
	if (err)
		goto out_err;

	err = acerhdf_register_platform();
	if (err)
		goto out_err;

	err = acerhdf_register_thermal();
	if (err)
		goto err_unreg;

	return 0;

err_unreg:
	acerhdf_unregister_thermal();
	acerhdf_unregister_platform();

out_err:
	return err;
}

static void __exit acerhdf_exit(void)
{
	/* always leave the fan under BIOS control on unload */
	acerhdf_change_fanstate(ACERHDF_FAN_AUTO);
	acerhdf_unregister_thermal();
	acerhdf_unregister_platform();
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Peter Feuerer");
MODULE_DESCRIPTION("Aspire One temperature and fan driver");
MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:");
MODULE_ALIAS("dmi:*:*Acer*:pnAspire 1410*:");
MODULE_ALIAS("dmi:*:*Acer*:pnAspire 1810*:");
MODULE_ALIAS("dmi:*:*Acer*:pnAO531*:");
MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:");
MODULE_ALIAS("dmi:*:*Gateway*:pnLT31*:");
MODULE_ALIAS("dmi:*:*Packard Bell*:pnAOA*:");
MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOA*:");
MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOTMU*:");
MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOTMA*:");

module_init(acerhdf_init);
module_exit(acerhdf_exit);
gpl-2.0
croniccorey/OnePlus2-Kernel
drivers/mfd/max8925-core.c
2377
23793
/* * Base driver for Maxim MAX8925 * * Copyright (C) 2009-2010 Marvell International Ltd. * Haojian Zhuang <haojian.zhuang@marvell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/platform_device.h> #include <linux/regulator/machine.h> #include <linux/mfd/core.h> #include <linux/mfd/max8925.h> #include <linux/of.h> #include <linux/of_platform.h> static struct resource bk_resources[] = { { 0x84, 0x84, "mode control", IORESOURCE_REG, }, { 0x85, 0x85, "control", IORESOURCE_REG, }, }; static struct mfd_cell bk_devs[] = { { .name = "max8925-backlight", .num_resources = ARRAY_SIZE(bk_resources), .resources = &bk_resources[0], .id = -1, }, }; static struct resource touch_resources[] = { { .name = "max8925-tsc", .start = MAX8925_TSC_IRQ, .end = MAX8925_ADC_RES_END, .flags = IORESOURCE_REG, }, }; static struct mfd_cell touch_devs[] = { { .name = "max8925-touch", .num_resources = 1, .resources = &touch_resources[0], .id = -1, }, }; static struct resource power_supply_resources[] = { { .name = "max8925-power", .start = MAX8925_CHG_IRQ1, .end = MAX8925_CHG_IRQ1_MASK, .flags = IORESOURCE_REG, }, }; static struct mfd_cell power_devs[] = { { .name = "max8925-power", .num_resources = 1, .resources = &power_supply_resources[0], .id = -1, }, }; static struct resource rtc_resources[] = { { .name = "max8925-rtc", .start = MAX8925_IRQ_RTC_ALARM0, .end = MAX8925_IRQ_RTC_ALARM0, .flags = IORESOURCE_IRQ, }, }; static struct mfd_cell rtc_devs[] = { { .name = "max8925-rtc", .num_resources = 1, .resources = &rtc_resources[0], .id = -1, }, }; static struct resource onkey_resources[] = { { .name = "max8925-onkey", .start = MAX8925_IRQ_GPM_SW_R, .end = MAX8925_IRQ_GPM_SW_R, .flags = 
IORESOURCE_IRQ, }, { .name = "max8925-onkey", .start = MAX8925_IRQ_GPM_SW_F, .end = MAX8925_IRQ_GPM_SW_F, .flags = IORESOURCE_IRQ, }, }; static struct mfd_cell onkey_devs[] = { { .name = "max8925-onkey", .num_resources = 2, .resources = &onkey_resources[0], .id = -1, }, }; static struct resource sd1_resources[] = { {0x06, 0x06, "sdv", IORESOURCE_REG, }, }; static struct resource sd2_resources[] = { {0x09, 0x09, "sdv", IORESOURCE_REG, }, }; static struct resource sd3_resources[] = { {0x0c, 0x0c, "sdv", IORESOURCE_REG, }, }; static struct resource ldo1_resources[] = { {0x1a, 0x1a, "ldov", IORESOURCE_REG, }, }; static struct resource ldo2_resources[] = { {0x1e, 0x1e, "ldov", IORESOURCE_REG, }, }; static struct resource ldo3_resources[] = { {0x22, 0x22, "ldov", IORESOURCE_REG, }, }; static struct resource ldo4_resources[] = { {0x26, 0x26, "ldov", IORESOURCE_REG, }, }; static struct resource ldo5_resources[] = { {0x2a, 0x2a, "ldov", IORESOURCE_REG, }, }; static struct resource ldo6_resources[] = { {0x2e, 0x2e, "ldov", IORESOURCE_REG, }, }; static struct resource ldo7_resources[] = { {0x32, 0x32, "ldov", IORESOURCE_REG, }, }; static struct resource ldo8_resources[] = { {0x36, 0x36, "ldov", IORESOURCE_REG, }, }; static struct resource ldo9_resources[] = { {0x3a, 0x3a, "ldov", IORESOURCE_REG, }, }; static struct resource ldo10_resources[] = { {0x3e, 0x3e, "ldov", IORESOURCE_REG, }, }; static struct resource ldo11_resources[] = { {0x42, 0x42, "ldov", IORESOURCE_REG, }, }; static struct resource ldo12_resources[] = { {0x46, 0x46, "ldov", IORESOURCE_REG, }, }; static struct resource ldo13_resources[] = { {0x4a, 0x4a, "ldov", IORESOURCE_REG, }, }; static struct resource ldo14_resources[] = { {0x4e, 0x4e, "ldov", IORESOURCE_REG, }, }; static struct resource ldo15_resources[] = { {0x52, 0x52, "ldov", IORESOURCE_REG, }, }; static struct resource ldo16_resources[] = { {0x12, 0x12, "ldov", IORESOURCE_REG, }, }; static struct resource ldo17_resources[] = { {0x16, 0x16, "ldov", 
IORESOURCE_REG, }, }; static struct resource ldo18_resources[] = { {0x74, 0x74, "ldov", IORESOURCE_REG, }, }; static struct resource ldo19_resources[] = { {0x5e, 0x5e, "ldov", IORESOURCE_REG, }, }; static struct resource ldo20_resources[] = { {0x9e, 0x9e, "ldov", IORESOURCE_REG, }, }; static struct mfd_cell reg_devs[] = { { .name = "max8925-regulator", .id = 0, .num_resources = ARRAY_SIZE(sd1_resources), .resources = sd1_resources, }, { .name = "max8925-regulator", .id = 1, .num_resources = ARRAY_SIZE(sd2_resources), .resources = sd2_resources, }, { .name = "max8925-regulator", .id = 2, .num_resources = ARRAY_SIZE(sd3_resources), .resources = sd3_resources, }, { .name = "max8925-regulator", .id = 3, .num_resources = ARRAY_SIZE(ldo1_resources), .resources = ldo1_resources, }, { .name = "max8925-regulator", .id = 4, .num_resources = ARRAY_SIZE(ldo2_resources), .resources = ldo2_resources, }, { .name = "max8925-regulator", .id = 5, .num_resources = ARRAY_SIZE(ldo3_resources), .resources = ldo3_resources, }, { .name = "max8925-regulator", .id = 6, .num_resources = ARRAY_SIZE(ldo4_resources), .resources = ldo4_resources, }, { .name = "max8925-regulator", .id = 7, .num_resources = ARRAY_SIZE(ldo5_resources), .resources = ldo5_resources, }, { .name = "max8925-regulator", .id = 8, .num_resources = ARRAY_SIZE(ldo6_resources), .resources = ldo6_resources, }, { .name = "max8925-regulator", .id = 9, .num_resources = ARRAY_SIZE(ldo7_resources), .resources = ldo7_resources, }, { .name = "max8925-regulator", .id = 10, .num_resources = ARRAY_SIZE(ldo8_resources), .resources = ldo8_resources, }, { .name = "max8925-regulator", .id = 11, .num_resources = ARRAY_SIZE(ldo9_resources), .resources = ldo9_resources, }, { .name = "max8925-regulator", .id = 12, .num_resources = ARRAY_SIZE(ldo10_resources), .resources = ldo10_resources, }, { .name = "max8925-regulator", .id = 13, .num_resources = ARRAY_SIZE(ldo11_resources), .resources = ldo11_resources, }, { .name = "max8925-regulator", .id 
= 14, .num_resources = ARRAY_SIZE(ldo12_resources), .resources = ldo12_resources, }, { .name = "max8925-regulator", .id = 15, .num_resources = ARRAY_SIZE(ldo13_resources), .resources = ldo13_resources, }, { .name = "max8925-regulator", .id = 16, .num_resources = ARRAY_SIZE(ldo14_resources), .resources = ldo14_resources, }, { .name = "max8925-regulator", .id = 17, .num_resources = ARRAY_SIZE(ldo15_resources), .resources = ldo15_resources, }, { .name = "max8925-regulator", .id = 18, .num_resources = ARRAY_SIZE(ldo16_resources), .resources = ldo16_resources, }, { .name = "max8925-regulator", .id = 19, .num_resources = ARRAY_SIZE(ldo17_resources), .resources = ldo17_resources, }, { .name = "max8925-regulator", .id = 20, .num_resources = ARRAY_SIZE(ldo18_resources), .resources = ldo18_resources, }, { .name = "max8925-regulator", .id = 21, .num_resources = ARRAY_SIZE(ldo19_resources), .resources = ldo19_resources, }, { .name = "max8925-regulator", .id = 22, .num_resources = ARRAY_SIZE(ldo20_resources), .resources = ldo20_resources, }, }; enum { FLAGS_ADC = 1, /* register in ADC component */ FLAGS_RTC, /* register in RTC component */ }; struct max8925_irq_data { int reg; int mask_reg; int enable; /* enable or not */ int offs; /* bit offset in mask register */ int flags; int tsc_irq; }; static struct max8925_irq_data max8925_irqs[] = { [MAX8925_IRQ_VCHG_DC_OVP] = { .reg = MAX8925_CHG_IRQ1, .mask_reg = MAX8925_CHG_IRQ1_MASK, .offs = 1 << 0, }, [MAX8925_IRQ_VCHG_DC_F] = { .reg = MAX8925_CHG_IRQ1, .mask_reg = MAX8925_CHG_IRQ1_MASK, .offs = 1 << 1, }, [MAX8925_IRQ_VCHG_DC_R] = { .reg = MAX8925_CHG_IRQ1, .mask_reg = MAX8925_CHG_IRQ1_MASK, .offs = 1 << 2, }, [MAX8925_IRQ_VCHG_THM_OK_R] = { .reg = MAX8925_CHG_IRQ2, .mask_reg = MAX8925_CHG_IRQ2_MASK, .offs = 1 << 0, }, [MAX8925_IRQ_VCHG_THM_OK_F] = { .reg = MAX8925_CHG_IRQ2, .mask_reg = MAX8925_CHG_IRQ2_MASK, .offs = 1 << 1, }, [MAX8925_IRQ_VCHG_SYSLOW_F] = { .reg = MAX8925_CHG_IRQ2, .mask_reg = MAX8925_CHG_IRQ2_MASK, .offs = 1 << 
2, }, [MAX8925_IRQ_VCHG_SYSLOW_R] = { .reg = MAX8925_CHG_IRQ2, .mask_reg = MAX8925_CHG_IRQ2_MASK, .offs = 1 << 3, }, [MAX8925_IRQ_VCHG_RST] = { .reg = MAX8925_CHG_IRQ2, .mask_reg = MAX8925_CHG_IRQ2_MASK, .offs = 1 << 4, }, [MAX8925_IRQ_VCHG_DONE] = { .reg = MAX8925_CHG_IRQ2, .mask_reg = MAX8925_CHG_IRQ2_MASK, .offs = 1 << 5, }, [MAX8925_IRQ_VCHG_TOPOFF] = { .reg = MAX8925_CHG_IRQ2, .mask_reg = MAX8925_CHG_IRQ2_MASK, .offs = 1 << 6, }, [MAX8925_IRQ_VCHG_TMR_FAULT] = { .reg = MAX8925_CHG_IRQ2, .mask_reg = MAX8925_CHG_IRQ2_MASK, .offs = 1 << 7, }, [MAX8925_IRQ_GPM_RSTIN] = { .reg = MAX8925_ON_OFF_IRQ1, .mask_reg = MAX8925_ON_OFF_IRQ1_MASK, .offs = 1 << 0, }, [MAX8925_IRQ_GPM_MPL] = { .reg = MAX8925_ON_OFF_IRQ1, .mask_reg = MAX8925_ON_OFF_IRQ1_MASK, .offs = 1 << 1, }, [MAX8925_IRQ_GPM_SW_3SEC] = { .reg = MAX8925_ON_OFF_IRQ1, .mask_reg = MAX8925_ON_OFF_IRQ1_MASK, .offs = 1 << 2, }, [MAX8925_IRQ_GPM_EXTON_F] = { .reg = MAX8925_ON_OFF_IRQ1, .mask_reg = MAX8925_ON_OFF_IRQ1_MASK, .offs = 1 << 3, }, [MAX8925_IRQ_GPM_EXTON_R] = { .reg = MAX8925_ON_OFF_IRQ1, .mask_reg = MAX8925_ON_OFF_IRQ1_MASK, .offs = 1 << 4, }, [MAX8925_IRQ_GPM_SW_1SEC] = { .reg = MAX8925_ON_OFF_IRQ1, .mask_reg = MAX8925_ON_OFF_IRQ1_MASK, .offs = 1 << 5, }, [MAX8925_IRQ_GPM_SW_F] = { .reg = MAX8925_ON_OFF_IRQ1, .mask_reg = MAX8925_ON_OFF_IRQ1_MASK, .offs = 1 << 6, }, [MAX8925_IRQ_GPM_SW_R] = { .reg = MAX8925_ON_OFF_IRQ1, .mask_reg = MAX8925_ON_OFF_IRQ1_MASK, .offs = 1 << 7, }, [MAX8925_IRQ_GPM_SYSCKEN_F] = { .reg = MAX8925_ON_OFF_IRQ2, .mask_reg = MAX8925_ON_OFF_IRQ2_MASK, .offs = 1 << 0, }, [MAX8925_IRQ_GPM_SYSCKEN_R] = { .reg = MAX8925_ON_OFF_IRQ2, .mask_reg = MAX8925_ON_OFF_IRQ2_MASK, .offs = 1 << 1, }, [MAX8925_IRQ_RTC_ALARM1] = { .reg = MAX8925_RTC_IRQ, .mask_reg = MAX8925_RTC_IRQ_MASK, .offs = 1 << 2, .flags = FLAGS_RTC, }, [MAX8925_IRQ_RTC_ALARM0] = { .reg = MAX8925_RTC_IRQ, .mask_reg = MAX8925_RTC_IRQ_MASK, .offs = 1 << 3, .flags = FLAGS_RTC, }, [MAX8925_IRQ_TSC_STICK] = { .reg = MAX8925_TSC_IRQ, 
.mask_reg = MAX8925_TSC_IRQ_MASK, .offs = 1 << 0, .flags = FLAGS_ADC, .tsc_irq = 1, }, [MAX8925_IRQ_TSC_NSTICK] = { .reg = MAX8925_TSC_IRQ, .mask_reg = MAX8925_TSC_IRQ_MASK, .offs = 1 << 1, .flags = FLAGS_ADC, .tsc_irq = 1, }, }; static inline struct max8925_irq_data *irq_to_max8925(struct max8925_chip *chip, int irq) { return &max8925_irqs[irq - chip->irq_base]; } static irqreturn_t max8925_irq(int irq, void *data) { struct max8925_chip *chip = data; struct max8925_irq_data *irq_data; struct i2c_client *i2c; int read_reg = -1, value = 0; int i; for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) { irq_data = &max8925_irqs[i]; /* TSC IRQ should be serviced in max8925_tsc_irq() */ if (irq_data->tsc_irq) continue; if (irq_data->flags == FLAGS_RTC) i2c = chip->rtc; else if (irq_data->flags == FLAGS_ADC) i2c = chip->adc; else i2c = chip->i2c; if (read_reg != irq_data->reg) { read_reg = irq_data->reg; value = max8925_reg_read(i2c, irq_data->reg); } if (value & irq_data->enable) handle_nested_irq(chip->irq_base + i); } return IRQ_HANDLED; } static irqreturn_t max8925_tsc_irq(int irq, void *data) { struct max8925_chip *chip = data; struct max8925_irq_data *irq_data; struct i2c_client *i2c; int read_reg = -1, value = 0; int i; for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) { irq_data = &max8925_irqs[i]; /* non TSC IRQ should be serviced in max8925_irq() */ if (!irq_data->tsc_irq) continue; if (irq_data->flags == FLAGS_RTC) i2c = chip->rtc; else if (irq_data->flags == FLAGS_ADC) i2c = chip->adc; else i2c = chip->i2c; if (read_reg != irq_data->reg) { read_reg = irq_data->reg; value = max8925_reg_read(i2c, irq_data->reg); } if (value & irq_data->enable) handle_nested_irq(chip->irq_base + i); } return IRQ_HANDLED; } static void max8925_irq_lock(struct irq_data *data) { struct max8925_chip *chip = irq_data_get_irq_chip_data(data); mutex_lock(&chip->irq_lock); } static void max8925_irq_sync_unlock(struct irq_data *data) { struct max8925_chip *chip = irq_data_get_irq_chip_data(data); 
struct max8925_irq_data *irq_data; static unsigned char cache_chg[2] = {0xff, 0xff}; static unsigned char cache_on[2] = {0xff, 0xff}; static unsigned char cache_rtc = 0xff, cache_tsc = 0xff; unsigned char irq_chg[2], irq_on[2]; unsigned char irq_rtc, irq_tsc; int i; /* Load cached value. In initial, all IRQs are masked */ irq_chg[0] = cache_chg[0]; irq_chg[1] = cache_chg[1]; irq_on[0] = cache_on[0]; irq_on[1] = cache_on[1]; irq_rtc = cache_rtc; irq_tsc = cache_tsc; for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) { irq_data = &max8925_irqs[i]; /* 1 -- disable, 0 -- enable */ switch (irq_data->mask_reg) { case MAX8925_CHG_IRQ1_MASK: irq_chg[0] &= ~irq_data->enable; break; case MAX8925_CHG_IRQ2_MASK: irq_chg[1] &= ~irq_data->enable; break; case MAX8925_ON_OFF_IRQ1_MASK: irq_on[0] &= ~irq_data->enable; break; case MAX8925_ON_OFF_IRQ2_MASK: irq_on[1] &= ~irq_data->enable; break; case MAX8925_RTC_IRQ_MASK: irq_rtc &= ~irq_data->enable; break; case MAX8925_TSC_IRQ_MASK: irq_tsc &= ~irq_data->enable; break; default: dev_err(chip->dev, "wrong IRQ\n"); break; } } /* update mask into registers */ if (cache_chg[0] != irq_chg[0]) { cache_chg[0] = irq_chg[0]; max8925_reg_write(chip->i2c, MAX8925_CHG_IRQ1_MASK, irq_chg[0]); } if (cache_chg[1] != irq_chg[1]) { cache_chg[1] = irq_chg[1]; max8925_reg_write(chip->i2c, MAX8925_CHG_IRQ2_MASK, irq_chg[1]); } if (cache_on[0] != irq_on[0]) { cache_on[0] = irq_on[0]; max8925_reg_write(chip->i2c, MAX8925_ON_OFF_IRQ1_MASK, irq_on[0]); } if (cache_on[1] != irq_on[1]) { cache_on[1] = irq_on[1]; max8925_reg_write(chip->i2c, MAX8925_ON_OFF_IRQ2_MASK, irq_on[1]); } if (cache_rtc != irq_rtc) { cache_rtc = irq_rtc; max8925_reg_write(chip->rtc, MAX8925_RTC_IRQ_MASK, irq_rtc); } if (cache_tsc != irq_tsc) { cache_tsc = irq_tsc; max8925_reg_write(chip->adc, MAX8925_TSC_IRQ_MASK, irq_tsc); } mutex_unlock(&chip->irq_lock); } static void max8925_irq_enable(struct irq_data *data) { struct max8925_chip *chip = irq_data_get_irq_chip_data(data); 
max8925_irqs[data->irq - chip->irq_base].enable = max8925_irqs[data->irq - chip->irq_base].offs; } static void max8925_irq_disable(struct irq_data *data) { struct max8925_chip *chip = irq_data_get_irq_chip_data(data); max8925_irqs[data->irq - chip->irq_base].enable = 0; } static struct irq_chip max8925_irq_chip = { .name = "max8925", .irq_bus_lock = max8925_irq_lock, .irq_bus_sync_unlock = max8925_irq_sync_unlock, .irq_enable = max8925_irq_enable, .irq_disable = max8925_irq_disable, }; static int max8925_irq_domain_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw) { irq_set_chip_data(virq, d->host_data); irq_set_chip_and_handler(virq, &max8925_irq_chip, handle_edge_irq); irq_set_nested_thread(virq, 1); #ifdef CONFIG_ARM set_irq_flags(virq, IRQF_VALID); #else irq_set_noprobe(virq); #endif return 0; } static struct irq_domain_ops max8925_irq_domain_ops = { .map = max8925_irq_domain_map, .xlate = irq_domain_xlate_onetwocell, }; static int max8925_irq_init(struct max8925_chip *chip, int irq, struct max8925_platform_data *pdata) { unsigned long flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT; int ret; struct device_node *node = chip->dev->of_node; /* clear all interrupts */ max8925_reg_read(chip->i2c, MAX8925_CHG_IRQ1); max8925_reg_read(chip->i2c, MAX8925_CHG_IRQ2); max8925_reg_read(chip->i2c, MAX8925_ON_OFF_IRQ1); max8925_reg_read(chip->i2c, MAX8925_ON_OFF_IRQ2); max8925_reg_read(chip->rtc, MAX8925_RTC_IRQ); max8925_reg_read(chip->adc, MAX8925_TSC_IRQ); /* mask all interrupts except for TSC */ max8925_reg_write(chip->rtc, MAX8925_ALARM0_CNTL, 0); max8925_reg_write(chip->rtc, MAX8925_ALARM1_CNTL, 0); max8925_reg_write(chip->i2c, MAX8925_CHG_IRQ1_MASK, 0xff); max8925_reg_write(chip->i2c, MAX8925_CHG_IRQ2_MASK, 0xff); max8925_reg_write(chip->i2c, MAX8925_ON_OFF_IRQ1_MASK, 0xff); max8925_reg_write(chip->i2c, MAX8925_ON_OFF_IRQ2_MASK, 0xff); max8925_reg_write(chip->rtc, MAX8925_RTC_IRQ_MASK, 0xff); mutex_init(&chip->irq_lock); chip->irq_base = irq_alloc_descs(-1, 
0, MAX8925_NR_IRQS, 0); if (chip->irq_base < 0) { dev_err(chip->dev, "Failed to allocate interrupts, ret:%d\n", chip->irq_base); return -EBUSY; } irq_domain_add_legacy(node, MAX8925_NR_IRQS, chip->irq_base, 0, &max8925_irq_domain_ops, chip); /* request irq handler for pmic main irq*/ chip->core_irq = irq; if (!chip->core_irq) return -EBUSY; ret = request_threaded_irq(irq, NULL, max8925_irq, flags | IRQF_ONESHOT, "max8925", chip); if (ret) { dev_err(chip->dev, "Failed to request core IRQ: %d\n", ret); chip->core_irq = 0; return -EBUSY; } /* request irq handler for pmic tsc irq*/ /* mask TSC interrupt */ max8925_reg_write(chip->adc, MAX8925_TSC_IRQ_MASK, 0x0f); if (!pdata->tsc_irq) { dev_warn(chip->dev, "No interrupt support on TSC IRQ\n"); return 0; } chip->tsc_irq = pdata->tsc_irq; ret = request_threaded_irq(chip->tsc_irq, NULL, max8925_tsc_irq, flags | IRQF_ONESHOT, "max8925-tsc", chip); if (ret) { dev_err(chip->dev, "Failed to request TSC IRQ: %d\n", ret); chip->tsc_irq = 0; } return 0; } static void init_regulator(struct max8925_chip *chip, struct max8925_platform_data *pdata) { int ret; if (!pdata) return; if (pdata->sd1) { reg_devs[0].platform_data = pdata->sd1; reg_devs[0].pdata_size = sizeof(struct regulator_init_data); } if (pdata->sd2) { reg_devs[1].platform_data = pdata->sd2; reg_devs[1].pdata_size = sizeof(struct regulator_init_data); } if (pdata->sd3) { reg_devs[2].platform_data = pdata->sd3; reg_devs[2].pdata_size = sizeof(struct regulator_init_data); } if (pdata->ldo1) { reg_devs[3].platform_data = pdata->ldo1; reg_devs[3].pdata_size = sizeof(struct regulator_init_data); } if (pdata->ldo2) { reg_devs[4].platform_data = pdata->ldo2; reg_devs[4].pdata_size = sizeof(struct regulator_init_data); } if (pdata->ldo3) { reg_devs[5].platform_data = pdata->ldo3; reg_devs[5].pdata_size = sizeof(struct regulator_init_data); } if (pdata->ldo4) { reg_devs[6].platform_data = pdata->ldo4; reg_devs[6].pdata_size = sizeof(struct regulator_init_data); } if (pdata->ldo5) 
{ reg_devs[7].platform_data = pdata->ldo5; reg_devs[7].pdata_size = sizeof(struct regulator_init_data); } if (pdata->ldo6) { reg_devs[8].platform_data = pdata->ldo6; reg_devs[8].pdata_size = sizeof(struct regulator_init_data); } if (pdata->ldo7) { reg_devs[9].platform_data = pdata->ldo7; reg_devs[9].pdata_size = sizeof(struct regulator_init_data); } if (pdata->ldo8) { reg_devs[10].platform_data = pdata->ldo8; reg_devs[10].pdata_size = sizeof(struct regulator_init_data); } if (pdata->ldo9) { reg_devs[11].platform_data = pdata->ldo9; reg_devs[11].pdata_size = sizeof(struct regulator_init_data); } if (pdata->ldo10) { reg_devs[12].platform_data = pdata->ldo10; reg_devs[12].pdata_size = sizeof(struct regulator_init_data); } if (pdata->ldo11) { reg_devs[13].platform_data = pdata->ldo11; reg_devs[13].pdata_size = sizeof(struct regulator_init_data); } if (pdata->ldo12) { reg_devs[14].platform_data = pdata->ldo12; reg_devs[14].pdata_size = sizeof(struct regulator_init_data); } if (pdata->ldo13) { reg_devs[15].platform_data = pdata->ldo13; reg_devs[15].pdata_size = sizeof(struct regulator_init_data); } if (pdata->ldo14) { reg_devs[16].platform_data = pdata->ldo14; reg_devs[16].pdata_size = sizeof(struct regulator_init_data); } if (pdata->ldo15) { reg_devs[17].platform_data = pdata->ldo15; reg_devs[17].pdata_size = sizeof(struct regulator_init_data); } if (pdata->ldo16) { reg_devs[18].platform_data = pdata->ldo16; reg_devs[18].pdata_size = sizeof(struct regulator_init_data); } if (pdata->ldo17) { reg_devs[19].platform_data = pdata->ldo17; reg_devs[19].pdata_size = sizeof(struct regulator_init_data); } if (pdata->ldo18) { reg_devs[20].platform_data = pdata->ldo18; reg_devs[20].pdata_size = sizeof(struct regulator_init_data); } if (pdata->ldo19) { reg_devs[21].platform_data = pdata->ldo19; reg_devs[21].pdata_size = sizeof(struct regulator_init_data); } if (pdata->ldo20) { reg_devs[22].platform_data = pdata->ldo20; reg_devs[22].pdata_size = sizeof(struct regulator_init_data); } 
ret = mfd_add_devices(chip->dev, 0, reg_devs, ARRAY_SIZE(reg_devs), NULL, 0, NULL); if (ret < 0) { dev_err(chip->dev, "Failed to add regulator subdev\n"); return; } } int max8925_device_init(struct max8925_chip *chip, struct max8925_platform_data *pdata) { int ret; max8925_irq_init(chip, chip->i2c->irq, pdata); if (pdata && (pdata->power || pdata->touch)) { /* enable ADC to control internal reference */ max8925_set_bits(chip->i2c, MAX8925_RESET_CNFG, 1, 1); /* enable internal reference for ADC */ max8925_set_bits(chip->adc, MAX8925_TSC_CNFG1, 3, 2); /* check for internal reference IRQ */ do { ret = max8925_reg_read(chip->adc, MAX8925_TSC_IRQ); } while (ret & MAX8925_NREF_OK); /* enaable ADC scheduler, interval is 1 second */ max8925_set_bits(chip->adc, MAX8925_ADC_SCHED, 3, 2); } /* enable Momentary Power Loss */ max8925_set_bits(chip->rtc, MAX8925_MPL_CNTL, 1 << 4, 1 << 4); ret = mfd_add_devices(chip->dev, 0, &rtc_devs[0], ARRAY_SIZE(rtc_devs), NULL, chip->irq_base, NULL); if (ret < 0) { dev_err(chip->dev, "Failed to add rtc subdev\n"); goto out; } ret = mfd_add_devices(chip->dev, 0, &onkey_devs[0], ARRAY_SIZE(onkey_devs), NULL, chip->irq_base, NULL); if (ret < 0) { dev_err(chip->dev, "Failed to add onkey subdev\n"); goto out_dev; } init_regulator(chip, pdata); if (pdata && pdata->backlight) { bk_devs[0].platform_data = &pdata->backlight; bk_devs[0].pdata_size = sizeof(struct max8925_backlight_pdata); } ret = mfd_add_devices(chip->dev, 0, bk_devs, ARRAY_SIZE(bk_devs), NULL, 0, NULL); if (ret < 0) { dev_err(chip->dev, "Failed to add backlight subdev\n"); goto out_dev; } ret = mfd_add_devices(chip->dev, 0, &power_devs[0], ARRAY_SIZE(power_devs), NULL, 0, NULL); if (ret < 0) { dev_err(chip->dev, "Failed to add power supply subdev, err = %d\n", ret); goto out_dev; } if (pdata && pdata->touch) { ret = mfd_add_devices(chip->dev, 0, &touch_devs[0], ARRAY_SIZE(touch_devs), NULL, chip->tsc_irq, NULL); if (ret < 0) { dev_err(chip->dev, "Failed to add touch subdev\n"); goto 
out_dev; } } return 0; out_dev: mfd_remove_devices(chip->dev); out: return ret; } void max8925_device_exit(struct max8925_chip *chip) { if (chip->core_irq) free_irq(chip->core_irq, chip); if (chip->tsc_irq) free_irq(chip->tsc_irq, chip); mfd_remove_devices(chip->dev); } MODULE_DESCRIPTION("PMIC Driver for Maxim MAX8925"); MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com"); MODULE_LICENSE("GPL");
gpl-2.0
black9/Nyan-Tuna-JB
drivers/i2c/busses/i2c-s6000.c
2377
11249
/* * drivers/i2c/busses/i2c-s6000.c * * Description: Driver for S6000 Family I2C Interface * Copyright (c) 2008 emlix GmbH * Author: Oskar Schirmer <os@emlix.com> * * Partially based on i2c-bfin-twi.c driver by <sonic.zhang@analog.com> * Copyright (c) 2005-2007 Analog Devices, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/clk.h> #include <linux/err.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/i2c/s6000.h> #include <linux/timer.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/io.h> #include "i2c-s6000.h" #define DRV_NAME "i2c-s6000" #define POLL_TIMEOUT (2 * HZ) struct s6i2c_if { u8 __iomem *reg; /* memory mapped registers */ int irq; spinlock_t lock; struct i2c_msg *msgs; /* messages currently handled */ int msgs_num; /* nb of msgs to do */ int msgs_push; /* nb of msgs read/written */ int msgs_done; /* nb of msgs finally handled */ unsigned push; /* nb of bytes read/written in msg */ unsigned done; /* nb of bytes finally handled */ int timeout_count; /* timeout retries left */ struct timer_list timeout_timer; struct i2c_adapter adap; struct completion complete; struct clk *clk; struct resource *res; }; static 
inline u16 i2c_rd16(struct s6i2c_if *iface, unsigned n) { return readw(iface->reg + (n)); } static inline void i2c_wr16(struct s6i2c_if *iface, unsigned n, u16 v) { writew(v, iface->reg + (n)); } static inline u32 i2c_rd32(struct s6i2c_if *iface, unsigned n) { return readl(iface->reg + (n)); } static inline void i2c_wr32(struct s6i2c_if *iface, unsigned n, u32 v) { writel(v, iface->reg + (n)); } static struct s6i2c_if s6i2c_if; static void s6i2c_handle_interrupt(struct s6i2c_if *iface) { if (i2c_rd16(iface, S6_I2C_INTRSTAT) & (1 << S6_I2C_INTR_TXABRT)) { i2c_rd16(iface, S6_I2C_CLRTXABRT); i2c_wr16(iface, S6_I2C_INTRMASK, 0); complete(&iface->complete); return; } if (iface->msgs_done >= iface->msgs_num) { dev_err(&iface->adap.dev, "s6i2c: spurious I2C irq: %04x\n", i2c_rd16(iface, S6_I2C_INTRSTAT)); i2c_wr16(iface, S6_I2C_INTRMASK, 0); return; } while ((iface->msgs_push < iface->msgs_num) && (i2c_rd16(iface, S6_I2C_STATUS) & (1 << S6_I2C_STATUS_TFNF))) { struct i2c_msg *m = &iface->msgs[iface->msgs_push]; if (!(m->flags & I2C_M_RD)) i2c_wr16(iface, S6_I2C_DATACMD, m->buf[iface->push]); else i2c_wr16(iface, S6_I2C_DATACMD, 1 << S6_I2C_DATACMD_READ); if (++iface->push >= m->len) { iface->push = 0; iface->msgs_push += 1; } } do { struct i2c_msg *m = &iface->msgs[iface->msgs_done]; if (!(m->flags & I2C_M_RD)) { if (iface->msgs_done < iface->msgs_push) iface->msgs_done += 1; else break; } else if (i2c_rd16(iface, S6_I2C_STATUS) & (1 << S6_I2C_STATUS_RFNE)) { m->buf[iface->done] = i2c_rd16(iface, S6_I2C_DATACMD); if (++iface->done >= m->len) { iface->done = 0; iface->msgs_done += 1; } } else{ break; } } while (iface->msgs_done < iface->msgs_num); if (iface->msgs_done >= iface->msgs_num) { i2c_wr16(iface, S6_I2C_INTRMASK, 1 << S6_I2C_INTR_TXABRT); complete(&iface->complete); } else if (iface->msgs_push >= iface->msgs_num) { i2c_wr16(iface, S6_I2C_INTRMASK, (1 << S6_I2C_INTR_TXABRT) | (1 << S6_I2C_INTR_RXFULL)); } else { i2c_wr16(iface, S6_I2C_INTRMASK, (1 << 
S6_I2C_INTR_TXABRT) | (1 << S6_I2C_INTR_TXEMPTY) | (1 << S6_I2C_INTR_RXFULL)); } } static irqreturn_t s6i2c_interrupt_entry(int irq, void *dev_id) { struct s6i2c_if *iface = dev_id; if (!(i2c_rd16(iface, S6_I2C_STATUS) & ((1 << S6_I2C_INTR_RXUNDER) | (1 << S6_I2C_INTR_RXOVER) | (1 << S6_I2C_INTR_RXFULL) | (1 << S6_I2C_INTR_TXOVER) | (1 << S6_I2C_INTR_TXEMPTY) | (1 << S6_I2C_INTR_RDREQ) | (1 << S6_I2C_INTR_TXABRT) | (1 << S6_I2C_INTR_RXDONE) | (1 << S6_I2C_INTR_ACTIVITY) | (1 << S6_I2C_INTR_STOPDET) | (1 << S6_I2C_INTR_STARTDET) | (1 << S6_I2C_INTR_GENCALL)))) return IRQ_NONE; spin_lock(&iface->lock); del_timer(&iface->timeout_timer); s6i2c_handle_interrupt(iface); spin_unlock(&iface->lock); return IRQ_HANDLED; } static void s6i2c_timeout(unsigned long data) { struct s6i2c_if *iface = (struct s6i2c_if *)data; unsigned long flags; spin_lock_irqsave(&iface->lock, flags); s6i2c_handle_interrupt(iface); if (--iface->timeout_count > 0) { iface->timeout_timer.expires = jiffies + POLL_TIMEOUT; add_timer(&iface->timeout_timer); } else { complete(&iface->complete); i2c_wr16(iface, S6_I2C_INTRMASK, 0); } spin_unlock_irqrestore(&iface->lock, flags); } static int s6i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct s6i2c_if *iface = adap->algo_data; int i; if (num == 0) return 0; if (i2c_rd16(iface, S6_I2C_STATUS) & (1 << S6_I2C_STATUS_ACTIVITY)) yield(); i2c_wr16(iface, S6_I2C_INTRMASK, 0); i2c_rd16(iface, S6_I2C_CLRINTR); for (i = 0; i < num; i++) { if (msgs[i].flags & I2C_M_TEN) { dev_err(&adap->dev, "s6i2c: 10 bits addr not supported\n"); return -EINVAL; } if (msgs[i].len == 0) { dev_err(&adap->dev, "s6i2c: zero length message not supported\n"); return -EINVAL; } if (msgs[i].addr != msgs[0].addr) { dev_err(&adap->dev, "s6i2c: multiple xfer cannot change target\n"); return -EINVAL; } } iface->msgs = msgs; iface->msgs_num = num; iface->msgs_push = 0; iface->msgs_done = 0; iface->push = 0; iface->done = 0; iface->timeout_count = 10; 
i2c_wr16(iface, S6_I2C_TAR, msgs[0].addr); i2c_wr16(iface, S6_I2C_ENABLE, 1); i2c_wr16(iface, S6_I2C_INTRMASK, (1 << S6_I2C_INTR_TXEMPTY) | (1 << S6_I2C_INTR_TXABRT)); iface->timeout_timer.expires = jiffies + POLL_TIMEOUT; add_timer(&iface->timeout_timer); wait_for_completion(&iface->complete); del_timer_sync(&iface->timeout_timer); while (i2c_rd32(iface, S6_I2C_TXFLR) > 0) schedule(); while (i2c_rd16(iface, S6_I2C_STATUS) & (1 << S6_I2C_STATUS_ACTIVITY)) schedule(); i2c_wr16(iface, S6_I2C_INTRMASK, 0); i2c_wr16(iface, S6_I2C_ENABLE, 0); return iface->msgs_done; } static u32 s6i2c_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static struct i2c_algorithm s6i2c_algorithm = { .master_xfer = s6i2c_master_xfer, .functionality = s6i2c_functionality, }; static u16 __devinit nanoseconds_on_clk(struct s6i2c_if *iface, u32 ns) { u32 dividend = ((clk_get_rate(iface->clk) / 1000) * ns) / 1000000; if (dividend > 0xffff) return 0xffff; return dividend; } static int __devinit s6i2c_probe(struct platform_device *dev) { struct s6i2c_if *iface = &s6i2c_if; struct i2c_adapter *p_adap; const char *clock; int bus_num, rc; spin_lock_init(&iface->lock); init_completion(&iface->complete); iface->irq = platform_get_irq(dev, 0); if (iface->irq < 0) { rc = iface->irq; goto err_out; } iface->res = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!iface->res) { rc = -ENXIO; goto err_out; } iface->res = request_mem_region(iface->res->start, resource_size(iface->res), dev->dev.bus_id); if (!iface->res) { rc = -EBUSY; goto err_out; } iface->reg = ioremap_nocache(iface->res->start, resource_size(iface->res)); if (!iface->reg) { rc = -ENOMEM; goto err_reg; } clock = 0; bus_num = -1; if (dev->dev.platform_data) { struct s6_i2c_platform_data *pdata = dev->dev.platform_data; bus_num = pdata->bus_num; clock = pdata->clock; } iface->clk = clk_get(&dev->dev, clock); if (IS_ERR(iface->clk)) { rc = PTR_ERR(iface->clk); goto err_map; } rc = clk_enable(iface->clk); 
if (rc < 0) goto err_clk_put; init_timer(&iface->timeout_timer); iface->timeout_timer.function = s6i2c_timeout; iface->timeout_timer.data = (unsigned long)iface; p_adap = &iface->adap; strlcpy(p_adap->name, dev->name, sizeof(p_adap->name)); p_adap->algo = &s6i2c_algorithm; p_adap->algo_data = iface; p_adap->nr = bus_num; p_adap->class = 0; p_adap->dev.parent = &dev->dev; i2c_wr16(iface, S6_I2C_INTRMASK, 0); rc = request_irq(iface->irq, s6i2c_interrupt_entry, IRQF_SHARED, dev->name, iface); if (rc) { dev_err(&p_adap->dev, "s6i2c: can't get IRQ %d\n", iface->irq); goto err_clk_dis; } i2c_wr16(iface, S6_I2C_ENABLE, 0); udelay(1); i2c_wr32(iface, S6_I2C_SRESET, 1 << S6_I2C_SRESET_IC_SRST); i2c_wr16(iface, S6_I2C_CLRTXABRT, 1); i2c_wr16(iface, S6_I2C_CON, (1 << S6_I2C_CON_MASTER) | (S6_I2C_CON_SPEED_NORMAL << S6_I2C_CON_SPEED) | (0 << S6_I2C_CON_10BITSLAVE) | (0 << S6_I2C_CON_10BITMASTER) | (1 << S6_I2C_CON_RESTARTENA) | (1 << S6_I2C_CON_SLAVEDISABLE)); i2c_wr16(iface, S6_I2C_SSHCNT, nanoseconds_on_clk(iface, 4000)); i2c_wr16(iface, S6_I2C_SSLCNT, nanoseconds_on_clk(iface, 4700)); i2c_wr16(iface, S6_I2C_FSHCNT, nanoseconds_on_clk(iface, 600)); i2c_wr16(iface, S6_I2C_FSLCNT, nanoseconds_on_clk(iface, 1300)); i2c_wr16(iface, S6_I2C_RXTL, 0); i2c_wr16(iface, S6_I2C_TXTL, 0); platform_set_drvdata(dev, iface); if (bus_num < 0) rc = i2c_add_adapter(p_adap); else rc = i2c_add_numbered_adapter(p_adap); if (rc) goto err_irq_free; return 0; err_irq_free: free_irq(iface->irq, iface); err_clk_dis: clk_disable(iface->clk); err_clk_put: clk_put(iface->clk); err_map: iounmap(iface->reg); err_reg: release_mem_region(iface->res->start, resource_size(iface->res)); err_out: return rc; } static int __devexit s6i2c_remove(struct platform_device *pdev) { struct s6i2c_if *iface = platform_get_drvdata(pdev); i2c_wr16(iface, S6_I2C_ENABLE, 0); platform_set_drvdata(pdev, NULL); i2c_del_adapter(&iface->adap); free_irq(iface->irq, iface); clk_disable(iface->clk); clk_put(iface->clk); 
iounmap(iface->reg); release_mem_region(iface->res->start, resource_size(iface->res)); return 0; } static struct platform_driver s6i2c_driver = { .probe = s6i2c_probe, .remove = __devexit_p(s6i2c_remove), .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, }; static int __init s6i2c_init(void) { pr_info("I2C: S6000 I2C driver\n"); return platform_driver_register(&s6i2c_driver); } static void __exit s6i2c_exit(void) { platform_driver_unregister(&s6i2c_driver); } MODULE_DESCRIPTION("I2C-Bus adapter routines for S6000 I2C"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME); subsys_initcall(s6i2c_init); module_exit(s6i2c_exit);
gpl-2.0
mv0/tip
arch/arm/mach-imx/devices/devices.c
2633
1264
/* * Copyright 2008 Sascha Hauer, kernel@pengutronix.de * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, * Boston, MA 02110-1301, USA. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/err.h> #include <linux/platform_device.h> struct device mxc_aips_bus = { .init_name = "mxc_aips", .parent = &platform_bus, }; struct device mxc_ahb_bus = { .init_name = "mxc_ahb", .parent = &platform_bus, }; int __init mxc_device_init(void) { int ret; ret = device_register(&mxc_aips_bus); if (ret < 0) goto done; ret = device_register(&mxc_ahb_bus); done: return ret; }
gpl-2.0
Hardslog/GrimTF300
arch/powerpc/kvm/book3s_32_mmu_host.c
3145
9745
/* * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved. * * Authors: * Alexander Graf <agraf@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/kvm_host.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> #include <asm/mmu-hash32.h> #include <asm/machdep.h> #include <asm/mmu_context.h> #include <asm/hw_irq.h> /* #define DEBUG_MMU */ /* #define DEBUG_SR */ #ifdef DEBUG_MMU #define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__) #else #define dprintk_mmu(a, ...) do { } while(0) #endif #ifdef DEBUG_SR #define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__) #else #define dprintk_sr(a, ...) 
do { } while(0) #endif #if PAGE_SHIFT != 12 #error Unknown page size #endif #ifdef CONFIG_SMP #error XXX need to grab mmu_hash_lock #endif #ifdef CONFIG_PTE_64BIT #error Only 32 bit pages are supported for now #endif static ulong htab; static u32 htabmask; void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) { volatile u32 *pteg; /* Remove from host HTAB */ pteg = (u32*)pte->slot; pteg[0] = 0; /* And make sure it's gone from the TLB too */ asm volatile ("sync"); asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory"); asm volatile ("sync"); asm volatile ("tlbsync"); } /* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using * a hash, so we don't waste cycles on looping */ static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid) { return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^ ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^ ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^ ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^ ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^ ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^ ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^ ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK)); } static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) { struct kvmppc_sid_map *map; u16 sid_map_mask; if (vcpu->arch.shared->msr & MSR_PR) gvsid |= VSID_PR; sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); map = &to_book3s(vcpu)->sid_map[sid_map_mask]; if (map->guest_vsid == gvsid) { dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n", gvsid, map->host_vsid); return map; } map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask]; if (map->guest_vsid == gvsid) { dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n", gvsid, map->host_vsid); return map; } dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid); return NULL; } static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr, bool primary) { u32 page, hash; ulong pteg = htab; page = (eaddr & 
~ESID_MASK) >> 12; hash = ((vsid ^ page) << 6); if (!primary) hash = ~hash; hash &= htabmask; pteg |= hash; dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n", htab, hash, htabmask, pteg); return (u32*)pteg; } extern char etext[]; int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) { pfn_t hpaddr; u64 va; u64 vsid; struct kvmppc_sid_map *map; volatile u32 *pteg; u32 eaddr = orig_pte->eaddr; u32 pteg0, pteg1; register int rr = 0; bool primary = false; bool evict = false; struct hpte_cache *pte; /* Get host physical address for gpa */ hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT); if (is_error_pfn(hpaddr)) { printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr); return -EINVAL; } hpaddr <<= PAGE_SHIFT; /* and write the mapping ea -> hpa into the pt */ vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); map = find_sid_vsid(vcpu, vsid); if (!map) { kvmppc_mmu_map_segment(vcpu, eaddr); map = find_sid_vsid(vcpu, vsid); } BUG_ON(!map); vsid = map->host_vsid; va = (vsid << SID_SHIFT) | (eaddr & ~ESID_MASK); next_pteg: if (rr == 16) { primary = !primary; evict = true; rr = 0; } pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary); /* not evicting yet */ if (!evict && (pteg[rr] & PTE_V)) { rr += 2; goto next_pteg; } dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr); dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]); dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]); dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]); dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]); dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]); dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]); dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]); dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]); pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V | (primary ? 
0 : PTE_SEC); pteg1 = hpaddr | PTE_M | PTE_R | PTE_C; if (orig_pte->may_write) { pteg1 |= PP_RWRW; mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT); } else { pteg1 |= PP_RWRX; } local_irq_disable(); if (pteg[rr]) { pteg[rr] = 0; asm volatile ("sync"); } pteg[rr + 1] = pteg1; pteg[rr] = pteg0; asm volatile ("sync"); local_irq_enable(); dprintk_mmu("KVM: new PTEG: %p\n", pteg); dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]); dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]); dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]); dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]); dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]); dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]); dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]); dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]); /* Now tell our Shadow PTE code about the new page */ pte = kvmppc_mmu_hpte_cache_next(vcpu); dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n", orig_pte->may_write ? 'w' : '-', orig_pte->may_execute ? 'x' : '-', orig_pte->eaddr, (ulong)pteg, va, orig_pte->vpage, hpaddr); pte->slot = (ulong)&pteg[rr]; pte->host_va = va; pte->pte = *orig_pte; pte->pfn = hpaddr >> PAGE_SHIFT; kvmppc_mmu_hpte_cache_map(vcpu, pte); return 0; } static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) { struct kvmppc_sid_map *map; struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); u16 sid_map_mask; static int backwards_map = 0; if (vcpu->arch.shared->msr & MSR_PR) gvsid |= VSID_PR; /* We might get collisions that trap in preceding order, so let's map them differently */ sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); if (backwards_map) sid_map_mask = SID_MAP_MASK - sid_map_mask; map = &to_book3s(vcpu)->sid_map[sid_map_mask]; /* Make sure we're taking the other map next time */ backwards_map = !backwards_map; /* Uh-oh ... out of mappings. Let's flush! 
*/ if (vcpu_book3s->vsid_next >= VSID_POOL_SIZE) { vcpu_book3s->vsid_next = 0; memset(vcpu_book3s->sid_map, 0, sizeof(struct kvmppc_sid_map) * SID_MAP_NUM); kvmppc_mmu_pte_flush(vcpu, 0, 0); kvmppc_mmu_flush_segments(vcpu); } map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next]; vcpu_book3s->vsid_next++; map->guest_vsid = gvsid; map->valid = true; return map; } int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr) { u32 esid = eaddr >> SID_SHIFT; u64 gvsid; u32 sr; struct kvmppc_sid_map *map; struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu); if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { /* Invalidate an entry */ svcpu->sr[esid] = SR_INVALID; return -ENOENT; } map = find_sid_vsid(vcpu, gvsid); if (!map) map = create_sid_map(vcpu, gvsid); map->guest_esid = esid; sr = map->host_vsid | SR_KP; svcpu->sr[esid] = sr; dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr); return 0; } void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) { int i; struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu); dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr)); for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++) svcpu->sr[i] = SR_INVALID; } void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) { int i; kvmppc_mmu_hpte_destroy(vcpu); preempt_disable(); for (i = 0; i < SID_CONTEXTS; i++) __destroy_context(to_book3s(vcpu)->context_id[i]); preempt_enable(); } /* From mm/mmu_context_hash32.c */ #define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff) int kvmppc_mmu_init(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); int err; ulong sdr1; int i; int j; for (i = 0; i < SID_CONTEXTS; i++) { err = __init_new_context(); if (err < 0) goto init_fail; vcpu3s->context_id[i] = err; /* Remember context id for this combination */ for (j = 0; j < 16; j++) vcpu3s->vsid_pool[(i * 16) + j] = CTX_TO_VSID(err, j); } vcpu3s->vsid_next = 0; /* Remember where the HTAB is */ asm ( "mfsdr1 %0" : "=r"(sdr1) ); htabmask = ((sdr1 
& 0x1FF) << 16) | 0xFFC0; htab = (ulong)__va(sdr1 & 0xffff0000); kvmppc_mmu_hpte_init(vcpu); return 0; init_fail: for (j = 0; j < i; j++) { if (!vcpu3s->context_id[j]) continue; __destroy_context(to_book3s(vcpu)->context_id[j]); } return -1; }
gpl-2.0
nbr11/android_kernel_lge_hammerhead
drivers/misc/pmic8058-xoadc.c
3401
21995
/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/msm_adc.h> #include <linux/mfd/pm8xxx/core.h> #include <linux/mfd/pmic8058.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/ratelimit.h> #include <linux/delay.h> #include <linux/wakelock.h> #include <mach/mpp.h> #include <mach/msm_xo.h> #define ADC_DRIVER_NAME "pm8058-xoadc" #define MAX_QUEUE_LENGTH 0X15 #define MAX_CHANNEL_PROPERTIES_QUEUE 0X7 #define MAX_QUEUE_SLOT 0x1 /* User Processor */ #define ADC_ARB_USRP_CNTRL 0x197 #define ADC_ARB_USRP_CNTRL_EN_ARB BIT(0) #define ADC_ARB_USRP_CNTRL_RSV1 BIT(1) #define ADC_ARB_USRP_CNTRL_RSV2 BIT(2) #define ADC_ARB_USRP_CNTRL_RSV3 BIT(3) #define ADC_ARB_USRP_CNTRL_RSV4 BIT(4) #define ADC_ARB_USRP_CNTRL_RSV5 BIT(5) #define ADC_ARB_USRP_CNTRL_EOC BIT(6) #define ADC_ARB_USRP_CNTRL_REQ BIT(7) #define ADC_ARB_USRP_AMUX_CNTRL 0x198 #define ADC_ARB_USRP_ANA_PARAM 0x199 #define ADC_ARB_USRP_DIG_PARAM 0x19A #define ADC_ARB_USRP_RSV 0x19B #define ADC_ARB_USRP_DATA0 0x19D #define ADC_ARB_USRP_DATA1 0x19C struct pmic8058_adc { struct device *dev; struct xoadc_platform_data *pdata; struct adc_properties *adc_prop; struct xoadc_conv_state conv[2]; int xoadc_queue_count; int adc_irq; struct linear_graph *adc_graph; struct xoadc_conv_state *conv_slot_request; struct xoadc_conv_state *conv_queue_list; struct adc_conv_slot 
conv_queue_elements[MAX_QUEUE_LENGTH]; int xoadc_num; struct msm_xo_voter *adc_voter; struct wake_lock adc_wakelock; /* flag to warn/bug if wakelocks are taken after suspend_noirq */ int msm_suspend_check; }; static struct pmic8058_adc *pmic_adc[XOADC_PMIC_0 + 1]; static bool xoadc_initialized, xoadc_calib_first_adc; DEFINE_RATELIMIT_STATE(pm8058_xoadc_msg_ratelimit, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); static inline int pm8058_xoadc_can_print(void) { return __ratelimit(&pm8058_xoadc_msg_ratelimit); } int32_t pm8058_xoadc_registered(void) { return xoadc_initialized; } EXPORT_SYMBOL(pm8058_xoadc_registered); void pm8058_xoadc_restore_slot(uint32_t adc_instance, struct adc_conv_slot *slot) { struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance]; struct xoadc_conv_state *slot_state = adc_pmic->conv_slot_request; mutex_lock(&slot_state->list_lock); list_add(&slot->list, &slot_state->slots); mutex_unlock(&slot_state->list_lock); } EXPORT_SYMBOL(pm8058_xoadc_restore_slot); void pm8058_xoadc_slot_request(uint32_t adc_instance, struct adc_conv_slot **slot) { struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance]; struct xoadc_conv_state *slot_state = adc_pmic->conv_slot_request; mutex_lock(&slot_state->list_lock); if (!list_empty(&slot_state->slots)) { *slot = list_first_entry(&slot_state->slots, struct adc_conv_slot, list); list_del(&(*slot)->list); } else *slot = NULL; mutex_unlock(&slot_state->list_lock); } EXPORT_SYMBOL(pm8058_xoadc_slot_request); static int32_t pm8058_xoadc_arb_cntrl(uint32_t arb_cntrl, uint32_t adc_instance, uint32_t channel) { struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance]; int i, rc; u8 data_arb_cntrl; data_arb_cntrl = ADC_ARB_USRP_CNTRL_EOC | ADC_ARB_USRP_CNTRL_RSV5 | ADC_ARB_USRP_CNTRL_RSV4; if (arb_cntrl) { if (adc_pmic->msm_suspend_check) pr_err("XOADC request being made after suspend irq " "with channel id:%d\n", channel); data_arb_cntrl |= ADC_ARB_USRP_CNTRL_EN_ARB; msm_xo_mode_vote(adc_pmic->adc_voter, 
MSM_XO_MODE_ON); adc_pmic->pdata->xoadc_mpp_config(); wake_lock(&adc_pmic->adc_wakelock); } /* Write twice to the CNTRL register for the arbiter settings to take into effect */ for (i = 0; i < 2; i++) { rc = pm8xxx_writeb(adc_pmic->dev->parent, ADC_ARB_USRP_CNTRL, data_arb_cntrl); if (rc < 0) { pr_debug("%s: PM8058 write failed\n", __func__); return rc; } } if (!arb_cntrl) { msm_xo_mode_vote(adc_pmic->adc_voter, MSM_XO_MODE_OFF); wake_unlock(&adc_pmic->adc_wakelock); } return 0; } static int32_t pm8058_xoadc_configure(uint32_t adc_instance, struct adc_conv_slot *slot) { struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance]; u8 data_arb_cntrl = 0, data_amux_chan = 0, data_arb_rsv = 0; u8 data_dig_param = 0, data_ana_param2 = 0, data_ana_param = 0; int rc; rc = pm8058_xoadc_arb_cntrl(1, adc_instance, slot->chan_path); if (rc < 0) { pr_debug("%s: Configuring ADC Arbiter" "enable failed\n", __func__); return rc; } switch (slot->chan_path) { case CHAN_PATH_TYPE1: data_amux_chan = CHANNEL_VCOIN << 4; data_arb_rsv = 0x20; slot->chan_properties.gain_numerator = 1; slot->chan_properties.gain_denominator = 2; slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0]; break; case CHAN_PATH_TYPE2: data_amux_chan = CHANNEL_VBAT << 4; data_arb_rsv = 0x20; slot->chan_properties.gain_numerator = 1; slot->chan_properties.gain_denominator = 3; slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0]; break; case CHAN_PATH_TYPE3: data_amux_chan = CHANNEL_VCHG << 4; data_arb_rsv = 0x20; slot->chan_properties.gain_numerator = 1; slot->chan_properties.gain_denominator = 10; slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0]; break; case CHAN_PATH_TYPE4: data_amux_chan = CHANNEL_CHG_MONITOR << 4; data_arb_rsv = 0x20; slot->chan_properties.gain_numerator = 1; slot->chan_properties.gain_denominator = 1; slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0]; break; case CHAN_PATH_TYPE5: data_amux_chan = CHANNEL_VPH_PWR << 4; data_arb_rsv = 0x20; 
slot->chan_properties.gain_numerator = 1; slot->chan_properties.gain_denominator = 3; slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0]; break; case CHAN_PATH_TYPE6: data_amux_chan = CHANNEL_MPP5 << 4; data_arb_rsv = 0x20; slot->chan_properties.gain_numerator = 1; slot->chan_properties.gain_denominator = 1; slot->chan_properties.adc_graph = &adc_pmic->adc_graph[1]; break; case CHAN_PATH_TYPE7: data_amux_chan = CHANNEL_MPP6 << 4; data_arb_rsv = 0x20; slot->chan_properties.gain_numerator = 1; slot->chan_properties.gain_denominator = 1; slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0]; break; case CHAN_PATH_TYPE8: data_amux_chan = CHANNEL_MPP7 << 4; data_arb_rsv = 0x20; slot->chan_properties.gain_numerator = 1; slot->chan_properties.gain_denominator = 2; slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0]; break; case CHAN_PATH_TYPE9: data_amux_chan = CHANNEL_MPP8 << 4; data_arb_rsv = 0x20; slot->chan_properties.gain_numerator = 1; slot->chan_properties.gain_denominator = 2; slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0]; break; case CHAN_PATH_TYPE10: data_amux_chan = CHANNEL_MPP9 << 4; data_arb_rsv = 0x20; slot->chan_properties.gain_numerator = 1; slot->chan_properties.gain_denominator = 3; slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0]; break; case CHAN_PATH_TYPE11: data_amux_chan = CHANNEL_USB_VBUS << 4; data_arb_rsv = 0x20; slot->chan_properties.gain_numerator = 1; slot->chan_properties.gain_denominator = 3; slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0]; break; case CHAN_PATH_TYPE12: data_amux_chan = CHANNEL_DIE_TEMP << 4; data_arb_rsv = 0x20; slot->chan_properties.gain_numerator = 1; slot->chan_properties.gain_denominator = 1; slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0]; break; case CHAN_PATH_TYPE13: data_amux_chan = CHANNEL_125V << 4; data_arb_rsv = 0x20; slot->chan_properties.gain_numerator = 1; slot->chan_properties.gain_denominator = 1; slot->chan_properties.adc_graph = 
&adc_pmic->adc_graph[0]; break; case CHAN_PATH_TYPE14: data_amux_chan = CHANNEL_INTERNAL_2 << 4; data_arb_rsv = 0x20; slot->chan_properties.gain_numerator = 1; slot->chan_properties.gain_denominator = 1; slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0]; break; case CHAN_PATH_TYPE_NONE: data_amux_chan = CHANNEL_MUXOFF << 4; data_arb_rsv = 0x10; slot->chan_properties.gain_numerator = 1; slot->chan_properties.gain_denominator = 1; slot->chan_properties.adc_graph = &adc_pmic->adc_graph[1]; break; case CHAN_PATH_TYPE15: data_amux_chan = CHANNEL_INTERNAL << 4; data_arb_rsv = 0x20; slot->chan_properties.gain_numerator = 1; slot->chan_properties.gain_denominator = 1; slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0]; break; } rc = pm8xxx_writeb(adc_pmic->dev->parent, ADC_ARB_USRP_AMUX_CNTRL, data_amux_chan); if (rc < 0) { pr_debug("%s: PM8058 write failed\n", __func__); return rc; } rc = pm8xxx_writeb(adc_pmic->dev->parent, ADC_ARB_USRP_RSV, data_arb_rsv); if (rc < 0) { pr_debug("%s: PM8058 write failed\n", __func__); return rc; } /* Set default clock rate to 2.4 MHz XO ADC clock digital */ switch (slot->chan_adc_config) { case ADC_CONFIG_TYPE1: data_ana_param = 0xFE; data_dig_param = 0x23; data_ana_param2 = 0xFF; /* AMUX register data to start the ADC conversion */ data_arb_cntrl = 0xF1; break; case ADC_CONFIG_TYPE2: data_ana_param = 0xFE; data_dig_param = 0x03; data_ana_param2 = 0xFF; /* AMUX register data to start the ADC conversion */ data_arb_cntrl = 0xF1; break; } rc = pm8xxx_writeb(adc_pmic->dev->parent, ADC_ARB_USRP_ANA_PARAM, data_ana_param); if (rc < 0) { pr_debug("%s: PM8058 write failed\n", __func__); return rc; } rc = pm8xxx_writeb(adc_pmic->dev->parent, ADC_ARB_USRP_DIG_PARAM, data_dig_param); if (rc < 0) { pr_debug("%s: PM8058 write failed\n", __func__); return rc; } rc = pm8xxx_writeb(adc_pmic->dev->parent, ADC_ARB_USRP_ANA_PARAM, data_ana_param2); if (rc < 0) { pr_debug("%s: PM8058 write failed\n", __func__); return rc; } 
enable_irq(adc_pmic->adc_irq); rc = pm8xxx_writeb(adc_pmic->dev->parent, ADC_ARB_USRP_CNTRL, data_arb_cntrl); if (rc < 0) { pr_debug("%s: PM8058 write failed\n", __func__); return rc; } return 0; } int32_t pm8058_xoadc_select_chan_and_start_conv(uint32_t adc_instance, struct adc_conv_slot *slot) { struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance]; struct xoadc_conv_state *slot_state = adc_pmic->conv_queue_list; if (!xoadc_initialized) return -ENODEV; mutex_lock(&slot_state->list_lock); list_add_tail(&slot->list, &slot_state->slots); if (adc_pmic->xoadc_queue_count == 0) { if (adc_pmic->pdata->xoadc_vreg_set != NULL) adc_pmic->pdata->xoadc_vreg_set(1); pm8058_xoadc_configure(adc_instance, slot); } adc_pmic->xoadc_queue_count++; mutex_unlock(&slot_state->list_lock); return 0; } EXPORT_SYMBOL(pm8058_xoadc_select_chan_and_start_conv); static int32_t pm8058_xoadc_dequeue_slot_request(uint32_t adc_instance, struct adc_conv_slot **slot) { struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance]; struct xoadc_conv_state *slot_state = adc_pmic->conv_queue_list; int rc = 0; mutex_lock(&slot_state->list_lock); if (adc_pmic->xoadc_queue_count > 0 && !list_empty(&slot_state->slots)) { *slot = list_first_entry(&slot_state->slots, struct adc_conv_slot, list); list_del(&(*slot)->list); } else rc = -EINVAL; mutex_unlock(&slot_state->list_lock); if (rc < 0) { if (pm8058_xoadc_can_print()) pr_err("Pmic 8058 xoadc spurious interrupt detected\n"); return rc; } return 0; } int32_t pm8058_xoadc_read_adc_code(uint32_t adc_instance, int32_t *data) { struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance]; struct xoadc_conv_state *slot_state = adc_pmic->conv_queue_list; uint8_t rslt_lsb, rslt_msb; struct adc_conv_slot *slot; int32_t rc, max_ideal_adc_code = 1 << adc_pmic->adc_prop->bitresolution; if (!xoadc_initialized) return -ENODEV; rc = pm8xxx_readb(adc_pmic->dev->parent, ADC_ARB_USRP_DATA0, &rslt_lsb); if (rc < 0) { pr_debug("%s: PM8058 read failed\n", __func__); return rc; } rc = 
pm8xxx_readb(adc_pmic->dev->parent, ADC_ARB_USRP_DATA1, &rslt_msb); if (rc < 0) { pr_debug("%s: PM8058 read failed\n", __func__); return rc; } *data = (rslt_msb << 8) | rslt_lsb; /* Use the midpoint to determine underflow or overflow */ if (*data > max_ideal_adc_code + (max_ideal_adc_code >> 1)) *data |= ((1 << (8 * sizeof(*data) - adc_pmic->adc_prop->bitresolution)) - 1) << adc_pmic->adc_prop->bitresolution; /* Return if this is a calibration run since there * is no need to check requests in the waiting queue */ if (xoadc_calib_first_adc) return 0; mutex_lock(&slot_state->list_lock); adc_pmic->xoadc_queue_count--; if (adc_pmic->xoadc_queue_count > 0) { slot = list_first_entry(&slot_state->slots, struct adc_conv_slot, list); pm8058_xoadc_configure(adc_instance, slot); } mutex_unlock(&slot_state->list_lock); mutex_lock(&slot_state->list_lock); /* Default value for switching off the arbiter after reading the ADC value. Bit 0 set to 0. */ if (adc_pmic->xoadc_queue_count == 0) { rc = pm8058_xoadc_arb_cntrl(0, adc_instance, CHANNEL_MUXOFF); if (rc < 0) { pr_debug("%s: Configuring ADC Arbiter disable" "failed\n", __func__); return rc; } if (adc_pmic->pdata->xoadc_vreg_set != NULL) adc_pmic->pdata->xoadc_vreg_set(0); } mutex_unlock(&slot_state->list_lock); return 0; } EXPORT_SYMBOL(pm8058_xoadc_read_adc_code); static irqreturn_t pm8058_xoadc(int irq, void *dev_id) { struct pmic8058_adc *xoadc_8058 = dev_id; struct adc_conv_slot *slot = NULL; int rc; disable_irq_nosync(xoadc_8058->adc_irq); if (xoadc_calib_first_adc) return IRQ_HANDLED; rc = pm8058_xoadc_dequeue_slot_request(xoadc_8058->xoadc_num, &slot); if (rc < 0) return IRQ_NONE; if (rc == 0) msm_adc_conv_cb(slot, 0, NULL, 0); return IRQ_HANDLED; } struct adc_properties *pm8058_xoadc_get_properties(uint32_t dev_instance) { struct pmic8058_adc *xoadc_8058 = pmic_adc[dev_instance]; return xoadc_8058->adc_prop; } EXPORT_SYMBOL(pm8058_xoadc_get_properties); int32_t pm8058_xoadc_calib_device(uint32_t adc_instance) { struct 
pmic8058_adc *adc_pmic = pmic_adc[adc_instance]; struct adc_conv_slot *slot; int rc, offset_xoadc, slope_xoadc, calib_read_1, calib_read_2; if (adc_pmic->pdata->xoadc_vreg_set != NULL) adc_pmic->pdata->xoadc_vreg_set(1); pm8058_xoadc_slot_request(adc_instance, &slot); if (slot) { slot->chan_path = CHAN_PATH_TYPE13; slot->chan_adc_config = ADC_CONFIG_TYPE2; slot->chan_adc_calib = ADC_CONFIG_TYPE2; xoadc_calib_first_adc = true; rc = pm8058_xoadc_configure(adc_instance, slot); if (rc) { pr_err("pm8058_xoadc configure failed\n"); goto fail; } } else { rc = -EINVAL; goto fail; } msleep(3); rc = pm8058_xoadc_read_adc_code(adc_instance, &calib_read_1); if (rc) { pr_err("pm8058_xoadc read adc failed\n"); xoadc_calib_first_adc = false; goto fail; } xoadc_calib_first_adc = false; pm8058_xoadc_slot_request(adc_instance, &slot); if (slot) { slot->chan_path = CHAN_PATH_TYPE15; slot->chan_adc_config = ADC_CONFIG_TYPE2; slot->chan_adc_calib = ADC_CONFIG_TYPE2; xoadc_calib_first_adc = true; rc = pm8058_xoadc_configure(adc_instance, slot); if (rc) { pr_err("pm8058_xoadc configure failed\n"); goto fail; } } else { rc = -EINVAL; goto fail; } msleep(3); rc = pm8058_xoadc_read_adc_code(adc_instance, &calib_read_2); if (rc) { pr_err("pm8058_xoadc read adc failed\n"); xoadc_calib_first_adc = false; goto fail; } xoadc_calib_first_adc = false; pm8058_xoadc_restore_slot(adc_instance, slot); slope_xoadc = (((calib_read_1 - calib_read_2) << 10)/ CHANNEL_ADC_625_MV); offset_xoadc = calib_read_2 - ((slope_xoadc * CHANNEL_ADC_625_MV) >> 10); printk(KERN_INFO"pmic8058_xoadc:The offset for AMUX calibration" "was %d\n", offset_xoadc); adc_pmic->adc_graph[0].offset = offset_xoadc; adc_pmic->adc_graph[0].dy = (calib_read_1 - calib_read_2); adc_pmic->adc_graph[0].dx = CHANNEL_ADC_625_MV; /* Retain ideal calibration settings for therm readings */ adc_pmic->adc_graph[1].offset = 0 ; adc_pmic->adc_graph[1].dy = (1 << 15) - 1; adc_pmic->adc_graph[1].dx = 2200; if (adc_pmic->pdata->xoadc_vreg_set != NULL) 
adc_pmic->pdata->xoadc_vreg_set(0); return 0; fail: if (adc_pmic->pdata->xoadc_vreg_set != NULL) adc_pmic->pdata->xoadc_vreg_set(0); return rc; } EXPORT_SYMBOL(pm8058_xoadc_calib_device); int32_t pm8058_xoadc_calibrate(uint32_t dev_instance, struct adc_conv_slot *slot, int *calib_status) { *calib_status = CALIB_NOT_REQUIRED; return 0; } EXPORT_SYMBOL(pm8058_xoadc_calibrate); #ifdef CONFIG_PM static int pm8058_xoadc_suspend_noirq(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct pmic8058_adc *adc_pmic = platform_get_drvdata(pdev); adc_pmic->msm_suspend_check = 1; return 0; } static int pm8058_xoadc_resume_noirq(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct pmic8058_adc *adc_pmic = platform_get_drvdata(pdev); adc_pmic->msm_suspend_check = 0; return 0; } static const struct dev_pm_ops pm8058_xoadc_dev_pm_ops = { .suspend_noirq = pm8058_xoadc_suspend_noirq, .resume_noirq = pm8058_xoadc_resume_noirq, }; #define PM8058_XOADC_DEV_PM_OPS (&pm8058_xoadc_dev_pm_ops) #else #define PM8058_XOADC_DEV_PM_OPS NULL #endif static int __devinit pm8058_xoadc_probe(struct platform_device *pdev) { struct xoadc_platform_data *pdata = pdev->dev.platform_data; struct pmic8058_adc *adc_pmic; int i, rc = 0; if (!pdata) { dev_err(&pdev->dev, "no platform data?\n"); return -EINVAL; } adc_pmic = devm_kzalloc(&pdev->dev, sizeof(*adc_pmic), GFP_KERNEL); if (!adc_pmic) { dev_err(&pdev->dev, "Unable to allocate memory\n"); return -ENOMEM; } adc_pmic->dev = &pdev->dev; adc_pmic->adc_prop = pdata->xoadc_prop; adc_pmic->xoadc_num = pdata->xoadc_num; adc_pmic->xoadc_queue_count = 0; platform_set_drvdata(pdev, adc_pmic); if (adc_pmic->xoadc_num > XOADC_PMIC_0) { dev_err(&pdev->dev, "ADC device not supported\n"); return -EINVAL; } adc_pmic->pdata = pdata; adc_pmic->adc_graph = devm_kzalloc(&pdev->dev, sizeof(struct linear_graph) * MAX_CHANNEL_PROPERTIES_QUEUE, GFP_KERNEL); if (!adc_pmic->adc_graph) { dev_err(&pdev->dev, "Unable to 
allocate memory\n"); return -ENOMEM; } /* Will be replaced by individual channel calibration */ for (i = 0; i < MAX_CHANNEL_PROPERTIES_QUEUE; i++) { adc_pmic->adc_graph[i].offset = 0 ; adc_pmic->adc_graph[i].dy = (1 << 15) - 1; adc_pmic->adc_graph[i].dx = 2200; } if (pdata->xoadc_mpp_config != NULL) pdata->xoadc_mpp_config(); adc_pmic->conv_slot_request = &adc_pmic->conv[0]; adc_pmic->conv_slot_request->context = &adc_pmic->conv_queue_elements[0]; mutex_init(&adc_pmic->conv_slot_request->list_lock); INIT_LIST_HEAD(&adc_pmic->conv_slot_request->slots); /* tie each slot and initwork them */ for (i = 0; i < MAX_QUEUE_LENGTH; i++) { list_add(&adc_pmic->conv_slot_request->context[i].list, &adc_pmic->conv_slot_request->slots); INIT_WORK(&adc_pmic->conv_slot_request->context[i].work, msm_adc_wq_work); init_completion(&adc_pmic->conv_slot_request->context[i].comp); adc_pmic->conv_slot_request->context[i].idx = i; } adc_pmic->conv_queue_list = &adc_pmic->conv[1]; mutex_init(&adc_pmic->conv_queue_list->list_lock); INIT_LIST_HEAD(&adc_pmic->conv_queue_list->slots); adc_pmic->adc_irq = platform_get_irq(pdev, 0); if (adc_pmic->adc_irq < 0) return -ENXIO; rc = request_threaded_irq(adc_pmic->adc_irq, NULL, pm8058_xoadc, IRQF_TRIGGER_RISING, "pm8058_adc_interrupt", adc_pmic); if (rc) { dev_err(&pdev->dev, "failed to request adc irq\n"); return rc; } disable_irq(adc_pmic->adc_irq); if (adc_pmic->adc_voter == NULL) { adc_pmic->adc_voter = msm_xo_get(MSM_XO_TCXO_D1, "pmic8058_xoadc"); if (IS_ERR(adc_pmic->adc_voter)) { dev_err(&pdev->dev, "Failed to get XO vote\n"); return PTR_ERR(adc_pmic->adc_voter); } } device_init_wakeup(&pdev->dev, pdata->xoadc_wakeup); wake_lock_init(&adc_pmic->adc_wakelock, WAKE_LOCK_SUSPEND, "pmic8058_xoadc_wakelock"); pmic_adc[adc_pmic->xoadc_num] = adc_pmic; if (pdata->xoadc_vreg_setup != NULL) pdata->xoadc_vreg_setup(); xoadc_initialized = true; xoadc_calib_first_adc = false; return 0; } static int __devexit pm8058_xoadc_teardown(struct platform_device 
*pdev) { struct pmic8058_adc *adc_pmic = platform_get_drvdata(pdev); if (adc_pmic->pdata->xoadc_vreg_shutdown != NULL) adc_pmic->pdata->xoadc_vreg_shutdown(); wake_lock_destroy(&adc_pmic->adc_wakelock); msm_xo_put(adc_pmic->adc_voter); device_init_wakeup(&pdev->dev, 0); xoadc_initialized = false; return 0; } static struct platform_driver pm8058_xoadc_driver = { .probe = pm8058_xoadc_probe, .remove = __devexit_p(pm8058_xoadc_teardown), .driver = { .name = "pm8058-xoadc", .owner = THIS_MODULE, .pm = PM8058_XOADC_DEV_PM_OPS, }, }; static int __init pm8058_xoadc_init(void) { return platform_driver_register(&pm8058_xoadc_driver); } module_init(pm8058_xoadc_init); static void __exit pm8058_xoadc_exit(void) { platform_driver_unregister(&pm8058_xoadc_driver); } module_exit(pm8058_xoadc_exit); MODULE_ALIAS("platform:pmic8058_xoadc"); MODULE_DESCRIPTION("PMIC8058 XOADC driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
grogg/platform_device_asus_flo-kernel_kernel
arch/mips/pci/pci-alchemy.c
4681
14005
/*
 * Alchemy PCI host mode support.
 *
 * Copyright 2001-2003, 2007-2008 MontaVista Software Inc.
 * Author: MontaVista Software, Inc. <source@mvista.com>
 *
 * Support for all devices (greater than 16) added by David Gathright.
 */

#include <linux/export.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/syscore_ops.h>
#include <linux/vmalloc.h>

#include <asm/mach-au1x00/au1000.h>
#include <asm/tlbmisc.h>

#ifdef CONFIG_DEBUG_PCI
#define DBG(x...) printk(KERN_DEBUG x)
#else
#define DBG(x...) do {} while (0)
#endif

/* direction selector for config_access() */
#define PCI_ACCESS_READ		0
#define PCI_ACCESS_WRITE	1

/* Per-controller state for the Alchemy PCI host bridge. */
struct alchemy_pci_context {
	struct pci_controller alchemy_pci_ctrl; /* leave as first member! */

	void __iomem *regs;			/* ctrl base */
	/* tools for wired entry for config space access */
	unsigned long last_elo0;	/* cached EntryLo0 of the wired entry */
	unsigned long last_elo1;	/* cached EntryLo1 of the wired entry */
	int wired_entry;		/* TLB index used for cfg space */
	struct vm_struct *pci_cfg_vm;	/* 8KB VA window mapped by that entry */

	unsigned long pm[12];		/* controller register save area (PM) */

	int (*board_map_irq)(const struct pci_dev *d, u8 slot, u8 pin);
	int (*board_pci_idsel)(unsigned int devsel, int assert);
};

/* for syscore_ops. There's only one PCI controller on Alchemy chips, so this
 * should suffice for now.
 */
static struct alchemy_pci_context *__alchemy_pci_ctx;

/* IO/MEM resources for PCI. Keep the memres in sync with __fixup_bigphys_addr
 * in arch/mips/alchemy/common/setup.c
 */
static struct resource alchemy_pci_def_memres = {
	.start	= ALCHEMY_PCI_MEMWIN_START,
	.end	= ALCHEMY_PCI_MEMWIN_END,
	.name	= "PCI memory space",
	.flags	= IORESOURCE_MEM
};

static struct resource alchemy_pci_def_iores = {
	.start	= ALCHEMY_PCI_IOWIN_START,
	.end	= ALCHEMY_PCI_IOWIN_END,
	.name	= "PCI IO space",
	.flags	= IORESOURCE_IO
};

/*
 * Rewrite TLB slot @entry in place with the given EntryLo0/1, EntryHi and
 * PageMask values, preserving the caller's EntryHi (ASID) and PageMask.
 * Caller must run with interrupts disabled (see config_access()).
 */
static void mod_wired_entry(int entry, unsigned long entrylo0,
		unsigned long entrylo1, unsigned long entryhi,
		unsigned long pagemask)
{
	unsigned long old_pagemask;
	unsigned long old_ctx;

	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi() & 0xff;
	old_pagemask = read_c0_pagemask();
	write_c0_index(entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	tlb_write_indexed();
	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
}

/*
 * Claim a fresh wired TLB slot for config-space accesses and invalidate the
 * cached EntryLo values so the next config_access() reprograms it.
 * Also used on resume, when firmware has wiped the TLB.
 */
static void alchemy_pci_wired_entry(struct alchemy_pci_context *ctx)
{
	ctx->wired_entry = read_c0_wired();
	add_wired_entry(0, 0, (unsigned long)ctx->pci_cfg_vm->addr, PM_4K);
	ctx->last_elo0 = ctx->last_elo1 = ~0;
}

/*
 * Perform one 32-bit PCI configuration-space read or write through the
 * wired-TLB window.  Returns PCIBIOS_SUCCESSFUL or -1 on master abort /
 * PCI error; on failure *data is set to 0xffffffff for reads.
 */
static int config_access(unsigned char access_type, struct pci_bus *bus,
			 unsigned int dev_fn, unsigned char where, u32 *data)
{
	struct alchemy_pci_context *ctx = bus->sysdata;
	unsigned int device = PCI_SLOT(dev_fn);
	unsigned int function = PCI_FUNC(dev_fn);
	unsigned long offset, status, cfg_base, flags, entryLo0, entryLo1, r;
	int error = PCIBIOS_SUCCESSFUL;

	/* only 20 IDSEL lines are decoded (devices 0..19) */
	if (device > 19) {
		*data = 0xffffffff;
		return -1;
	}

	local_irq_save(flags);
	/* clear any stale error status bits before the access */
	r = __raw_readl(ctx->regs + PCI_REG_STATCMD) & 0x0000ffff;
	r |= PCI_STATCMD_STATUS(0x2000);
	__raw_writel(r, ctx->regs + PCI_REG_STATCMD);
	wmb();

	/* Allow board vendors to implement their own off-chip IDSEL.
	 * If it doesn't succeed, may as well bail out at this point.
	 */
	if (ctx->board_pci_idsel(device, 1) == 0) {
		*data = 0xffffffff;
		local_irq_restore(flags);
		return -1;
	}

	/* Setup the config window: type 0 cycle on bus 0, type 1 otherwise */
	if (bus->number == 0)
		cfg_base = (1 << device) << 11;
	else
		cfg_base = 0x80000000 | (bus->number << 16) | (device << 11);

	/* Setup the lower bits of the 36-bit address */
	offset = (function << 8) | (where & ~0x3);
	/* Pick up any address that falls below the page mask */
	offset |= cfg_base & ~PAGE_MASK;

	/* Page boundary */
	cfg_base = cfg_base & PAGE_MASK;

	/* To improve performance, if the current device is the same as
	 * the last device accessed, we don't touch the TLB.
	 */
	entryLo0 = (6 << 26) | (cfg_base >> 6) | (2 << 3) | 7;
	entryLo1 = (6 << 26) | (cfg_base >> 6) | (0x1000 >> 6) | (2 << 3) | 7;
	if ((entryLo0 != ctx->last_elo0) || (entryLo1 != ctx->last_elo1)) {
		mod_wired_entry(ctx->wired_entry, entryLo0, entryLo1,
				(unsigned long)ctx->pci_cfg_vm->addr, PM_4K);
		ctx->last_elo0 = entryLo0;
		ctx->last_elo1 = entryLo1;
	}

	if (access_type == PCI_ACCESS_WRITE)
		__raw_writel(*data, ctx->pci_cfg_vm->addr + offset);
	else
		*data = __raw_readl(ctx->pci_cfg_vm->addr + offset);
	wmb();

	DBG("alchemy-pci: cfg access %d bus %u dev %u at %x dat %x conf %lx\n",
	    access_type, bus->number, device, where, *data, offset);

	/* check for errors, master abort */
	status = __raw_readl(ctx->regs + PCI_REG_STATCMD);
	if (status & (1 << 29)) {
		*data = 0xffffffff;
		error = -1;
		DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d",
		    access_type, bus->number, device);
	} else if ((status >> 28) & 0xf) {
		DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n",
		    device, (status >> 28) & 0xf);

		/* clear errors */
		__raw_writel(status & 0xf000ffff, ctx->regs + PCI_REG_STATCMD);

		*data = 0xffffffff;
		error = -1;
	}

	/* Take away the IDSEL. */
	(void)ctx->board_pci_idsel(device, 0);

	local_irq_restore(flags);
	return error;
}

/* 8-bit config read: fetch the containing dword and shift out the byte. */
static int read_config_byte(struct pci_bus *bus, unsigned int devfn,
			    int where, u8 *val)
{
	u32 data;
	int ret = config_access(PCI_ACCESS_READ, bus, devfn, where, &data);

	if (where & 1)
		data >>= 8;
	if (where & 2)
		data >>= 16;
	*val = data & 0xff;
	return ret;
}

/* 16-bit config read: fetch the containing dword and shift out the word. */
static int read_config_word(struct pci_bus *bus, unsigned int devfn,
			    int where, u16 *val)
{
	u32 data;
	int ret = config_access(PCI_ACCESS_READ, bus, devfn, where, &data);

	if (where & 2)
		data >>= 16;
	*val = data & 0xffff;
	return ret;
}

/* 32-bit config read, direct passthrough. */
static int read_config_dword(struct pci_bus *bus, unsigned int devfn,
			     int where, u32 *val)
{
	return config_access(PCI_ACCESS_READ, bus, devfn, where, val);
}

/* 8-bit config write: read-modify-write the containing dword. */
static int write_config_byte(struct pci_bus *bus, unsigned int devfn,
			     int where, u8 val)
{
	u32 data = 0;

	if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data))
		return -1;

	data = (data & ~(0xff << ((where & 3) << 3))) |
	       (val << ((where & 3) << 3));

	if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data))
		return -1;

	return PCIBIOS_SUCCESSFUL;
}

/* 16-bit config write: read-modify-write the containing dword. */
static int write_config_word(struct pci_bus *bus, unsigned int devfn,
			     int where, u16 val)
{
	u32 data = 0;

	if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data))
		return -1;

	data = (data & ~(0xffff << ((where & 3) << 3))) |
	       (val << ((where & 3) << 3));

	if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data))
		return -1;

	return PCIBIOS_SUCCESSFUL;
}

/* 32-bit config write, direct passthrough. */
static int write_config_dword(struct pci_bus *bus, unsigned int devfn,
			      int where, u32 val)
{
	return config_access(PCI_ACCESS_WRITE, bus, devfn, where, &val);
}

/* pci_ops .read entry point: dispatch on access size. */
static int alchemy_pci_read(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 *val)
{
	switch (size) {
	case 1: {
			u8 _val;
			int rc = read_config_byte(bus, devfn, where, &_val);

			*val = _val;
			return rc;
		}
	case 2: {
			u16 _val;
			int rc = read_config_word(bus, devfn, where, &_val);

			*val = _val;
			return rc;
		}
	default:
		return read_config_dword(bus, devfn, where, val);
	}
}

/* pci_ops .write entry point: dispatch on access size. */
static int alchemy_pci_write(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 val)
{
	switch (size) {
	case 1:
		return write_config_byte(bus, devfn, where, (u8) val);
	case 2:
		return write_config_word(bus, devfn, where, (u16) val);
	default:
		return write_config_dword(bus, devfn, where, val);
	}
}

static struct pci_ops alchemy_pci_ops = {
	.read	= alchemy_pci_read,
	.write	= alchemy_pci_write,
};

/* Default IDSEL hook when the board supplies none: always succeed. */
static int alchemy_pci_def_idsel(unsigned int devsel, int assert)
{
	return 1;	/* success */
}

/* save PCI controller register contents. */
static int alchemy_pci_suspend(void)
{
	struct alchemy_pci_context *ctx = __alchemy_pci_ctx;
	if (!ctx)
		return 0;

	ctx->pm[0]  = __raw_readl(ctx->regs + PCI_REG_CMEM);
	/* CONFIG is saved masked; it is also restored last, see resume */
	ctx->pm[1]  = __raw_readl(ctx->regs + PCI_REG_CONFIG) & 0x0009ffff;
	ctx->pm[2]  = __raw_readl(ctx->regs + PCI_REG_B2BMASK_CCH);
	ctx->pm[3]  = __raw_readl(ctx->regs + PCI_REG_B2BBASE0_VID);
	ctx->pm[4]  = __raw_readl(ctx->regs + PCI_REG_B2BBASE1_SID);
	ctx->pm[5]  = __raw_readl(ctx->regs + PCI_REG_MWMASK_DEV);
	ctx->pm[6]  = __raw_readl(ctx->regs + PCI_REG_MWBASE_REV_CCL);
	ctx->pm[7]  = __raw_readl(ctx->regs + PCI_REG_ID);
	ctx->pm[8]  = __raw_readl(ctx->regs + PCI_REG_CLASSREV);
	ctx->pm[9]  = __raw_readl(ctx->regs + PCI_REG_PARAM);
	ctx->pm[10] = __raw_readl(ctx->regs + PCI_REG_MBAR);
	ctx->pm[11] = __raw_readl(ctx->regs + PCI_REG_TIMEOUT);

	return 0;
}

/* Restore controller registers and re-install the wired config-space
 * TLB entry after resume (CONFIG deliberately written last).
 */
static void alchemy_pci_resume(void)
{
	struct alchemy_pci_context *ctx = __alchemy_pci_ctx;
	if (!ctx)
		return;

	__raw_writel(ctx->pm[0],  ctx->regs + PCI_REG_CMEM);
	__raw_writel(ctx->pm[2],  ctx->regs + PCI_REG_B2BMASK_CCH);
	__raw_writel(ctx->pm[3],  ctx->regs + PCI_REG_B2BBASE0_VID);
	__raw_writel(ctx->pm[4],  ctx->regs + PCI_REG_B2BBASE1_SID);
	__raw_writel(ctx->pm[5],  ctx->regs + PCI_REG_MWMASK_DEV);
	__raw_writel(ctx->pm[6],  ctx->regs + PCI_REG_MWBASE_REV_CCL);
	__raw_writel(ctx->pm[7],  ctx->regs + PCI_REG_ID);
	__raw_writel(ctx->pm[8],  ctx->regs + PCI_REG_CLASSREV);
	__raw_writel(ctx->pm[9],  ctx->regs + PCI_REG_PARAM);
	__raw_writel(ctx->pm[10], ctx->regs + PCI_REG_MBAR);
	__raw_writel(ctx->pm[11], ctx->regs + PCI_REG_TIMEOUT);
	wmb();
	__raw_writel(ctx->pm[1],  ctx->regs + PCI_REG_CONFIG);
	wmb();

	/* YAMON on all db1xxx boards wipes the TLB and writes zero to C0_wired
	 * on resume, making it necessary to recreate it as soon as possible.
	 */
	ctx->wired_entry = 8191;	/* impossibly high value */
	alchemy_pci_wired_entry(ctx);	/* install it */
}

static struct syscore_ops alchemy_pci_pmops = {
	.suspend	= alchemy_pci_suspend,
	.resume		= alchemy_pci_resume,
};

/*
 * Probe: map controller registers and the PCI IO window, set up the wired
 * TLB entry for config space, apply board config-register tweaks, and
 * register the pci_controller.  Returns 0 or a negative errno.
 */
static int __devinit alchemy_pci_probe(struct platform_device *pdev)
{
	struct alchemy_pci_platdata *pd = pdev->dev.platform_data;
	struct alchemy_pci_context *ctx;
	void __iomem *virt_io;
	unsigned long val;
	struct resource *r;
	int ret;

	/* need at least PCI IRQ mapping table */
	if (!pd) {
		dev_err(&pdev->dev, "need platform data for PCI setup\n");
		ret = -ENODEV;
		goto out;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		dev_err(&pdev->dev, "no memory for pcictl context\n");
		ret = -ENOMEM;
		goto out;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "no pcictl ctrl regs resource\n");
		ret = -ENODEV;
		goto out1;
	}

	if (!request_mem_region(r->start, resource_size(r), pdev->name)) {
		dev_err(&pdev->dev, "cannot claim pci regs\n");
		ret = -ENODEV;
		goto out1;
	}

	ctx->regs = ioremap_nocache(r->start, resource_size(r));
	if (!ctx->regs) {
		dev_err(&pdev->dev, "cannot map pci regs\n");
		ret = -ENODEV;
		goto out2;
	}

	/* map parts of the PCI IO area */
	/* REVISIT: if this changes with a newer variant (doubt it) make this
	 * a platform resource.
	 */
	virt_io = ioremap(AU1500_PCI_IO_PHYS_ADDR, 0x00100000);
	if (!virt_io) {
		dev_err(&pdev->dev, "cannot remap pci io space\n");
		ret = -ENODEV;
		goto out3;
	}
	ctx->alchemy_pci_ctrl.io_map_base = (unsigned long)virt_io;

#ifdef CONFIG_DMA_NONCOHERENT
	/* Au1500 revisions older than AD have borked coherent PCI */
	if ((alchemy_get_cputype() == ALCHEMY_CPU_AU1500) &&
	    (read_c0_prid() < 0x01030202)) {
		val = __raw_readl(ctx->regs + PCI_REG_CONFIG);
		val |= PCI_CONFIG_NC;
		__raw_writel(val, ctx->regs + PCI_REG_CONFIG);
		wmb();
		dev_info(&pdev->dev, "non-coherent PCI on Au1500 AA/AB/AC\n");
	}
#endif

	if (pd->board_map_irq)
		ctx->board_map_irq = pd->board_map_irq;

	if (pd->board_pci_idsel)
		ctx->board_pci_idsel = pd->board_pci_idsel;
	else
		ctx->board_pci_idsel = alchemy_pci_def_idsel;

	/* fill in relevant pci_controller members */
	ctx->alchemy_pci_ctrl.pci_ops = &alchemy_pci_ops;
	ctx->alchemy_pci_ctrl.mem_resource = &alchemy_pci_def_memres;
	ctx->alchemy_pci_ctrl.io_resource = &alchemy_pci_def_iores;

	/* we can't ioremap the entire pci config space because it's too large,
	 * nor can we dynamically ioremap it because some drivers use the
	 * PCI config routines from within atomic contex and that becomes a
	 * problem in get_vm_area().  Instead we use one wired TLB entry to
	 * handle all config accesses for all busses.
	 */
	ctx->pci_cfg_vm = get_vm_area(0x2000, VM_IOREMAP);
	if (!ctx->pci_cfg_vm) {
		dev_err(&pdev->dev, "unable to get vm area\n");
		ret = -ENOMEM;
		goto out4;
	}
	ctx->wired_entry = 8191;	/* impossibly high value */
	alchemy_pci_wired_entry(ctx);	/* install it */

	set_io_port_base((unsigned long)ctx->alchemy_pci_ctrl.io_map_base);

	/* board may want to modify bits in the config register, do it now */
	val = __raw_readl(ctx->regs + PCI_REG_CONFIG);
	val &= ~pd->pci_cfg_clr;
	val |= pd->pci_cfg_set;
	val &= ~PCI_CONFIG_PD;		/* clear disable bit */
	__raw_writel(val, ctx->regs + PCI_REG_CONFIG);
	wmb();

	__alchemy_pci_ctx = ctx;
	platform_set_drvdata(pdev, ctx);
	register_syscore_ops(&alchemy_pci_pmops);
	register_pci_controller(&ctx->alchemy_pci_ctrl);

	return 0;

out4:
	iounmap(virt_io);
out3:
	iounmap(ctx->regs);
out2:
	release_mem_region(r->start, resource_size(r));
out1:
	kfree(ctx);
out:
	return ret;
}

static struct platform_driver alchemy_pcictl_driver = {
	.probe		= alchemy_pci_probe,
	.driver	= {
		.name	= "alchemy-pci",
		.owner	= THIS_MODULE,
	},
};

/* Register the platform driver only on CPUs that actually have PCI. */
static int __init alchemy_pci_init(void)
{
	/* Au1500/Au1550 have PCI */
	switch (alchemy_get_cputype()) {
	case ALCHEMY_CPU_AU1500:
	case ALCHEMY_CPU_AU1550:
		return platform_driver_register(&alchemy_pcictl_driver);
	}
	return 0;
}
arch_initcall(alchemy_pci_init);

/* Delegate IRQ mapping to the board-supplied hook, if any. */
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct alchemy_pci_context *ctx = dev->sysdata;
	if (ctx && ctx->board_map_irq)
		return ctx->board_map_irq(dev, slot, pin);
	return -1;
}

/* No per-device platform fixups needed. */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	return 0;
}
gpl-2.0
keyser84/android_kernel_motorola_msm8226
drivers/watchdog/mpc8xxx_wdt.c
4937
8281
/*
 * mpc8xxx_wdt.c - MPC8xx/MPC83xx/MPC86xx watchdog userspace interface
 *
 * Authors: Dave Updegraff <dave@cray.org>
 *	    Kumar Gala <galak@kernel.crashing.org>
 *		Attribution: from 83xx_wst: Florian Schirmer <jolt@tuxbox.org>
 *				..and from sc520_wdt
 * Copyright (c) 2008  MontaVista Software, Inc.
 *			Anton Vorontsov <avorontsov@ru.mvista.com>
 *
 * Note: it appears that you can only actually ENABLE or DISABLE the thing
 * once after POR. Once enabled, you cannot disable, and vice versa.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/miscdevice.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/watchdog.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <sysdev/fsl_soc.h>

/* Memory-mapped watchdog register block (big-endian). */
struct mpc8xxx_wdt {
	__be32 res0;
	__be32 swcrr; /* System watchdog control register */
#define SWCRR_SWTC 0xFFFF0000 /* Software Watchdog Time Count. */
#define SWCRR_SWEN 0x00000004 /* Watchdog Enable bit. */
#define SWCRR_SWRI 0x00000002 /* Software Watchdog Reset/Interrupt Select bit.*/
#define SWCRR_SWPR 0x00000001 /* Software Watchdog Counter Prescale bit. */
	__be32 swcnr; /* System watchdog count register */
	u8 res1[2];
	__be16 swsrr; /* System watchdog service register */
	u8 res2[0xF0];
};

/* Per-compatible configuration (prescaler divisor, hw-enable requirement). */
struct mpc8xxx_wdt_type {
	int prescaler;
	bool hw_enabled;
};

static struct mpc8xxx_wdt __iomem *wd_base;
static int mpc8xxx_wdt_init_late(void);

static u16 timeout = 0xffff;
module_param(timeout, ushort, 0);
MODULE_PARM_DESC(timeout,
	"Watchdog timeout in ticks. (0<timeout<65536, default=65535)");

static bool reset = 1;
module_param(reset, bool, 0);
MODULE_PARM_DESC(reset,
	"Watchdog Interrupt/Reset Mode. 0 = interrupt, 1 = reset");

static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
		 "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");

/*
 * We always prescale, but if someone really doesn't want to they can set this
 * to 0
 */
static int prescale = 1;
static unsigned int timeout_sec;

static unsigned long wdt_is_open;	/* bit 0: device-open exclusion flag */
static DEFINE_SPINLOCK(wdt_spinlock);	/* serializes the two-write service */

/* Service (ping) the watchdog; the 0x556c/0xaa39 sequence must not be
 * interleaved with another, hence the spinlock.
 */
static void mpc8xxx_wdt_keepalive(void)
{
	/* Ping the WDT */
	spin_lock(&wdt_spinlock);
	out_be16(&wd_base->swsrr, 0x556c);
	out_be16(&wd_base->swsrr, 0xaa39);
	spin_unlock(&wdt_spinlock);
}

static void mpc8xxx_wdt_timer_ping(unsigned long arg);
static DEFINE_TIMER(wdt_timer, mpc8xxx_wdt_timer_ping, 0, 0);

/* Kernel-side ping used until (and after) userspace takes over. */
static void mpc8xxx_wdt_timer_ping(unsigned long arg)
{
	mpc8xxx_wdt_keepalive();
	/* We're pinging it twice faster than needed, just to be sure. */
	mod_timer(&wdt_timer, jiffies + HZ * timeout_sec / 2);
}

/* Emit a critical warning describing what happens when pings stop. */
static void mpc8xxx_wdt_pr_warn(const char *msg)
{
	pr_crit("%s, expect the %s soon!\n", msg,
		reset ? "reset" : "machine check exception");
}

/* Any write to the device counts as a keepalive. */
static ssize_t mpc8xxx_wdt_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	if (count)
		mpc8xxx_wdt_keepalive();
	return count;
}

/* Open: enable the watchdog (one-shot after POR) and hand pinging duty
 * over from the kernel timer to userspace.
 */
static int mpc8xxx_wdt_open(struct inode *inode, struct file *file)
{
	u32 tmp = SWCRR_SWEN;
	if (test_and_set_bit(0, &wdt_is_open))
		return -EBUSY;

	/* Once we start the watchdog we can't stop it */
	if (nowayout)
		__module_get(THIS_MODULE);

	/* Good, fire up the show */
	if (prescale)
		tmp |= SWCRR_SWPR;
	if (reset)
		tmp |= SWCRR_SWRI;

	tmp |= timeout << 16;	/* SWTC field occupies the upper 16 bits */

	out_be32(&wd_base->swcrr, tmp);

	del_timer_sync(&wdt_timer);

	return nonseekable_open(inode, file);
}

/* Release: the hardware cannot be disabled, so either resume kernel
 * pinging (!nowayout) or warn that a reset is coming.
 */
static int mpc8xxx_wdt_release(struct inode *inode, struct file *file)
{
	if (!nowayout)
		mpc8xxx_wdt_timer_ping(0);
	else
		mpc8xxx_wdt_pr_warn("watchdog closed");
	clear_bit(0, &wdt_is_open);
	return 0;
}

/* Standard watchdog-char-device ioctls (see Documentation/watchdog). */
static long mpc8xxx_wdt_ioctl(struct file *file, unsigned int cmd,
							unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	static const struct watchdog_info ident = {
		.options = WDIOF_KEEPALIVEPING,
		.firmware_version = 1,
		.identity = "MPC8xxx",
	};

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, p);
	case WDIOC_KEEPALIVE:
		mpc8xxx_wdt_keepalive();
		return 0;
	case WDIOC_GETTIMEOUT:
		return put_user(timeout_sec, p);
	default:
		return -ENOTTY;
	}
}

static const struct file_operations mpc8xxx_wdt_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.write		= mpc8xxx_wdt_write,
	.unlocked_ioctl	= mpc8xxx_wdt_ioctl,
	.open		= mpc8xxx_wdt_open,
	.release	= mpc8xxx_wdt_release,
};

static struct miscdevice mpc8xxx_wdt_miscdev = {
	.minor	= WATCHDOG_MINOR,
	.name	= "watchdog",
	.fops	= &mpc8xxx_wdt_fops,
};

static const struct of_device_id mpc8xxx_wdt_match[];

/* Probe: map registers, derive timeout_sec from the bus frequency, and
 * start kernel-side pinging if firmware already enabled the watchdog.
 */
static int __devinit mpc8xxx_wdt_probe(struct platform_device *ofdev)
{
	int ret;
	const struct of_device_id *match;
	struct device_node *np = ofdev->dev.of_node;
	struct mpc8xxx_wdt_type *wdt_type;
	u32 freq = fsl_get_sys_freq();
	bool enabled;

	match = of_match_device(mpc8xxx_wdt_match, &ofdev->dev);
	if (!match)
		return -EINVAL;
	wdt_type = match->data;

	if (!freq || freq == -1)
		return -EINVAL;

	wd_base = of_iomap(np, 0);
	if (!wd_base)
		return -ENOMEM;

	enabled = in_be32(&wd_base->swcrr) & SWCRR_SWEN;
	if (!enabled && wdt_type->hw_enabled) {
		/* hw_enabled parts can only be armed by firmware */
		pr_info("could not be enabled in software\n");
		ret = -ENOSYS;
		goto err_unmap;
	}

	/* Calculate the timeout in seconds */
	if (prescale)
		timeout_sec = (timeout * wdt_type->prescaler) / freq;
	else
		timeout_sec = timeout / freq;

#ifdef MODULE
	ret = mpc8xxx_wdt_init_late();
	if (ret)
		goto err_unmap;
#endif

	pr_info("WDT driver for MPC8xxx initialized. mode:%s timeout=%d (%d seconds)\n",
		reset ? "reset" : "interrupt", timeout, timeout_sec);

	/*
	 * If the watchdog was previously enabled or we're running on
	 * MPC8xxx, we should ping the wdt from the kernel until the
	 * userspace handles it.
	 */
	if (enabled)
		mpc8xxx_wdt_timer_ping(0);
	return 0;
err_unmap:
	iounmap(wd_base);
	wd_base = NULL;
	return ret;
}

/* Remove: stop kernel pinging and tear down the misc device and mapping. */
static int __devexit mpc8xxx_wdt_remove(struct platform_device *ofdev)
{
	mpc8xxx_wdt_pr_warn("watchdog removed");
	del_timer_sync(&wdt_timer);
	misc_deregister(&mpc8xxx_wdt_miscdev);
	iounmap(wd_base);

	return 0;
}

static const struct of_device_id mpc8xxx_wdt_match[] = {
	{
		.compatible = "mpc83xx_wdt",
		.data = &(struct mpc8xxx_wdt_type) {
			.prescaler = 0x10000,
		},
	},
	{
		.compatible = "fsl,mpc8610-wdt",
		.data = &(struct mpc8xxx_wdt_type) {
			.prescaler = 0x10000,
			.hw_enabled = true,
		},
	},
	{
		.compatible = "fsl,mpc823-wdt",
		.data = &(struct mpc8xxx_wdt_type) {
			.prescaler = 0x800,
		},
	},
	{},
};
MODULE_DEVICE_TABLE(of, mpc8xxx_wdt_match);

static struct platform_driver mpc8xxx_wdt_driver = {
	.probe		= mpc8xxx_wdt_probe,
	.remove		= __devexit_p(mpc8xxx_wdt_remove),
	.driver = {
		.name = "mpc8xxx_wdt",
		.owner = THIS_MODULE,
		.of_match_table = mpc8xxx_wdt_match,
	},
};

/*
 * We do wdt initialization in two steps: arch_initcall probes the wdt
 * very early to start pinging the watchdog (misc devices are not yet
 * available), and later module_init() just registers the misc device.
 */
static int mpc8xxx_wdt_init_late(void)
{
	int ret;

	if (!wd_base)
		return -ENODEV;

	ret = misc_register(&mpc8xxx_wdt_miscdev);
	if (ret) {
		pr_err("cannot register miscdev on minor=%d (err=%d)\n",
		       WATCHDOG_MINOR, ret);
		return ret;
	}
	return 0;
}
#ifndef MODULE
module_init(mpc8xxx_wdt_init_late);
#endif

static int __init mpc8xxx_wdt_init(void)
{
	return platform_driver_register(&mpc8xxx_wdt_driver);
}
arch_initcall(mpc8xxx_wdt_init);

static void __exit mpc8xxx_wdt_exit(void)
{
	platform_driver_unregister(&mpc8xxx_wdt_driver);
}
module_exit(mpc8xxx_wdt_exit);

MODULE_AUTHOR("Dave Updegraff, Kumar Gala");
MODULE_DESCRIPTION("Driver for watchdog timer in MPC8xx/MPC83xx/MPC86xx "
		   "uProcessors");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
mzhou/lge-kernel-p880-cyanogenmod
drivers/staging/vt6656/hostap.c
6217
23798
/*
 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * File: hostap.c
 *
 * Purpose: handle hostap deamon ioctl input/out functions
 *
 * Author: Lyndon Chen
 *
 * Date: Oct. 20, 2003
 *
 * Functions:
 *
 * Revision History:
 *
 */

#include "hostap.h"
#include "iocmd.h"
#include "mac.h"
#include "card.h"
#include "baseband.h"
#include "wpactl.h"
#include "key.h"
#include "datarate.h"

#define VIAWGET_HOSTAPD_MAX_BUF_SIZE 1024
#define HOSTAP_CRYPT_FLAG_SET_TX_KEY BIT0
#define HOSTAP_CRYPT_FLAG_PERMANENT BIT1
#define HOSTAP_CRYPT_ERR_UNKNOWN_ALG 2
#define HOSTAP_CRYPT_ERR_UNKNOWN_ADDR 3
#define HOSTAP_CRYPT_ERR_CRYPT_INIT_FAILED 4
#define HOSTAP_CRYPT_ERR_KEY_SET_FAILED 5
#define HOSTAP_CRYPT_ERR_TX_KEY_SET_FAILED 6
#define HOSTAP_CRYPT_ERR_CARD_CONF_FAILED 7

/*--------------------- Static Definitions -------------------------*/

/*--------------------- Static Classes  ----------------------------*/

/*--------------------- Static Variables  --------------------------*/
//static int          msglevel                =MSG_LEVEL_DEBUG;
static int          msglevel                =MSG_LEVEL_INFO;

/*--------------------- Static Functions  --------------------------*/

/*--------------------- Export Variables  --------------------------*/

/*
 * Description:
 *      register net_device (AP) for hostap deamon
 *
 * Parameters:
 *  In:
 *      pDevice             -
 *      rtnl_locked         -
 *  Out:
 *
 * Return Value:
 *
 */
static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
{
	PSDevice apdev_priv;
	struct net_device *dev = pDevice->dev;
	int ret;
	/* NOTE(review): apdev_netdev_ops is a stack-lifetime object, but its
	 * address is stored in apdev->netdev_ops below and thus outlives this
	 * function — looks like a dangling-pointer bug; confirm against
	 * upstream staging fixes. */
	const struct net_device_ops apdev_netdev_ops = {
		.ndo_start_xmit         = pDevice->tx_80211,
	};

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);

	pDevice->apdev = kzalloc(sizeof(struct net_device), GFP_KERNEL);
	if (pDevice->apdev == NULL)
		return -ENOMEM;

	apdev_priv = netdev_priv(pDevice->apdev);
	/* shallow-copy the whole device state into the AP netdev's priv */
	*apdev_priv = *pDevice;
	memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);

	pDevice->apdev->netdev_ops = &apdev_netdev_ops;

	pDevice->apdev->type = ARPHRD_IEEE80211;

	pDevice->apdev->base_addr = dev->base_addr;
	pDevice->apdev->irq = dev->irq;
	pDevice->apdev->mem_start = dev->mem_start;
	pDevice->apdev->mem_end = dev->mem_end;
	sprintf(pDevice->apdev->name, "%sap", dev->name);
	if (rtnl_locked)
		ret = register_netdevice(pDevice->apdev);
	else
		ret = register_netdev(pDevice->apdev);
	if (ret) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: register_netdevice(AP) failed!\n",
			dev->name);
		/* NOTE(review): pDevice->apdev appears to leak on this path —
		 * no kfree() before returning; verify. */
		return -1;
	}

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Registered netdevice %s for AP management\n",
		dev->name, pDevice->apdev->name);

	KeyvInitTable(pDevice, &pDevice->sKey);

	return 0;
}

/*
 * Description:
 *      unregister net_device(AP)
 *
 * Parameters:
 *  In:
 *      pDevice             -
 *      rtnl_locked         -
 *  Out:
 *
 * Return Value:
 *
 */
static int hostap_disable_hostapd(PSDevice pDevice, int rtnl_locked)
{
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: disabling hostapd mode\n", pDevice->dev->name);

	if (pDevice->apdev && pDevice->apdev->name && pDevice->apdev->name[0]) {
		if (rtnl_locked)
			unregister_netdevice(pDevice->apdev);
		else
			unregister_netdev(pDevice->apdev);
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
			pDevice->dev->name, pDevice->apdev->name);
	}
	kfree(pDevice->apdev);
	pDevice->apdev = NULL;
	/* leaving hostap mode also drops 802.1x / host-WEP / encryption state */
	pDevice->bEnable8021x = FALSE;
	pDevice->bEnableHostWEP = FALSE;
	pDevice->bEncryptionEnable = FALSE;

	return 0;
}

/*
 * Description:
 *      Set enable/disable hostapd mode
 *
 * Parameters:
 *  In:
 *      pDevice             -
 *      rtnl_locked         -
 *  Out:
 *
 * Return Value:
 *
 */
int vt6656_hostap_set_hostapd(PSDevice pDevice, int val, int rtnl_locked)
{
	if (val < 0 || val > 1)
		return -EINVAL;

	if (pDevice->bEnableHostapd == val)
		return 0;	/* no state change requested */

	pDevice->bEnableHostapd = val;

	if (val)
		return hostap_enable_hostapd(pDevice, rtnl_locked);
	else
		return hostap_disable_hostapd(pDevice, rtnl_locked);
}

/*
 * Description:
 *      remove station function supported for hostap deamon
 *
 * Parameters:
 *  In:
 *      pDevice             -
 *      param               -
 *  Out:
 *
 * Return Value:
 *
 */
static int hostap_remove_sta(PSDevice pDevice,
			     struct viawget_hostapd_param *param)
{
	unsigned int uNodeIndex;

	if (BSSbIsSTAInNodeDB(pDevice, param->sta_addr, &uNodeIndex)) {
		BSSvRemoveOneNode(pDevice, uNodeIndex);
	} else {
		return -ENOENT;
	}
	return 0;
}

/*
 * Description:
 *      add a station from hostap deamon
 *
 * Parameters:
 *  In:
 *      pDevice             -
 *      param               -
 *  Out:
 *
 * Return Value:
 *
 */
static int hostap_add_sta(PSDevice pDevice,
			  struct viawget_hostapd_param *param)
{
	PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
	unsigned int uNodeIndex;

	/* reuse an existing node DB slot for this MAC, or create one */
	if (!BSSbIsSTAInNodeDB(pDevice, param->sta_addr, &uNodeIndex)) {
		BSSvCreateOneNode((PSDevice)pDevice, &uNodeIndex);
	}
	memcpy(pMgmt->sNodeDBTable[uNodeIndex].abyMACAddr, param->sta_addr, WLAN_ADDR_LEN);
	pMgmt->sNodeDBTable[uNodeIndex].eNodeState = NODE_ASSOC;
	pMgmt->sNodeDBTable[uNodeIndex].wCapInfo = param->u.add_sta.capability;
// TODO listenInterval
//    pMgmt->sNodeDBTable[uNodeIndex].wListenInterval = 1;
	pMgmt->sNodeDBTable[uNodeIndex].bPSEnable = FALSE;
	pMgmt->sNodeDBTable[uNodeIndex].bySuppRate = param->u.add_sta.tx_supp_rates;

	// set max tx rate
	pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate =
		pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate;
	// set max basic rate
	pMgmt->sNodeDBTable[uNodeIndex].wMaxBasicRate = RATE_2M;
	// Todo: check sta preamble, if ap can't support, set status code
	pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble =
		WLAN_GET_CAP_INFO_SHORTPREAMBLE(pMgmt->sNodeDBTable[uNodeIndex].wCapInfo);

	pMgmt->sNodeDBTable[uNodeIndex].wAID = (WORD)param->u.add_sta.aid;

	pMgmt->sNodeDBTable[uNodeIndex].ulLastRxJiffer = jiffies;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Add STA AID= %d \n", pMgmt->sNodeDBTable[uNodeIndex].wAID);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "MAC=%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X \n",
		param->sta_addr[0],
		param->sta_addr[1],
		param->sta_addr[2],
		param->sta_addr[3],
		param->sta_addr[4],
		param->sta_addr[5]
		) ;
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Max Support rate = %d \n",
		pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate);

	return 0;
}

/*
 * Description:
 *      get station info
 *
 * Parameters:
 *  In:
 *      pDevice             -
 *      param               -
 *  Out:
 *
 * Return Value:
 *
 */
static int hostap_get_info_sta(PSDevice pDevice,
			       struct viawget_hostapd_param *param)
{
	PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
	unsigned int uNodeIndex;

	if (BSSbIsSTAInNodeDB(pDevice, param->sta_addr, &uNodeIndex)) {
		/* report idle time in seconds since last rx */
		param->u.get_info_sta.inactive_sec =
			(jiffies - pMgmt->sNodeDBTable[uNodeIndex].ulLastRxJiffer) / HZ;

		//param->u.get_info_sta.txexc = pMgmt->sNodeDBTable[uNodeIndex].uTxAttempts;
	} else {
		return -ENOENT;
	}

	return 0;
}

/*
 * Description:
 *      reset txexec
 *
 * Parameters:
 *  In:
 *      pDevice             -
 *      param               -
 *  Out:
 *      TURE, FALSE
 *
 * Return Value:
 *
 */
/*
  static int hostap_reset_txexc_sta(PSDevice pDevice,
  struct viawget_hostapd_param *param)
  {
  PSMgmtObject    pMgmt = &(pDevice->sMgmtObj);
  unsigned int uNodeIndex;

  if (BSSbIsSTAInNodeDB(pDevice, param->sta_addr, &uNodeIndex)) {
  pMgmt->sNodeDBTable[uNodeIndex].uTxAttempts = 0;
  } else {
  return -ENOENT;
  }

  return 0;
  }
*/

/*
 * Description:
 *      set station flag
 *
 * Parameters:
 *  In:
 *      pDevice             -
 *      param               -
 *  Out:
 *
 * Return Value:
 *
 */
static int hostap_set_flags_sta(PSDevice pDevice,
				struct viawget_hostapd_param *param)
{
	PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
	unsigned int uNodeIndex;

	if (BSSbIsSTAInNodeDB(pDevice, param->sta_addr, &uNodeIndex)) {
		/* apply OR-mask then AND-mask, hostapd convention */
		pMgmt->sNodeDBTable[uNodeIndex].dwFlags |= param->u.set_flags_sta.flags_or;
		pMgmt->sNodeDBTable[uNodeIndex].dwFlags &= param->u.set_flags_sta.flags_and;
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " dwFlags = %x\n",
			(unsigned int)pMgmt->sNodeDBTable[uNodeIndex].dwFlags);
	} else {
		return -ENOENT;
	}

	return 0;
}

/*
 * Description:
 *      set generic element (wpa ie)
 *
 * Parameters:
 *  In:
 *      pDevice             -
 *      param               -
 *  Out:
 *
 * Return Value:
 *
 */
static int hostap_set_generic_element(PSDevice pDevice,
				      struct viawget_hostapd_param *param)
{
	PSMgmtObject pMgmt = &(pDevice->sMgmtObj);

	/* NOTE(review): param->u.generic_elem.len comes from userspace and is
	 * not bounded against sizeof(pMgmt->abyWPAIE) before this memcpy —
	 * confirm against the upstream buffer-overflow fix for this driver. */
	memcpy(pMgmt->abyWPAIE,
	       param->u.generic_elem.data,
	       param->u.generic_elem.len
		);

	pMgmt->wWPAIELen = param->u.generic_elem.len;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pMgmt->wWPAIELen = %d\n", pMgmt->wWPAIELen);

	// disable wpa
	if (pMgmt->wWPAIELen == 0) {
		pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " No WPAIE, Disable WPA \n");
	} else {
		// enable wpa
		if ((pMgmt->abyWPAIE[0] == WLAN_EID_RSN_WPA) ||
		    (pMgmt->abyWPAIE[0] == WLAN_EID_RSN)) {
			pMgmt->eAuthenMode = WMAC_AUTH_WPANONE;
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Set WPAIE enable WPA\n");
		} else
			return -EINVAL;
	}

	return 0;
}

/*
 * Description:
 *      flush station nodes table.
 *
 * Parameters:
 *  In:
 *      pDevice             -
 *  Out:
 *
 * Return Value:
 *
 */
static void hostap_flush_sta(PSDevice pDevice)
{
	// reserved node index =0 for multicast node.
	BSSvClearNodeDBTable(pDevice, 1);
	pDevice->uAssocCount = 0;

	return;
}

/*
 * Description:
 *      set each stations encryption key
 *
 * Parameters:
 *  In:
 *      pDevice             -
 *      param               -
 *  Out:
 *
 * Return Value:
 *
 */
static int hostap_set_encryption(PSDevice pDevice,
				 struct viawget_hostapd_param *param,
				 int param_len)
{
	PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
	DWORD dwKeyIndex = 0;
	BYTE abyKey[MAX_KEY_LEN];
	BYTE abySeq[MAX_KEY_LEN];
	/* NOTE(review): KeyRSC is only OR-ed into below when a seq is supplied
	 * but is never zero-initialized — reads of it look like use of an
	 * indeterminate value; confirm. */
	NDIS_802_11_KEY_RSC KeyRSC;
	BYTE byKeyDecMode = KEY_CTL_WEP;
	int ret = 0;
	int iNodeIndex = -1;
	int ii;
	BOOL bKeyTableFull = FALSE;
	WORD wKeyCtl = 0;

	param->u.crypt.err = 0;
/*
  if (param_len !=
  (int) ((char *) param->u.crypt.key - (char *) param) +
  param->u.crypt.key_len)
  return -EINVAL;
*/

	if (param->u.crypt.alg > WPA_ALG_CCMP)
		return -EINVAL;

	if ((param->u.crypt.idx > 3) || (param->u.crypt.key_len > MAX_KEY_LEN)) {
		param->u.crypt.err = HOSTAP_CRYPT_ERR_KEY_SET_FAILED;
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " HOSTAP_CRYPT_ERR_KEY_SET_FAILED\n");
		return -EINVAL;
	}

	/* broadcast MAC selects the group-key slot (node 0) */
	if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
	    param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
	    param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
		if (param->u.crypt.idx >= MAX_GROUP_KEY)
			return -EINVAL;
		iNodeIndex = 0;

	} else {
		/* NOTE(review): &iNodeIndex is int* here while sibling helpers
		 * pass unsigned int* to the same function — verify the
		 * prototype of BSSbIsSTAInNodeDB. */
		if (BSSbIsSTAInNodeDB(pDevice, param->sta_addr, &iNodeIndex) == FALSE) {
			param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ADDR;
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " HOSTAP_CRYPT_ERR_UNKNOWN_ADDR\n");
			return -EINVAL;
		}
	}
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " hostap_set_encryption: sta_index %d \n", iNodeIndex);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " hostap_set_encryption: alg %d \n", param->u.crypt.alg);

	if (param->u.crypt.alg == WPA_ALG_NONE) {
		/* alg NONE == delete the station's key material */
		if (pMgmt->sNodeDBTable[iNodeIndex].bOnFly == TRUE) {
			if (KeybRemoveKey(pDevice,
					  &(pDevice->sKey),
					  param->sta_addr,
					  pMgmt->sNodeDBTable[iNodeIndex].dwKeyIndex
				    ) == FALSE) {
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "KeybRemoveKey fail \n");
			}
			pMgmt->sNodeDBTable[iNodeIndex].bOnFly = FALSE;
		}
		pMgmt->sNodeDBTable[iNodeIndex].byKeyIndex = 0;
		pMgmt->sNodeDBTable[iNodeIndex].dwKeyIndex = 0;
		pMgmt->sNodeDBTable[iNodeIndex].uWepKeyLength = 0;
		pMgmt->sNodeDBTable[iNodeIndex].KeyRSC = 0;
		pMgmt->sNodeDBTable[iNodeIndex].dwTSC47_16 = 0;
		pMgmt->sNodeDBTable[iNodeIndex].wTSC15_0 = 0;
		pMgmt->sNodeDBTable[iNodeIndex].byCipherSuite = 0;
		memset(&pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[0],
		       0,
		       MAX_KEY_LEN
			);

		return ret;
	}

	memcpy(abyKey, param->u.crypt.key, param->u.crypt.key_len);
	// copy to node key tbl
	pMgmt->sNodeDBTable[iNodeIndex].byKeyIndex = param->u.crypt.idx;
	pMgmt->sNodeDBTable[iNodeIndex].uWepKeyLength = param->u.crypt.key_len;
	memcpy(&pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[0],
	       param->u.crypt.key,
	       param->u.crypt.key_len
		);

	dwKeyIndex = (DWORD)(param->u.crypt.idx);
	if (param->u.crypt.flags & HOSTAP_CRYPT_FLAG_SET_TX_KEY) {
		pDevice->byKeyIndex = (BYTE)dwKeyIndex;
		pDevice->bTransmitKey = TRUE;
		dwKeyIndex |= (1 << 31);	/* mark as transmit key */
	}

	if (param->u.crypt.alg == WPA_ALG_WEP) {
		if ((pDevice->bEnable8021x == FALSE) || (iNodeIndex == 0)) {
			/* static WEP: program a default (group) key */
			KeybSetDefaultKey(pDevice,
					  &(pDevice->sKey),
					  dwKeyIndex & ~(BIT30 | USE_KEYRSC),
					  param->u.crypt.key_len,
					  NULL,
					  abyKey,
					  KEY_CTL_WEP
				);

		} else {
			// 8021x enable, individual key
			dwKeyIndex |= (1 << 30); // set pairwise key
			if (KeybSetKey(pDevice,
				       &(pDevice->sKey),
				       &param->sta_addr[0],
				       dwKeyIndex & ~(USE_KEYRSC),
				       param->u.crypt.key_len,
				       (PQWORD) &(KeyRSC),
				       (PBYTE)abyKey,
				       KEY_CTL_WEP
				    ) == TRUE) {
				pMgmt->sNodeDBTable[iNodeIndex].bOnFly = TRUE;

			} else {
				// Key Table Full
				pMgmt->sNodeDBTable[iNodeIndex].bOnFly = FALSE;
				bKeyTableFull = TRUE;
			}
		}
		pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
		pDevice->bEncryptionEnable = TRUE;
		pMgmt->byCSSPK = KEY_CTL_WEP;
		pMgmt->byCSSGK = KEY_CTL_WEP;
		pMgmt->sNodeDBTable[iNodeIndex].byCipherSuite = KEY_CTL_WEP;
		pMgmt->sNodeDBTable[iNodeIndex].dwKeyIndex = dwKeyIndex;
		return ret;
	}

	if (param->u.crypt.seq) {
		/* fold the 8-byte little-endian sequence counter into KeyRSC */
		memcpy(&abySeq, param->u.crypt.seq, 8);
		for (ii = 0 ; ii < 8 ; ii++) {
			KeyRSC |= (abySeq[ii] << (ii * 8));
		}
		dwKeyIndex |= 1 << 29;
		pMgmt->sNodeDBTable[iNodeIndex].KeyRSC = KeyRSC;
	}

	if (param->u.crypt.alg == WPA_ALG_TKIP) {
		if (param->u.crypt.key_len != MAX_KEY_LEN)
			return -EINVAL;
		pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
		byKeyDecMode = KEY_CTL_TKIP;
		pMgmt->byCSSPK = KEY_CTL_TKIP;
		pMgmt->byCSSGK = KEY_CTL_TKIP;
	}

	if (param->u.crypt.alg == WPA_ALG_CCMP) {
		/* CCMP needs AES key length and rev A2+ silicon */
		if ((param->u.crypt.key_len != AES_KEY_LEN) ||
		    (pDevice->byLocalID <= REV_ID_VT3253_A1))
			return -EINVAL;
		pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
		byKeyDecMode = KEY_CTL_CCMP;
		pMgmt->byCSSPK = KEY_CTL_CCMP;
		pMgmt->byCSSGK = KEY_CTL_CCMP;
	}

	if (iNodeIndex == 0) {
		/* group key → default key table */
		KeybSetDefaultKey(pDevice,
				  &(pDevice->sKey),
				  dwKeyIndex,
				  param->u.crypt.key_len,
				  (PQWORD) &(KeyRSC),
				  abyKey,
				  byKeyDecMode
			);
		pMgmt->sNodeDBTable[iNodeIndex].bOnFly = TRUE;

	} else {
		dwKeyIndex |= (1 << 30); // set pairwise key
		if (KeybSetKey(pDevice,
			       &(pDevice->sKey),
			       &param->sta_addr[0],
			       dwKeyIndex,
			       param->u.crypt.key_len,
			       (PQWORD) &(KeyRSC),
			       (PBYTE)abyKey,
			       byKeyDecMode
			    ) == TRUE) {
			pMgmt->sNodeDBTable[iNodeIndex].bOnFly = TRUE;

		} else {
			// Key Table Full
			pMgmt->sNodeDBTable[iNodeIndex].bOnFly = FALSE;
			bKeyTableFull = TRUE;
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " Key Table Full\n");
		}

	}

	if (bKeyTableFull == TRUE) {
		wKeyCtl &= 0x7F00;              // clear all key control filed
		wKeyCtl |= (byKeyDecMode << 4);
		wKeyCtl |= (byKeyDecMode);
		wKeyCtl |= 0x0044;              // use group key for all address
		wKeyCtl |= 0x4000;              // disable KeyTable[MAX_KEY_TABLE-1] on-fly to genernate rx int
		// Todo.. xxxxxx
		//MACvSetDefaultKeyCtl(pDevice->PortOffset, wKeyCtl, MAX_KEY_TABLE-1, pDevice->byLocalID);
	}

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " Set key sta_index= %d \n", iNodeIndex);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " tx_index=%d len=%d \n",
		param->u.crypt.idx, param->u.crypt.key_len);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " key=%x-%x-%x-%x-%x-xxxxx \n",
		pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[0],
		pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[1],
		pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[2],
		pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[3],
		pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[4]
		);

	// set wep key
	pDevice->bEncryptionEnable = TRUE;
	pMgmt->sNodeDBTable[iNodeIndex].byCipherSuite = byKeyDecMode;
	pMgmt->sNodeDBTable[iNodeIndex].dwKeyIndex = dwKeyIndex;
	pMgmt->sNodeDBTable[iNodeIndex].dwTSC47_16 = 0;
	pMgmt->sNodeDBTable[iNodeIndex].wTSC15_0 = 0;

	return ret;
}

/*
 * Description:
 *      get each stations encryption key
 *
 * Parameters:
 *  In:
 *      pDevice             -
 *      param               -
 *  Out:
 *
 * Return Value:
 *
 */
static int hostap_get_encryption(PSDevice pDevice,
				 struct viawget_hostapd_param *param,
				 int param_len)
{
	PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
	int ret = 0;
	int ii;
	int iNodeIndex = 0;

	param->u.crypt.err = 0;

	if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
	    param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
	    param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
		iNodeIndex = 0;
	} else {
		if (BSSbIsSTAInNodeDB(pDevice, param->sta_addr, &iNodeIndex) == FALSE) {
			param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ADDR;
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "hostap_get_encryption: HOSTAP_CRYPT_ERR_UNKNOWN_ADDR\n");
			return -EINVAL;
		}
	}
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "hostap_get_encryption: %d\n", iNodeIndex);
	memset(param->u.crypt.seq, 0, 8);
	for (ii = 0 ; ii < 8 ; ii++) {
		/* NOTE(review): the (BYTE) cast binds to KeyRSC before the
		 * shift, so every byte after the first is zero — looks like
		 * the intent was (BYTE)(KeyRSC >> (ii * 8)); confirm. */
		param->u.crypt.seq[ii] = (BYTE)pMgmt->sNodeDBTable[iNodeIndex].KeyRSC >> (ii * 8);
	}

	return ret;
}

/*
 * Description:
 *      vt6656_hostap_ioctl main function supported for hostap deamon.
* * Parameters: * In: * pDevice - * iw_point - * Out: * * Return Value: * */ int vt6656_hostap_ioctl(PSDevice pDevice, struct iw_point *p) { struct viawget_hostapd_param *param; int ret = 0; int ap_ioctl = 0; if (p->length < sizeof(struct viawget_hostapd_param) || p->length > VIAWGET_HOSTAPD_MAX_BUF_SIZE || !p->pointer) return -EINVAL; param = kmalloc((int)p->length, (int)GFP_KERNEL); if (param == NULL) return -ENOMEM; if (copy_from_user(param, p->pointer, p->length)) { ret = -EFAULT; goto out; } switch (param->cmd) { case VIAWGET_HOSTAPD_SET_ENCRYPTION: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_SET_ENCRYPTION \n"); spin_lock_irq(&pDevice->lock); ret = hostap_set_encryption(pDevice, param, p->length); spin_unlock_irq(&pDevice->lock); break; case VIAWGET_HOSTAPD_GET_ENCRYPTION: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_GET_ENCRYPTION \n"); spin_lock_irq(&pDevice->lock); ret = hostap_get_encryption(pDevice, param, p->length); spin_unlock_irq(&pDevice->lock); break; case VIAWGET_HOSTAPD_SET_ASSOC_AP_ADDR: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_SET_ASSOC_AP_ADDR \n"); return -EOPNOTSUPP; break; case VIAWGET_HOSTAPD_FLUSH: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_FLUSH \n"); spin_lock_irq(&pDevice->lock); hostap_flush_sta(pDevice); spin_unlock_irq(&pDevice->lock); break; case VIAWGET_HOSTAPD_ADD_STA: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_ADD_STA \n"); spin_lock_irq(&pDevice->lock); ret = hostap_add_sta(pDevice, param); spin_unlock_irq(&pDevice->lock); break; case VIAWGET_HOSTAPD_REMOVE_STA: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_REMOVE_STA \n"); spin_lock_irq(&pDevice->lock); ret = hostap_remove_sta(pDevice, param); spin_unlock_irq(&pDevice->lock); break; case VIAWGET_HOSTAPD_GET_INFO_STA: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_GET_INFO_STA \n"); ret = hostap_get_info_sta(pDevice, param); ap_ioctl = 1; break; /* case VIAWGET_HOSTAPD_RESET_TXEXC_STA: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO 
"VIAWGET_HOSTAPD_RESET_TXEXC_STA \n"); ret = hostap_reset_txexc_sta(pDevice, param); break; */ case VIAWGET_HOSTAPD_SET_FLAGS_STA: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_SET_FLAGS_STA \n"); ret = hostap_set_flags_sta(pDevice, param); break; case VIAWGET_HOSTAPD_MLME: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_MLME \n"); return -EOPNOTSUPP; case VIAWGET_HOSTAPD_SET_GENERIC_ELEMENT: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_SET_GENERIC_ELEMENT \n"); ret = hostap_set_generic_element(pDevice, param); break; case VIAWGET_HOSTAPD_SCAN_REQ: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_SCAN_REQ \n"); return -EOPNOTSUPP; case VIAWGET_HOSTAPD_STA_CLEAR_STATS: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_STA_CLEAR_STATS \n"); return -EOPNOTSUPP; default: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "vt6656_hostap_ioctl: unknown cmd=%d\n", (int)param->cmd); return -EOPNOTSUPP; break; } if ((ret == 0) && ap_ioctl) { if (copy_to_user(p->pointer, param, p->length)) { ret = -EFAULT; goto out; } } out: kfree(param); return ret; }
gpl-2.0
Supermaster34/3.0-Kernel-Galaxy-Player-US
net/bluetooth/bnep/sock.c
7753
5723
/* BNEP implementation for Linux Bluetooth stack (BlueZ). Copyright (C) 2001-2002 Inventel Systemes Written 2001-2002 by David Libault <david.libault@inventel.fr> Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. 
*/ #include <linux/module.h> #include <linux/types.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/poll.h> #include <linux/fcntl.h> #include <linux/skbuff.h> #include <linux/socket.h> #include <linux/ioctl.h> #include <linux/file.h> #include <linux/init.h> #include <linux/compat.h> #include <linux/gfp.h> #include <linux/uaccess.h> #include <net/sock.h> #include <asm/system.h> #include "bnep.h" static int bnep_sock_release(struct socket *sock) { struct sock *sk = sock->sk; BT_DBG("sock %p sk %p", sock, sk); if (!sk) return 0; sock_orphan(sk); sock_put(sk); return 0; } static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct bnep_connlist_req cl; struct bnep_connadd_req ca; struct bnep_conndel_req cd; struct bnep_conninfo ci; struct socket *nsock; void __user *argp = (void __user *)arg; int err; BT_DBG("cmd %x arg %lx", cmd, arg); switch (cmd) { case BNEPCONNADD: if (!capable(CAP_NET_ADMIN)) return -EACCES; if (copy_from_user(&ca, argp, sizeof(ca))) return -EFAULT; nsock = sockfd_lookup(ca.sock, &err); if (!nsock) return err; if (nsock->sk->sk_state != BT_CONNECTED) { sockfd_put(nsock); return -EBADFD; } ca.device[sizeof(ca.device)-1] = 0; err = bnep_add_connection(&ca, nsock); if (!err) { if (copy_to_user(argp, &ca, sizeof(ca))) err = -EFAULT; } else sockfd_put(nsock); return err; case BNEPCONNDEL: if (!capable(CAP_NET_ADMIN)) return -EACCES; if (copy_from_user(&cd, argp, sizeof(cd))) return -EFAULT; return bnep_del_connection(&cd); case BNEPGETCONNLIST: if (copy_from_user(&cl, argp, sizeof(cl))) return -EFAULT; if (cl.cnum <= 0) return -EINVAL; err = bnep_get_connlist(&cl); if (!err && copy_to_user(argp, &cl, sizeof(cl))) return -EFAULT; return err; case BNEPGETCONNINFO: if (copy_from_user(&ci, argp, sizeof(ci))) return -EFAULT; err = bnep_get_conninfo(&ci); if (!err && copy_to_user(argp, &ci, sizeof(ci))) return -EFAULT; return err; default: return -EINVAL; } return 0; } 
#ifdef CONFIG_COMPAT static int bnep_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { if (cmd == BNEPGETCONNLIST) { struct bnep_connlist_req cl; uint32_t uci; int err; if (get_user(cl.cnum, (uint32_t __user *) arg) || get_user(uci, (u32 __user *) (arg + 4))) return -EFAULT; cl.ci = compat_ptr(uci); if (cl.cnum <= 0) return -EINVAL; err = bnep_get_connlist(&cl); if (!err && put_user(cl.cnum, (uint32_t __user *) arg)) err = -EFAULT; return err; } return bnep_sock_ioctl(sock, cmd, arg); } #endif static const struct proto_ops bnep_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .release = bnep_sock_release, .ioctl = bnep_sock_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = bnep_sock_compat_ioctl, #endif .bind = sock_no_bind, .getname = sock_no_getname, .sendmsg = sock_no_sendmsg, .recvmsg = sock_no_recvmsg, .poll = sock_no_poll, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = sock_no_setsockopt, .getsockopt = sock_no_getsockopt, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .mmap = sock_no_mmap }; static struct proto bnep_proto = { .name = "BNEP", .owner = THIS_MODULE, .obj_size = sizeof(struct bt_sock) }; static int bnep_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; BT_DBG("sock %p", sock); if (sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sock->ops = &bnep_sock_ops; sock->state = SS_UNCONNECTED; sock_reset_flag(sk, SOCK_ZAPPED); sk->sk_protocol = protocol; sk->sk_state = BT_OPEN; return 0; } static const struct net_proto_family bnep_sock_family_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .create = bnep_sock_create }; int __init bnep_sock_init(void) { int err; err = proto_register(&bnep_proto, 0); if (err < 0) return err; err = bt_sock_register(BTPROTO_BNEP, &bnep_sock_family_ops); if (err < 0) goto 
error; return 0; error: BT_ERR("Can't register BNEP socket"); proto_unregister(&bnep_proto); return err; } void __exit bnep_sock_cleanup(void) { if (bt_sock_unregister(BTPROTO_BNEP) < 0) BT_ERR("Can't unregister BNEP socket"); proto_unregister(&bnep_proto); }
gpl-2.0
SlimDev/kernel_samsung_msm8660-common
net/sched/sch_drr.c
8009
11557
/* * net/sched/sch_drr.c Deficit Round Robin scheduler * * Copyright (c) 2008 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/pkt_sched.h> #include <net/sch_generic.h> #include <net/pkt_sched.h> #include <net/pkt_cls.h> struct drr_class { struct Qdisc_class_common common; unsigned int refcnt; unsigned int filter_cnt; struct gnet_stats_basic_packed bstats; struct gnet_stats_queue qstats; struct gnet_stats_rate_est rate_est; struct list_head alist; struct Qdisc *qdisc; u32 quantum; u32 deficit; }; struct drr_sched { struct list_head active; struct tcf_proto *filter_list; struct Qdisc_class_hash clhash; }; static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid) { struct drr_sched *q = qdisc_priv(sch); struct Qdisc_class_common *clc; clc = qdisc_class_find(&q->clhash, classid); if (clc == NULL) return NULL; return container_of(clc, struct drr_class, common); } static void drr_purge_queue(struct drr_class *cl) { unsigned int len = cl->qdisc->q.qlen; qdisc_reset(cl->qdisc); qdisc_tree_decrease_qlen(cl->qdisc, len); } static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = { [TCA_DRR_QUANTUM] = { .type = NLA_U32 }, }; static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca, unsigned long *arg) { struct drr_sched *q = qdisc_priv(sch); struct drr_class *cl = (struct drr_class *)*arg; struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_DRR_MAX + 1]; u32 quantum; int err; if (!opt) return -EINVAL; err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy); if (err < 0) return err; if (tb[TCA_DRR_QUANTUM]) { quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]); if (quantum == 0) return -EINVAL; } else quantum 
= psched_mtu(qdisc_dev(sch)); if (cl != NULL) { if (tca[TCA_RATE]) { err = gen_replace_estimator(&cl->bstats, &cl->rate_est, qdisc_root_sleeping_lock(sch), tca[TCA_RATE]); if (err) return err; } sch_tree_lock(sch); if (tb[TCA_DRR_QUANTUM]) cl->quantum = quantum; sch_tree_unlock(sch); return 0; } cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL); if (cl == NULL) return -ENOBUFS; cl->refcnt = 1; cl->common.classid = classid; cl->quantum = quantum; cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid); if (cl->qdisc == NULL) cl->qdisc = &noop_qdisc; if (tca[TCA_RATE]) { err = gen_replace_estimator(&cl->bstats, &cl->rate_est, qdisc_root_sleeping_lock(sch), tca[TCA_RATE]); if (err) { qdisc_destroy(cl->qdisc); kfree(cl); return err; } } sch_tree_lock(sch); qdisc_class_hash_insert(&q->clhash, &cl->common); sch_tree_unlock(sch); qdisc_class_hash_grow(sch, &q->clhash); *arg = (unsigned long)cl; return 0; } static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl) { gen_kill_estimator(&cl->bstats, &cl->rate_est); qdisc_destroy(cl->qdisc); kfree(cl); } static int drr_delete_class(struct Qdisc *sch, unsigned long arg) { struct drr_sched *q = qdisc_priv(sch); struct drr_class *cl = (struct drr_class *)arg; if (cl->filter_cnt > 0) return -EBUSY; sch_tree_lock(sch); drr_purge_queue(cl); qdisc_class_hash_remove(&q->clhash, &cl->common); BUG_ON(--cl->refcnt == 0); /* * This shouldn't happen: we "hold" one cops->get() when called * from tc_ctl_tclass; the destroy method is done from cops->put(). 
*/ sch_tree_unlock(sch); return 0; } static unsigned long drr_get_class(struct Qdisc *sch, u32 classid) { struct drr_class *cl = drr_find_class(sch, classid); if (cl != NULL) cl->refcnt++; return (unsigned long)cl; } static void drr_put_class(struct Qdisc *sch, unsigned long arg) { struct drr_class *cl = (struct drr_class *)arg; if (--cl->refcnt == 0) drr_destroy_class(sch, cl); } static struct tcf_proto **drr_tcf_chain(struct Qdisc *sch, unsigned long cl) { struct drr_sched *q = qdisc_priv(sch); if (cl) return NULL; return &q->filter_list; } static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid) { struct drr_class *cl = drr_find_class(sch, classid); if (cl != NULL) cl->filter_cnt++; return (unsigned long)cl; } static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg) { struct drr_class *cl = (struct drr_class *)arg; cl->filter_cnt--; } static int drr_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, struct Qdisc **old) { struct drr_class *cl = (struct drr_class *)arg; if (new == NULL) { new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, cl->common.classid); if (new == NULL) new = &noop_qdisc; } sch_tree_lock(sch); drr_purge_queue(cl); *old = cl->qdisc; cl->qdisc = new; sch_tree_unlock(sch); return 0; } static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg) { struct drr_class *cl = (struct drr_class *)arg; return cl->qdisc; } static void drr_qlen_notify(struct Qdisc *csh, unsigned long arg) { struct drr_class *cl = (struct drr_class *)arg; if (cl->qdisc->q.qlen == 0) list_del(&cl->alist); } static int drr_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb, struct tcmsg *tcm) { struct drr_class *cl = (struct drr_class *)arg; struct nlattr *nest; tcm->tcm_parent = TC_H_ROOT; tcm->tcm_handle = cl->common.classid; tcm->tcm_info = cl->qdisc->handle; nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; NLA_PUT_U32(skb, TCA_DRR_QUANTUM, 
cl->quantum); return nla_nest_end(skb, nest); nla_put_failure: nla_nest_cancel(skb, nest); return -EMSGSIZE; } static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) { struct drr_class *cl = (struct drr_class *)arg; struct tc_drr_stats xstats; memset(&xstats, 0, sizeof(xstats)); if (cl->qdisc->q.qlen) { xstats.deficit = cl->deficit; cl->qdisc->qstats.qlen = cl->qdisc->q.qlen; } if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0) return -1; return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); } static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg) { struct drr_sched *q = qdisc_priv(sch); struct drr_class *cl; struct hlist_node *n; unsigned int i; if (arg->stop) return; for (i = 0; i < q->clhash.hashsize; i++) { hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { if (arg->count < arg->skip) { arg->count++; continue; } if (arg->fn(sch, (unsigned long)cl, arg) < 0) { arg->stop = 1; return; } arg->count++; } } } static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) { struct drr_sched *q = qdisc_priv(sch); struct drr_class *cl; struct tcf_result res; int result; if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) { cl = drr_find_class(sch, skb->priority); if (cl != NULL) return cl; } *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; result = tc_classify(skb, q->filter_list, &res); if (result >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { case TC_ACT_QUEUED: case TC_ACT_STOLEN: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: return NULL; } #endif cl = (struct drr_class *)res.class; if (cl == NULL) cl = drr_find_class(sch, res.classid); return cl; } return NULL; } static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct drr_sched *q = qdisc_priv(sch); struct drr_class *cl; int err; cl = drr_classify(skb, sch, &err); if (cl == NULL) { if (err 
& __NET_XMIT_BYPASS) sch->qstats.drops++; kfree_skb(skb); return err; } err = qdisc_enqueue(skb, cl->qdisc); if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { cl->qstats.drops++; sch->qstats.drops++; } return err; } if (cl->qdisc->q.qlen == 1) { list_add_tail(&cl->alist, &q->active); cl->deficit = cl->quantum; } bstats_update(&cl->bstats, skb); sch->q.qlen++; return err; } static struct sk_buff *drr_dequeue(struct Qdisc *sch) { struct drr_sched *q = qdisc_priv(sch); struct drr_class *cl; struct sk_buff *skb; unsigned int len; if (list_empty(&q->active)) goto out; while (1) { cl = list_first_entry(&q->active, struct drr_class, alist); skb = cl->qdisc->ops->peek(cl->qdisc); if (skb == NULL) goto out; len = qdisc_pkt_len(skb); if (len <= cl->deficit) { cl->deficit -= len; skb = qdisc_dequeue_peeked(cl->qdisc); if (cl->qdisc->q.qlen == 0) list_del(&cl->alist); qdisc_bstats_update(sch, skb); sch->q.qlen--; return skb; } cl->deficit += cl->quantum; list_move_tail(&cl->alist, &q->active); } out: return NULL; } static unsigned int drr_drop(struct Qdisc *sch) { struct drr_sched *q = qdisc_priv(sch); struct drr_class *cl; unsigned int len; list_for_each_entry(cl, &q->active, alist) { if (cl->qdisc->ops->drop) { len = cl->qdisc->ops->drop(cl->qdisc); if (len > 0) { sch->q.qlen--; if (cl->qdisc->q.qlen == 0) list_del(&cl->alist); return len; } } } return 0; } static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt) { struct drr_sched *q = qdisc_priv(sch); int err; err = qdisc_class_hash_init(&q->clhash); if (err < 0) return err; INIT_LIST_HEAD(&q->active); return 0; } static void drr_reset_qdisc(struct Qdisc *sch) { struct drr_sched *q = qdisc_priv(sch); struct drr_class *cl; struct hlist_node *n; unsigned int i; for (i = 0; i < q->clhash.hashsize; i++) { hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { if (cl->qdisc->q.qlen) list_del(&cl->alist); qdisc_reset(cl->qdisc); } } sch->q.qlen = 0; } static void drr_destroy_qdisc(struct 
Qdisc *sch) { struct drr_sched *q = qdisc_priv(sch); struct drr_class *cl; struct hlist_node *n, *next; unsigned int i; tcf_destroy_chain(&q->filter_list); for (i = 0; i < q->clhash.hashsize; i++) { hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i], common.hnode) drr_destroy_class(sch, cl); } qdisc_class_hash_destroy(&q->clhash); } static const struct Qdisc_class_ops drr_class_ops = { .change = drr_change_class, .delete = drr_delete_class, .get = drr_get_class, .put = drr_put_class, .tcf_chain = drr_tcf_chain, .bind_tcf = drr_bind_tcf, .unbind_tcf = drr_unbind_tcf, .graft = drr_graft_class, .leaf = drr_class_leaf, .qlen_notify = drr_qlen_notify, .dump = drr_dump_class, .dump_stats = drr_dump_class_stats, .walk = drr_walk, }; static struct Qdisc_ops drr_qdisc_ops __read_mostly = { .cl_ops = &drr_class_ops, .id = "drr", .priv_size = sizeof(struct drr_sched), .enqueue = drr_enqueue, .dequeue = drr_dequeue, .peek = qdisc_peek_dequeued, .drop = drr_drop, .init = drr_init_qdisc, .reset = drr_reset_qdisc, .destroy = drr_destroy_qdisc, .owner = THIS_MODULE, }; static int __init drr_init(void) { return register_qdisc(&drr_qdisc_ops); } static void __exit drr_exit(void) { unregister_qdisc(&drr_qdisc_ops); } module_init(drr_init); module_exit(drr_exit); MODULE_LICENSE("GPL");
gpl-2.0
Project-Elite/elite_kernel_m7
arch/ia64/pci/fixup.c
9289
2138
/* * Exceptions for specific devices. Usually work-arounds for fatal design flaws. * Derived from fixup.c of i386 tree. */ #include <linux/pci.h> #include <linux/init.h> #include <asm/machvec.h> /* * Fixup to mark boot BIOS video selected by BIOS before it changes * * From information provided by "Jon Smirl" <jonsmirl@gmail.com> * * The standard boot ROM sequence for an x86 machine uses the BIOS * to select an initial video card for boot display. This boot video * card will have it's BIOS copied to C0000 in system RAM. * IORESOURCE_ROM_SHADOW is used to associate the boot video * card with this copy. On laptops this copy has to be used since * the main ROM may be compressed or combined with another image. * See pci_map_rom() for use of this flag. IORESOURCE_ROM_SHADOW * is marked here since the boot video device will be the only enabled * video device at this point. */ static void __devinit pci_fixup_video(struct pci_dev *pdev) { struct pci_dev *bridge; struct pci_bus *bus; u16 config; if ((strcmp(platform_name, "dig") != 0) && (strcmp(platform_name, "hpzx1") != 0)) return; /* Maybe, this machine supports legacy memory map. */ if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA) return; /* Is VGA routed to us? */ bus = pdev->bus; while (bus) { bridge = bus->self; /* * From information provided by * "David Miller" <davem@davemloft.net> * The bridge control register is valid for PCI header * type BRIDGE, or CARDBUS. Host to PCI controllers use * PCI header type NORMAL. 
*/ if (bridge &&((bridge->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||(bridge->hdr_type == PCI_HEADER_TYPE_CARDBUS))) { pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &config); if (!(config & PCI_BRIDGE_CTL_VGA)) return; } bus = bus->parent; } pci_read_config_word(pdev, PCI_COMMAND, &config); if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW; dev_printk(KERN_DEBUG, &pdev->dev, "Boot video device\n"); } } DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video);
gpl-2.0
lithid/furnace_kernel_lge_hammerhead
drivers/gpu/drm/nouveau/nouveau_ioc32.c
9289
2223
/** * \file mga_ioc32.c * * 32-bit ioctl compatibility routines for the MGA DRM. * * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich * * * Copyright (C) Paul Mackerras 2005 * Copyright (C) Egbert Eich 2003,2004 * Copyright (C) Dave Airlie 2005 * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <linux/compat.h> #include "drmP.h" #include "drm.h" #include "nouveau_drv.h" /** * Called whenever a 32-bit process running under a 64-bit kernel * performs an ioctl on /dev/dri/card<n>. * * \param filp file pointer. * \param cmd command. * \param arg user argument. * \return zero on success or negative number on failure. 
*/ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { unsigned int nr = DRM_IOCTL_NR(cmd); drm_ioctl_compat_t *fn = NULL; int ret; if (nr < DRM_COMMAND_BASE) return drm_compat_ioctl(filp, cmd, arg); #if 0 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE]; #endif if (fn != NULL) ret = (*fn)(filp, cmd, arg); else ret = drm_ioctl(filp, cmd, arg); return ret; }
gpl-2.0
voidz777/android_kernel_samsung_msm8660-common
arch/mn10300/unit-asb2364/unit-init.c
12105
3490
/* ASB2364 initialisation * * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/param.h> #include <linux/init.h> #include <linux/device.h> #include <linux/delay.h> #include <asm/io.h> #include <asm/setup.h> #include <asm/processor.h> #include <asm/irq.h> #include <asm/intctl-regs.h> #include <asm/serial-regs.h> #include <unit/fpga-regs.h> #include <unit/serial.h> #include <unit/smsc911x.h> #define TTYS0_SERIAL_IER __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_IER * 2, u8) #define LAN_IRQ_CFG __SYSREG(SMSC911X_BASE + 0x54, u32) #define LAN_INT_EN __SYSREG(SMSC911X_BASE + 0x5c, u32) /* * initialise some of the unit hardware before gdbstub is set up */ asmlinkage void __init unit_init(void) { /* Make sure we aren't going to get unexpected interrupts */ TTYS0_SERIAL_IER = 0; SC0RXICR = 0; SC0TXICR = 0; SC1RXICR = 0; SC1TXICR = 0; SC2RXICR = 0; SC2TXICR = 0; /* Attempt to reset the FPGA attached peripherals */ ASB2364_FPGA_REG_RESET_LAN = 0x0000; SyncExBus(); ASB2364_FPGA_REG_RESET_UART = 0x0000; SyncExBus(); ASB2364_FPGA_REG_RESET_I2C = 0x0000; SyncExBus(); ASB2364_FPGA_REG_RESET_USB = 0x0000; SyncExBus(); ASB2364_FPGA_REG_RESET_AV = 0x0000; SyncExBus(); /* set up the external interrupts */ /* XIRQ[0]: NAND RXBY */ /* SET_XIRQ_TRIGGER(0, XIRQ_TRIGGER_LOWLEVEL); */ /* XIRQ[1]: LAN, UART, I2C, USB, PCI, FPGA */ SET_XIRQ_TRIGGER(1, XIRQ_TRIGGER_LOWLEVEL); /* XIRQ[2]: Extend Slot 1-9 */ /* SET_XIRQ_TRIGGER(2, XIRQ_TRIGGER_LOWLEVEL); */ #if defined(CONFIG_EXT_SERIAL_IRQ_LEVEL) && \ defined(CONFIG_ETHERNET_IRQ_LEVEL) && \ (CONFIG_EXT_SERIAL_IRQ_LEVEL != CONFIG_ETHERNET_IRQ_LEVEL) # error CONFIG_EXT_SERIAL_IRQ_LEVEL != 
CONFIG_ETHERNET_IRQ_LEVEL #endif #if defined(CONFIG_EXT_SERIAL_IRQ_LEVEL) set_intr_level(XIRQ1, NUM2GxICR_LEVEL(CONFIG_EXT_SERIAL_IRQ_LEVEL)); #elif defined(CONFIG_ETHERNET_IRQ_LEVEL) set_intr_level(XIRQ1, NUM2GxICR_LEVEL(CONFIG_ETHERNET_IRQ_LEVEL)); #endif } /* * initialise the rest of the unit hardware after gdbstub is ready */ asmlinkage void __init unit_setup(void) { /* Release the reset on the SMSC911X so that it is ready by the time we * need it */ ASB2364_FPGA_REG_RESET_LAN = 0x0001; SyncExBus(); ASB2364_FPGA_REG_RESET_UART = 0x0001; SyncExBus(); ASB2364_FPGA_REG_RESET_I2C = 0x0001; SyncExBus(); ASB2364_FPGA_REG_RESET_USB = 0x0001; SyncExBus(); ASB2364_FPGA_REG_RESET_AV = 0x0001; SyncExBus(); /* Make sure the ethernet chipset isn't going to give us an interrupt * storm from stuff it was doing pre-reset */ LAN_IRQ_CFG = 0; LAN_INT_EN = 0; } /* * initialise the external interrupts used by a unit of this type */ void __init unit_init_IRQ(void) { unsigned int extnum; for (extnum = 0 ; extnum < NR_XIRQS ; extnum++) { switch (GET_XIRQ_TRIGGER(extnum)) { /* LEVEL triggered interrupts should be made * post-ACK'able as they hold their lines until * serviced */ case XIRQ_TRIGGER_HILEVEL: case XIRQ_TRIGGER_LOWLEVEL: mn10300_set_lateack_irq_type(XIRQ2IRQ(extnum)); break; default: break; } } #define IRQCTL __SYSREG(0xd5000090, u32) IRQCTL |= 0x02; irq_fpga_init(); }
gpl-2.0
beta1993/android_kernel_htc_msm7227
fs/nfs/symlink.c
12617
1661
/* * linux/fs/nfs/symlink.c * * Copyright (C) 1992 Rick Sladkey * * Optimization changes Copyright (C) 1994 Florian La Roche * * Jun 7 1999, cache symlink lookups in the page cache. -DaveM * * nfs symlink handling code */ #include <linux/time.h> #include <linux/errno.h> #include <linux/sunrpc/clnt.h> #include <linux/nfs.h> #include <linux/nfs2.h> #include <linux/nfs_fs.h> #include <linux/pagemap.h> #include <linux/stat.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/namei.h> /* Symlink caching in the page cache is even more simplistic * and straight-forward than readdir caching. */ static int nfs_symlink_filler(struct inode *inode, struct page *page) { int error; error = NFS_PROTO(inode)->readlink(inode, page, 0, PAGE_SIZE); if (error < 0) goto error; SetPageUptodate(page); unlock_page(page); return 0; error: SetPageError(page); unlock_page(page); return -EIO; } static void *nfs_follow_link(struct dentry *dentry, struct nameidata *nd) { struct inode *inode = dentry->d_inode; struct page *page; void *err; err = ERR_PTR(nfs_revalidate_mapping(inode, inode->i_mapping)); if (err) goto read_failed; page = read_cache_page(&inode->i_data, 0, (filler_t *)nfs_symlink_filler, inode); if (IS_ERR(page)) { err = page; goto read_failed; } nd_set_link(nd, kmap(page)); return page; read_failed: nd_set_link(nd, err); return NULL; } /* * symlinks can't do much... */ const struct inode_operations nfs_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = nfs_follow_link, .put_link = page_put_link, .getattr = nfs_getattr, .setattr = nfs_setattr, };
gpl-2.0
Split-Screen/android_kernel_lge_gee
drivers/video/riva/nv_driver.c
14665
9688
/* $XConsortium: nv_driver.c /main/3 1996/10/28 05:13:37 kaleb $ */
/*
 * Copyright 1996-1997  David J. McKay
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * DAVID J. MCKAY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * GPL licensing note -- nVidia is allowing a liberal interpretation of
 * the documentation restriction above, to merely say that this nVidia's
 * copyright and disclaimer should be included with all code derived
 * from this source.  -- Jeff Garzik <jgarzik@pobox.com>, 01/Nov/99
 */

/* Hacked together from mga driver and 3.3.4 NVIDIA driver by
   Jarno Paananen <jpaana@s2.org> */

/* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/nv/nv_setup.c,v 1.18
   2002/08/05 20:47:06 mvojkovi Exp $ */

#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include "nv_type.h"
#include "rivafb.h"
#include "nvreg.h"

#define PFX "rivafb: "

/* Read the VGA miscellaneous output register (0x3cc). */
static inline unsigned char MISCin(struct riva_par *par)
{
	return (VGA_RD08(par->riva.PVIO, 0x3cc));
}

/*
 * Probe whether a monitor is attached to the given RAMDAC (@second
 * selects the second head's RAMDAC, 0x800 U032-words further on).
 *
 * The original 0x052C/0x0608 register contents are saved, a test
 * pattern is driven out, load-sense bit 28 of 0x0608 is sampled, and
 * the registers are restored before returning the result.
 */
static Bool
riva_is_connected(struct riva_par *par, Bool second)
{
	volatile U032 __iomem *PRAMDAC = par->riva.PRAMDAC0;
	U032 reg52C, reg608;
	Bool present;

	if (second)
		PRAMDAC += 0x800;

	reg52C = NV_RD32(PRAMDAC, 0x052C);
	reg608 = NV_RD32(PRAMDAC, 0x0608);

	NV_WR32(PRAMDAC, 0x0608, reg608 & ~0x00010000);

	NV_WR32(PRAMDAC, 0x052C, reg52C & 0x0000FEEE);
	mdelay(1);
	NV_WR32(PRAMDAC, 0x052C, NV_RD32(PRAMDAC, 0x052C) | 1);

	NV_WR32(par->riva.PRAMDAC0, 0x0610, 0x94050140);
	NV_WR32(par->riva.PRAMDAC0, 0x0608, 0x00001000);

	mdelay(1);

	present = (NV_RD32(PRAMDAC, 0x0608) & (1 << 28)) ? TRUE : FALSE;

	NV_WR32(par->riva.PRAMDAC0, 0x0608,
		NV_RD32(par->riva.PRAMDAC0, 0x0608) & 0x0000EFFF);

	/* restore the probed RAMDAC's original state */
	NV_WR32(PRAMDAC, 0x052C, reg52C);
	NV_WR32(PRAMDAC, 0x0608, reg608);

	return present;
}

/*
 * Report which CRTC was auto-detected and apply the user's forceCRTC
 * module-parameter override (-1 means "no override").
 */
static void
riva_override_CRTC(struct riva_par *par)
{
	printk(KERN_INFO PFX
		"Detected CRTC controller %i being used\n",
		par->SecondCRTC ? 1 : 0);

	if (par->forceCRTC != -1) {
		printk(KERN_INFO PFX
			"Forcing usage of CRTC %i\n", par->forceCRTC);
		par->SecondCRTC = par->forceCRTC;
	}
}

/*
 * Decide whether the second CRTC drives the display.  Flat panels on
 * known mobile/IGP chip IDs use the second CRTC; otherwise the answer
 * comes from load-sensing each head via riva_is_connected() and bit 8
 * of the corresponding 0x052C register.
 */
static void
riva_is_second(struct riva_par *par)
{
	if (par->FlatPanel == 1) {
		switch (par->Chipset & 0xffff) {
		case 0x0174:
		case 0x0175:
		case 0x0176:
		case 0x0177:
		case 0x0179:
		case 0x017C:
		case 0x017D:
		case 0x0186:
		case 0x0187:
		/* this might not be a good default for the chips below */
		case 0x0286:
		case 0x028C:
		case 0x0316:
		case 0x0317:
		case 0x031A:
		case 0x031B:
		case 0x031C:
		case 0x031D:
		case 0x031E:
		case 0x031F:
		case 0x0324:
		case 0x0325:
		case 0x0328:
		case 0x0329:
		case 0x032C:
		case 0x032D:
			par->SecondCRTC = TRUE;
			break;
		default:
			par->SecondCRTC = FALSE;
			break;
		}
	} else {
		if (riva_is_connected(par, 0)) {
			if (NV_RD32(par->riva.PRAMDAC0, 0x0000052C) & 0x100)
				par->SecondCRTC = TRUE;
			else
				par->SecondCRTC = FALSE;
		} else if (riva_is_connected(par, 1)) {
			if (NV_RD32(par->riva.PRAMDAC0, 0x0000252C) & 0x100)
				par->SecondCRTC = TRUE;
			else
				par->SecondCRTC = FALSE;
		} else /* default */
			par->SecondCRTC = FALSE;
	}
	riva_override_CRTC(par);
}

/*
 * Return the amount of video memory in KiB, decoded from the PFB boot
 * register (NV3/NV4) or PFB config (NV10+).  Integrated GeForce2/NV1F
 * parts carve memory out of system RAM, so their size is read from the
 * host bridge's PCI config space instead.
 *
 * NOTE(review): pci_get_bus_and_slot() can return NULL if the bridge at
 * 00:01.0 is absent; the config-space reads below do not check for that
 * — confirm whether the IGP chip IDs guarantee its presence.
 */
unsigned long riva_get_memlen(struct riva_par *par)
{
	RIVA_HW_INST *chip = &par->riva;
	unsigned long memlen = 0;
	unsigned int chipset = par->Chipset;
	struct pci_dev* dev;
	u32 amt;

	switch (chip->Architecture) {
	case NV_ARCH_03:
		if (NV_RD32(chip->PFB, 0x00000000) & 0x00000020) {
			if (((NV_RD32(chip->PMC, 0x00000000) & 0xF0) == 0x20)
			    && ((NV_RD32(chip->PMC, 0x00000000)&0x0F)>=0x02)) {
				/*
				 * SDRAM 128 ZX.
				 */
				switch (NV_RD32(chip->PFB,0x00000000) & 0x03) {
				case 2:
					memlen = 1024 * 4;
					break;
				case 1:
					memlen = 1024 * 2;
					break;
				default:
					memlen = 1024 * 8;
					break;
				}
			} else {
				memlen = 1024 * 8;
			}
		} else {
			/*
			 * SGRAM 128.
			 */
			switch (NV_RD32(chip->PFB, 0x00000000) & 0x00000003) {
			case 0:
				memlen = 1024 * 8;
				break;
			case 2:
				memlen = 1024 * 4;
				break;
			default:
				memlen = 1024 * 2;
				break;
			}
		}
		break;
	case NV_ARCH_04:
		if (NV_RD32(chip->PFB, 0x00000000) & 0x00000100) {
			memlen = ((NV_RD32(chip->PFB, 0x00000000)>>12)&0x0F) *
				1024 * 2 + 1024 * 2;
		} else {
			switch (NV_RD32(chip->PFB, 0x00000000) & 0x00000003) {
			case 0:
				memlen = 1024 * 32;
				break;
			case 1:
				memlen = 1024 * 4;
				break;
			case 2:
				memlen = 1024 * 8;
				break;
			case 3:
			default:
				memlen = 1024 * 16;
				break;
			}
		}
		break;
	case NV_ARCH_10:
	case NV_ARCH_20:
	case NV_ARCH_30:
		if (chipset == NV_CHIP_IGEFORCE2) {
			/* integrated part: size lives in the host bridge */
			dev = pci_get_bus_and_slot(0, 1);
			pci_read_config_dword(dev, 0x7C, &amt);
			pci_dev_put(dev);
			memlen = (((amt >> 6) & 31) + 1) * 1024;
		} else if (chipset == NV_CHIP_0x01F0) {
			dev = pci_get_bus_and_slot(0, 1);
			pci_read_config_dword(dev, 0x84, &amt);
			pci_dev_put(dev);
			memlen = (((amt >> 4) & 127) + 1) * 1024;
		} else {
			switch ((NV_RD32(chip->PFB, 0x0000020C) >> 20) &
				0x000000FF) {
			case 0x02:
				memlen = 1024 * 2;
				break;
			case 0x04:
				memlen = 1024 * 4;
				break;
			case 0x08:
				memlen = 1024 * 8;
				break;
			case 0x10:
				memlen = 1024 * 16;
				break;
			case 0x20:
				memlen = 1024 * 32;
				break;
			case 0x40:
				memlen = 1024 * 64;
				break;
			case 0x80:
				memlen = 1024 * 128;
				break;
			default:
				memlen = 1024 * 16;
				break;
			}
		}
		break;
	}
	return memlen;
}

/*
 * Return the maximum dot clock (in kHz-scaled units used by the rest of
 * rivafb): 800000 for SDRAM-128ZX-style memory, 1000000 otherwise.
 */
unsigned long riva_get_maxdclk(struct riva_par *par)
{
	RIVA_HW_INST *chip = &par->riva;
	unsigned long dclk = 0;

	switch (chip->Architecture) {
	case NV_ARCH_03:
		if (NV_RD32(chip->PFB, 0x00000000) & 0x00000020) {
			if (((NV_RD32(chip->PMC, 0x00000000) & 0xF0) == 0x20)
			    && ((NV_RD32(chip->PMC,0x00000000)&0x0F) >= 0x02)) {
				/*
				 * SDRAM 128 ZX.
				 */
				dclk = 800000;
			} else {
				dclk = 1000000;
			}
		} else {
			/*
			 * SGRAM 128.
			 */
			dclk = 1000000;
		}
		break;
	case NV_ARCH_04:
	case NV_ARCH_10:
	case NV_ARCH_20:
	case NV_ARCH_30:
		switch ((NV_RD32(chip->PFB, 0x00000000) >> 3) & 0x00000003) {
		case 3:
			dclk = 800000;
			break;
		default:
			dclk = 1000000;
			break;
		}
		break;
	}
	return dclk;
}

/*
 * One-time setup shared by all chip generations: map the fixed MMIO
 * sub-ranges into the riva HW instance, guess FlatPanel for known
 * laptop chip IDs, pick the active CRTC, and point the PCIO/PCRTC/
 * PRAMDAC/PDIO aliases at the chosen head before calling
 * RivaGetConfig() to finish chip-specific initialisation.
 */
void
riva_common_setup(struct riva_par *par)
{
	par->riva.EnableIRQ = 0;
	par->riva.PRAMDAC0 =
		(volatile U032 __iomem *)(par->ctrl_base + 0x00680000);
	par->riva.PFB =
		(volatile U032 __iomem *)(par->ctrl_base + 0x00100000);
	par->riva.PFIFO =
		(volatile U032 __iomem *)(par->ctrl_base + 0x00002000);
	par->riva.PGRAPH =
		(volatile U032 __iomem *)(par->ctrl_base + 0x00400000);
	par->riva.PEXTDEV =
		(volatile U032 __iomem *)(par->ctrl_base + 0x00101000);
	par->riva.PTIMER =
		(volatile U032 __iomem *)(par->ctrl_base + 0x00009000);
	par->riva.PMC =
		(volatile U032 __iomem *)(par->ctrl_base + 0x00000000);
	par->riva.FIFO =
		(volatile U032 __iomem *)(par->ctrl_base + 0x00800000);

	par->riva.PCIO0 = par->ctrl_base + 0x00601000;
	par->riva.PDIO0 = par->ctrl_base + 0x00681000;
	par->riva.PVIO = par->ctrl_base + 0x000C0000;

	/* colour vs. mono VGA register base, from MISC output bit 0 */
	par->riva.IO = (MISCin(par) & 0x01) ? 0x3D0 : 0x3B0;

	if (par->FlatPanel == -1) {
		switch (par->Chipset & 0xffff) {
		case 0x0112:   /* known laptop chips */
		case 0x0174:
		case 0x0175:
		case 0x0176:
		case 0x0177:
		case 0x0179:
		case 0x017C:
		case 0x017D:
		case 0x0186:
		case 0x0187:
		case 0x0286:
		case 0x028C:
		case 0x0316:
		case 0x0317:
		case 0x031A:
		case 0x031B:
		case 0x031C:
		case 0x031D:
		case 0x031E:
		case 0x031F:
		case 0x0324:
		case 0x0325:
		case 0x0328:
		case 0x0329:
		case 0x032C:
		case 0x032D:
			printk(KERN_INFO PFX
				"On a laptop. Assuming Digital Flat Panel\n");
			par->FlatPanel = 1;
			break;
		default:
			break;
		}
	}

	switch (par->Chipset & 0x0ff0) {
	case 0x0110:
		if (par->Chipset == NV_CHIP_GEFORCE2_GO)
			par->SecondCRTC = TRUE;
#if defined(__powerpc__)
		if (par->FlatPanel == 1)
			par->SecondCRTC = TRUE;
#endif
		riva_override_CRTC(par);
		break;
	case 0x0170:
	case 0x0180:
	case 0x01F0:
	case 0x0250:
	case 0x0280:
	case 0x0300:
	case 0x0310:
	case 0x0320:
	case 0x0330:
	case 0x0340:
		riva_is_second(par);
		break;
	default:
		break;
	}

	if (par->SecondCRTC) {
		par->riva.PCIO = par->riva.PCIO0 + 0x2000;
		par->riva.PCRTC = par->riva.PCRTC0 + 0x800;
		par->riva.PRAMDAC = par->riva.PRAMDAC0 + 0x800;
		par->riva.PDIO = par->riva.PDIO0 + 0x2000;
	} else {
		par->riva.PCIO = par->riva.PCIO0;
		par->riva.PCRTC = par->riva.PCRTC0;
		par->riva.PRAMDAC = par->riva.PRAMDAC0;
		par->riva.PDIO = par->riva.PDIO0;
	}

	if (par->FlatPanel == -1) {
		/* Fix me, need x86 DDC code */
		par->FlatPanel = 0;
	}
	par->riva.flatPanel = (par->FlatPanel > 0) ? TRUE : FALSE;

	RivaGetConfig(&par->riva, par->Chipset);
}
gpl-2.0
markyzq/kernel-next
sound/isa/gus/gus_dram.c
14921
2994
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  DRAM access routines
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/time.h>
#include <sound/core.h>
#include <sound/gus.h>
#include <sound/info.h>

/*
 * Copy @size bytes from userspace buffer @_buffer into GUS on-card DRAM
 * starting at card address @address.
 *
 * The transfer is staged through a 256-byte kernel bounce buffer.  On
 * InterWave cards the whole chunk is streamed through the DRAM port with
 * outsb() under reg_lock (after enabling memory access via the
 * MEMORY_CONTROL register); classic GF1 cards fall back to one
 * snd_gf1_poke() per byte.  Returns 0 or -EFAULT on a bad user pointer.
 */
static int snd_gus_dram_poke(struct snd_gus_card *gus, char __user *_buffer,
			     unsigned int address, unsigned int size)
{
	unsigned long flags;
	unsigned int size1, size2;
	char buffer[256], *pbuffer;

	while (size > 0) {
		size1 = size > sizeof(buffer) ? sizeof(buffer) : size;
		if (copy_from_user(buffer, _buffer, size1))
			return -EFAULT;
		if (gus->interwave) {
			spin_lock_irqsave(&gus->reg_lock, flags);
			snd_gf1_write8(gus, SNDRV_GF1_GB_MEMORY_CONTROL, 0x01);
			snd_gf1_dram_addr(gus, address);
			/* burst the chunk through the auto-incrementing
			   DRAM port */
			outsb(GUSP(gus, DRAM), buffer, size1);
			spin_unlock_irqrestore(&gus->reg_lock, flags);
			address += size1;
		} else {
			pbuffer = buffer;
			size2 = size1;
			while (size2--)
				snd_gf1_poke(gus, address++, *pbuffer++);
		}
		size -= size1;
		_buffer += size1;
	}
	return 0;
}

/* Public wrapper: write user data into card DRAM. */
int snd_gus_dram_write(struct snd_gus_card *gus, char __user *buffer,
		       unsigned int address, unsigned int size)
{
	return snd_gus_dram_poke(gus, buffer, address, size);
}

/*
 * Copy @size bytes from card memory at @address out to userspace buffer
 * @_buffer; @rom selects ROM (1) or DRAM (0) on InterWave cards.
 *
 * Mirror image of snd_gus_dram_poke(): InterWave cards stream through
 * the DRAM port with insb() (temporarily selecting ROM access via
 * MEMORY_CONTROL when @rom is set, then restoring DRAM access), classic
 * GF1 cards read byte-by-byte with snd_gf1_peek().
 */
static int snd_gus_dram_peek(struct snd_gus_card *gus, char __user *_buffer,
			     unsigned int address, unsigned int size,
			     int rom)
{
	unsigned long flags;
	unsigned int size1, size2;
	char buffer[256], *pbuffer;

	while (size > 0) {
		size1 = size > sizeof(buffer) ? sizeof(buffer) : size;
		if (gus->interwave) {
			spin_lock_irqsave(&gus->reg_lock, flags);
			snd_gf1_write8(gus, SNDRV_GF1_GB_MEMORY_CONTROL,
				       rom ? 0x03 : 0x01);
			snd_gf1_dram_addr(gus, address);
			insb(GUSP(gus, DRAM), buffer, size1);
			/* switch back to DRAM access */
			snd_gf1_write8(gus, SNDRV_GF1_GB_MEMORY_CONTROL, 0x01);
			spin_unlock_irqrestore(&gus->reg_lock, flags);
			address += size1;
		} else {
			pbuffer = buffer;
			size2 = size1;
			while (size2--)
				*pbuffer++ = snd_gf1_peek(gus, address++);
		}
		if (copy_to_user(_buffer, buffer, size1))
			return -EFAULT;
		size -= size1;
		_buffer += size1;
	}
	return 0;
}

/* Public wrapper: read card DRAM/ROM into a user buffer. */
int snd_gus_dram_read(struct snd_gus_card *gus, char __user *buffer,
		      unsigned int address, unsigned int size,
		      int rom)
{
	return snd_gus_dram_peek(gus, buffer, address, size, rom);
}
gpl-2.0
poondog/M8
drivers/video/msm/mdss/msm_mdss_io_8974.c
74
26615
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <mach/clk.h>
#include <mach/msm_iomap.h>

#include "mdss_dsi.h"
#include "mdss_edp.h"

#define SW_RESET BIT(2)
#define SW_RESET_PLL BIT(0)
#define PWRDN_B BIT(7)

/* pixel-clock divider programming computed by mdss_dsi_clk_div_config() */
static struct dsi_clk_desc dsi_pclk;

/*
 * Look up all DSI-related clock handles for this controller.  On any
 * failure the handles obtained so far are released via
 * mdss_dsi_clk_deinit() and the clk_get() error code is returned.
 *
 * NOTE(review): the !pdev early-out leaves rc == 0, so a NULL pdev is
 * logged but reported as success — confirm callers never rely on that.
 */
int mdss_dsi_clk_init(struct platform_device *pdev,
	struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
	struct device *dev = NULL;
	int rc = 0;

	if (!pdev) {
		pr_err("%s: Invalid pdev\n", __func__);
		goto mdss_dsi_clk_err;
	}

	dev = &pdev->dev;
	ctrl_pdata->mdp_core_clk = clk_get(dev, "mdp_core_clk");
	if (IS_ERR(ctrl_pdata->mdp_core_clk)) {
		rc = PTR_ERR(ctrl_pdata->mdp_core_clk);
		pr_err("%s: Unable to get mdp core clk. rc=%d\n",
			__func__, rc);
		goto mdss_dsi_clk_err;
	}

	ctrl_pdata->ahb_clk = clk_get(dev, "iface_clk");
	if (IS_ERR(ctrl_pdata->ahb_clk)) {
		rc = PTR_ERR(ctrl_pdata->ahb_clk);
		pr_err("%s: Unable to get mdss ahb clk. rc=%d\n",
			__func__, rc);
		goto mdss_dsi_clk_err;
	}

	ctrl_pdata->axi_clk = clk_get(dev, "bus_clk");
	if (IS_ERR(ctrl_pdata->axi_clk)) {
		rc = PTR_ERR(ctrl_pdata->axi_clk);
		pr_err("%s: Unable to get axi bus clk. rc=%d\n",
			__func__, rc);
		goto mdss_dsi_clk_err;
	}

	ctrl_pdata->byte_clk = clk_get(dev, "byte_clk");
	if (IS_ERR(ctrl_pdata->byte_clk)) {
		rc = PTR_ERR(ctrl_pdata->byte_clk);
		pr_err("%s: can't find dsi_byte_clk. rc=%d\n",
			__func__, rc);
		ctrl_pdata->byte_clk = NULL;
		goto mdss_dsi_clk_err;
	}

	ctrl_pdata->pixel_clk = clk_get(dev, "pixel_clk");
	if (IS_ERR(ctrl_pdata->pixel_clk)) {
		rc = PTR_ERR(ctrl_pdata->pixel_clk);
		pr_err("%s: can't find dsi_pixel_clk. rc=%d\n",
			__func__, rc);
		ctrl_pdata->pixel_clk = NULL;
		goto mdss_dsi_clk_err;
	}

	ctrl_pdata->esc_clk = clk_get(dev, "core_clk");
	if (IS_ERR(ctrl_pdata->esc_clk)) {
		rc = PTR_ERR(ctrl_pdata->esc_clk);
		pr_err("%s: can't find dsi_esc_clk. rc=%d\n",
			__func__, rc);
		ctrl_pdata->esc_clk = NULL;
		goto mdss_dsi_clk_err;
	}

mdss_dsi_clk_err:
	if (rc)
		mdss_dsi_clk_deinit(ctrl_pdata);
	return rc;
}

/* Release every clock handle obtained by mdss_dsi_clk_init(). */
void mdss_dsi_clk_deinit(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
	if (ctrl_pdata->byte_clk)
		clk_put(ctrl_pdata->byte_clk);
	if (ctrl_pdata->esc_clk)
		clk_put(ctrl_pdata->esc_clk);
	if (ctrl_pdata->pixel_clk)
		clk_put(ctrl_pdata->pixel_clk);
	if (ctrl_pdata->axi_clk)
		clk_put(ctrl_pdata->axi_clk);
	if (ctrl_pdata->ahb_clk)
		clk_put(ctrl_pdata->ahb_clk);
	if (ctrl_pdata->mdp_core_clk)
		clk_put(ctrl_pdata->mdp_core_clk);
}

#define PREF_DIV_RATIO 27

/* PLL divider settings shared with the PLL programming code */
struct dsiphy_pll_divider_config pll_divider_config;

/*
 * Compute the DSI bit-clock rate from the panel timings and derive all
 * PLL divider and pixel-clock M/N/D settings, storing them in
 * pll_divider_config / dsi_pclk and panel_info->mipi.dsi_pclk_rate.
 * Returns 0 on success, -EINVAL if no M/N/D table entry matches the
 * lane count and bytes-per-pixel.
 */
int mdss_dsi_clk_div_config(struct mdss_panel_info *panel_info,
			    int frame_rate)
{
	u32 fb_divider, rate, vco;
	u32 div_ratio = 0;
	u32 pll_analog_posDiv = 1;
	u32 h_period, v_period;
	u32 dsi_pclk_rate;
	u8 lanes = 0, bpp;
	struct dsi_clk_mnd_table const *mnd_entry = mnd_table;

	if (panel_info->mipi.data_lane3)
		lanes += 1;
	if (panel_info->mipi.data_lane2)
		lanes += 1;
	if (panel_info->mipi.data_lane1)
		lanes += 1;
	if (panel_info->mipi.data_lane0)
		lanes += 1;

	switch (panel_info->mipi.dst_format) {
	case DSI_CMD_DST_FORMAT_RGB888:
	case DSI_VIDEO_DST_FORMAT_RGB888:
	case DSI_VIDEO_DST_FORMAT_RGB666_LOOSE:
		bpp = 3;
		break;
	case DSI_CMD_DST_FORMAT_RGB565:
	case DSI_VIDEO_DST_FORMAT_RGB565:
		bpp = 2;
		break;
	default:
		bpp = 3;	/* Default format set to RGB888 */
		break;
	}

	h_period = mdss_panel_get_htotal(panel_info);
	v_period = mdss_panel_get_vtotal(panel_info);

	/* recompute the bit clock if the frame rate changed or no rate
	 * was given by the panel driver */
	if ((frame_rate != panel_info->mipi.frame_rate) ||
	    (!panel_info->clk_rate)) {
		h_period += panel_info->lcdc.xres_pad;
		v_period += panel_info->lcdc.yres_pad;

		if (lanes > 0) {
			panel_info->clk_rate =
				((h_period * v_period * frame_rate * bpp * 8)
				   / lanes);
		} else {
			pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
			panel_info->clk_rate =
				(h_period * v_period * frame_rate * bpp * 8);
		}
	}
	pll_divider_config.clk_rate = panel_info->clk_rate;
	if (pll_divider_config.clk_rate == 0)
		pll_divider_config.clk_rate = 454000000;

	/* half-rate in MHz, used to pick the VCO multiplier */
	rate = (pll_divider_config.clk_rate / 2)
			 / 1000000; /* Half Bit Clock In Mhz */

	if (rate < 43) {
		vco = rate * 16;
		div_ratio = 16;
		pll_analog_posDiv = 8;
	} else if (rate < 85) {
		vco = rate * 8;
		div_ratio = 8;
		pll_analog_posDiv = 4;
	} else if (rate < 170) {
		vco = rate * 4;
		div_ratio = 4;
		pll_analog_posDiv = 2;
	} else if (rate < 340) {
		vco = rate * 2;
		div_ratio = 2;
		pll_analog_posDiv = 1;
	} else {
		/* DSI PLL Direct path configuration */
		vco = rate * 1;
		div_ratio = 1;
		pll_analog_posDiv = 1;
	}

	/* find the mnd settings from mnd_table entry */
	for (; mnd_entry != mnd_table + ARRAY_SIZE(mnd_table); ++mnd_entry) {
		if (((mnd_entry->lanes) == lanes) &&
			((mnd_entry->bpp) == bpp))
			break;
	}

	if (mnd_entry == mnd_table + ARRAY_SIZE(mnd_table)) {
		pr_err("%s: requested Lanes, %u & BPP, %u, not supported\n",
			__func__, lanes, bpp);
		return -EINVAL;
	}

	fb_divider = ((vco * PREF_DIV_RATIO) / 27);
	pll_divider_config.fb_divider = fb_divider;
	pll_divider_config.ref_divider_ratio = PREF_DIV_RATIO;
	pll_divider_config.bit_clk_divider = div_ratio;
	pll_divider_config.byte_clk_divider =
			pll_divider_config.bit_clk_divider * 8;
	pll_divider_config.analog_posDiv = pll_analog_posDiv;
	pll_divider_config.digital_posDiv =
			(mnd_entry->pll_digital_posDiv) * div_ratio;

	if ((mnd_entry->pclk_d == 0) || (mnd_entry->pclk_m == 1)) {
		dsi_pclk.mnd_mode = 0;
		dsi_pclk.src = 0x3;
		dsi_pclk.pre_div_func = (mnd_entry->pclk_n - 1);
	} else {
		dsi_pclk.mnd_mode = 2;
		dsi_pclk.src = 0x3;
		dsi_pclk.m = mnd_entry->pclk_m;
		dsi_pclk.n = mnd_entry->pclk_n;
		dsi_pclk.d = mnd_entry->pclk_d;
	}
	dsi_pclk_rate = (((pll_divider_config.clk_rate) * lanes)
				  / (8 * bpp));
	/* clamp implausible pixel clocks to a safe default */
	if ((dsi_pclk_rate < 3300000) || (dsi_pclk_rate > 250000000))
		dsi_pclk_rate = 35000000;
	panel_info->mipi.dsi_pclk_rate = dsi_pclk_rate;

	return 0;
}

/*
 * Enable the always-needed bus clocks (MDP core, AHB, AXI) in order,
 * unwinding on failure.
 */
int mdss_dsi_enable_bus_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
	int rc = 0;

	rc = clk_prepare_enable(ctrl_pdata->mdp_core_clk);
	if (rc) {
		pr_err("%s: failed to enable mdp_core_clock. rc=%d\n",
							 __func__, rc);
		goto error;
	}

	rc = clk_prepare_enable(ctrl_pdata->ahb_clk);
	if (rc) {
		pr_err("%s: failed to enable ahb clock. rc=%d\n",
			__func__, rc);
		clk_disable_unprepare(ctrl_pdata->mdp_core_clk);
		goto error;
	}

	rc = clk_prepare_enable(ctrl_pdata->axi_clk);
	if (rc) {
		/* was a copy-paste of the ahb message; name the right clock */
		pr_err("%s: failed to enable axi clock. rc=%d\n",
			__func__, rc);
		clk_disable_unprepare(ctrl_pdata->ahb_clk);
		clk_disable_unprepare(ctrl_pdata->mdp_core_clk);
		goto error;
	}

error:
	return rc;
}

/* Disable the bus clocks in the reverse of the enable order. */
void mdss_dsi_disable_bus_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
	clk_disable_unprepare(ctrl_pdata->axi_clk);
	clk_disable_unprepare(ctrl_pdata->ahb_clk);
	clk_disable_unprepare(ctrl_pdata->mdp_core_clk);
}

/* Prepare the DSI link clocks (esc, byte, pixel), unwinding on failure. */
static int mdss_dsi_clk_prepare(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
	int rc = 0;

	rc = clk_prepare(ctrl_pdata->esc_clk);
	if (rc) {
		pr_err("%s: Failed to prepare dsi esc clk\n", __func__);
		goto esc_clk_err;
	}

	rc = clk_prepare(ctrl_pdata->byte_clk);
	if (rc) {
		pr_err("%s: Failed to prepare dsi byte clk\n", __func__);
		goto byte_clk_err;
	}

	rc = clk_prepare(ctrl_pdata->pixel_clk);
	if (rc) {
		pr_err("%s: Failed to prepare dsi pixel clk\n", __func__);
		goto pixel_clk_err;
	}

	return rc;

pixel_clk_err:
	clk_unprepare(ctrl_pdata->byte_clk);
byte_clk_err:
	clk_unprepare(ctrl_pdata->esc_clk);
esc_clk_err:
	return rc;
}

/* Unprepare the DSI link clocks in reverse order. */
static void mdss_dsi_clk_unprepare(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
	if (!ctrl_pdata) {
		pr_err("%s: Invalid input data\n", __func__);
		return;
	}

	clk_unprepare(ctrl_pdata->pixel_clk);
	clk_unprepare(ctrl_pdata->byte_clk);
	clk_unprepare(ctrl_pdata->esc_clk);
}

/*
 * Program the esc/byte/pixel clock rates.  Skipped while the continuous
 * splash screen is still owned by the bootloader, since changing rates
 * then would disturb the live display.
 */
static int mdss_dsi_clk_set_rate(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
	u32 esc_clk_rate = 19200000;
	int rc = 0;

	if (!ctrl_pdata) {
		pr_err("%s: Invalid input data\n", __func__);
		return -EINVAL;
	}

	if (!ctrl_pdata->panel_data.panel_info.cont_splash_enabled) {
		pr_debug("%s: Set clk rates: pclk=%d, byteclk=%d escclk=%d\n",
			__func__, ctrl_pdata->pclk_rate,
			ctrl_pdata->byte_clk_rate, esc_clk_rate);
		rc = clk_set_rate(ctrl_pdata->esc_clk, esc_clk_rate);
		if (rc) {
			pr_err("%s: dsi_esc_clk - clk_set_rate failed\n",
				__func__);
			goto error;
		}

		rc = clk_set_rate(ctrl_pdata->byte_clk,
			ctrl_pdata->byte_clk_rate);
		if (rc) {
			pr_err("%s: dsi_byte_clk - clk_set_rate failed\n",
				__func__);
			goto error;
		}

		rc = clk_set_rate(ctrl_pdata->pixel_clk,
			ctrl_pdata->pclk_rate);
		if (rc) {
			pr_err("%s: dsi_pixel_clk - clk_set_rate failed\n",
				__func__);
			goto error;
		}
	}

error:
	return rc;
}

/* Turn on the prepared DSI link clocks; no-op if already on. */
static int mdss_dsi_clk_enable(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
	int rc = 0;

	if (!ctrl_pdata) {
		pr_err("%s: Invalid input data\n", __func__);
		return -EINVAL;
	}

	if (ctrl_pdata->mdss_dsi_clk_on) {
		pr_info("%s: mdss_dsi_clks already ON\n", __func__);
		return 0;
	}

	rc = clk_enable(ctrl_pdata->esc_clk);
	if (rc) {
		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
		goto esc_clk_err;
	}

	rc = clk_enable(ctrl_pdata->byte_clk);
	if (rc) {
		pr_err("%s: Failed to enable dsi byte clk\n", __func__);
		goto byte_clk_err;
	}

	rc = clk_enable(ctrl_pdata->pixel_clk);
	if (rc) {
		pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
		goto pixel_clk_err;
	}

	ctrl_pdata->mdss_dsi_clk_on = 1;
	return rc;

pixel_clk_err:
	clk_disable(ctrl_pdata->byte_clk);
byte_clk_err:
	clk_disable(ctrl_pdata->esc_clk);
esc_clk_err:
	return rc;
}

/* Turn off the DSI link clocks; no-op if already off. */
static void mdss_dsi_clk_disable(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
	if (!ctrl_pdata) {
		pr_err("%s: Invalid input data\n", __func__);
		return;
	}

	if (ctrl_pdata->mdss_dsi_clk_on == 0) {
		pr_info("%s: mdss_dsi_clks already OFF\n", __func__);
		return;
	}

	clk_disable(ctrl_pdata->esc_clk);
	clk_disable(ctrl_pdata->pixel_clk);
	clk_disable(ctrl_pdata->byte_clk);

	ctrl_pdata->mdss_dsi_clk_on = 0;
}

/*
 * Reference-counted on/off switch for all DSI clocks of one controller.
 * The first enable brings up bus clocks, rates, prepare, then enable;
 * the last disable tears everything down in reverse.  Serialised by
 * ctrl->mutex.
 */
int mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, int enable)
{
	int rc = 0;

	mutex_lock(&ctrl->mutex);
	if (enable) {
		if (ctrl->clk_cnt == 0) {
			rc = mdss_dsi_enable_bus_clocks(ctrl);
			if (rc) {
				pr_err("%s: failed to enable bus clks. rc=%d\n",
					__func__, rc);
				goto error;
			}

			rc = mdss_dsi_clk_set_rate(ctrl);
			if (rc) {
				pr_err("%s: failed to set clk rates. rc=%d\n",
					__func__, rc);
				mdss_dsi_disable_bus_clocks(ctrl);
				goto error;
			}

			rc = mdss_dsi_clk_prepare(ctrl);
			if (rc) {
				pr_err("%s: failed to prepare clks. rc=%d\n",
					__func__, rc);
				mdss_dsi_disable_bus_clocks(ctrl);
				goto error;
			}

			rc = mdss_dsi_clk_enable(ctrl);
			if (rc) {
				pr_err("%s: failed to enable clks. rc=%d\n",
					__func__, rc);
				mdss_dsi_clk_unprepare(ctrl);
				mdss_dsi_disable_bus_clocks(ctrl);
				goto error;
			}
		}
		ctrl->clk_cnt++;
	} else {
		if (ctrl->clk_cnt) {
			ctrl->clk_cnt--;
			if (ctrl->clk_cnt == 0) {
				mdss_dsi_clk_disable(ctrl);
				mdss_dsi_clk_unprepare(ctrl);
				mdss_dsi_disable_bus_clocks(ctrl);
			}
		}
	}
	pr_debug("%s: ctrl ndx=%d enabled=%d clk_cnt=%d\n",
		__func__, ctrl->ndx, enable, ctrl->clk_cnt);

error:
	mutex_unlock(&ctrl->mutex);
	return rc;
}

/* Pulse the DSI PHY software reset bit. */
void mdss_dsi_phy_sw_reset(unsigned char *ctrl_base)
{
	/* start phy sw reset */
	MIPI_OUTP(ctrl_base + 0x12c, 0x0001);
	udelay(1000);
	wmb();
	/* end phy sw reset */
	MIPI_OUTP(ctrl_base + 0x12c, 0x0000);
	udelay(100);
	wmb();
}

/*
 * Power the DSI PHY regulator/lanes up or down.  In broadcast (dual-DSI)
 * mode the DISPLAY_1 controller is remembered as left_ctrl so that its
 * PHY is only shut down together with DISPLAY_2.
 */
void mdss_dsi_phy_enable(struct mdss_dsi_ctrl_pdata *ctrl, int on)
{
	static struct mdss_dsi_ctrl_pdata *left_ctrl;

	if (ctrl == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return;
	}

	if (!left_ctrl
		&& ctrl->shared_pdata.broadcast_enable)
		if ((ctrl->panel_data).panel_info.pdest
					== DISPLAY_1)
			left_ctrl = ctrl;

	if (on) {
		MIPI_OUTP(ctrl->ctrl_base + 0x03cc, 0x03);
		wmb();
		usleep(100);
		MIPI_OUTP(ctrl->ctrl_base + 0x0220, 0x006);
		wmb();
		usleep(100);
		MIPI_OUTP(ctrl->ctrl_base + 0x0268, 0x001);
		wmb();
		usleep(100);
		MIPI_OUTP(ctrl->ctrl_base + 0x0268, 0x000);
		wmb();
		usleep(100);
		MIPI_OUTP(ctrl->ctrl_base + 0x0220, 0x007);
		wmb();
		MIPI_OUTP(ctrl->ctrl_base + 0x03cc, 0x01);
		wmb();
		usleep(100);

		/* per-lane power-up staircase */
		MIPI_OUTP(ctrl->ctrl_base + 0x0470, 0x07e);
		MIPI_OUTP(ctrl->ctrl_base + 0x0470, 0x06e);
		MIPI_OUTP(ctrl->ctrl_base + 0x0470, 0x06c);
		MIPI_OUTP(ctrl->ctrl_base + 0x0470, 0x064);
		MIPI_OUTP(ctrl->ctrl_base + 0x0470, 0x065);
		MIPI_OUTP(ctrl->ctrl_base + 0x0470, 0x075);
		MIPI_OUTP(ctrl->ctrl_base + 0x0470, 0x077);
		MIPI_OUTP(ctrl->ctrl_base + 0x0470, 0x07f);
		wmb();
	} else {
		/* in broadcast mode DISPLAY_1's PHY stays up until
		 * DISPLAY_2 goes down */
		if (left_ctrl &&
			(ctrl->panel_data.panel_info.pdest
						== DISPLAY_1))
			return;

		if (left_ctrl &&
			(ctrl->panel_data.panel_info.pdest
						== DISPLAY_2)) {
			MIPI_OUTP(left_ctrl->ctrl_base + 0x0220, 0x006);
			MIPI_OUTP(left_ctrl->ctrl_base + 0x0470, 0x000);
			MIPI_OUTP(left_ctrl->ctrl_base + 0x0598, 0x000);
		}

		MIPI_OUTP(ctrl->ctrl_base + 0x0220, 0x006);
		MIPI_OUTP(ctrl->ctrl_base + 0x0470, 0x000);
		MIPI_OUTP(ctrl->ctrl_base + 0x0598, 0x000);
		wmb();
	}
}

/*
 * Program the DSI PHY: strength, regulator, timing, lane configuration
 * and BIST control values taken from the panel's dsi_phy_db table.
 */
void mdss_dsi_phy_init(struct mdss_panel_data *pdata)
{
	struct mdss_dsi_phy_ctrl *pd;
	int i, off, ln, offset;
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;

	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
				panel_data);
	if (!ctrl_pdata) {
		pr_err("%s: Invalid input data\n", __func__);
		return;
	}

	pd = &(((ctrl_pdata->panel_data).panel_info.mipi).dsi_phy_db);

	/* Strength ctrl 0 */
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0484, pd->strength[0]);

	/* the regulator block is shared; the second controller reaches
	 * it at a negative offset from its own register window */
	if ((ctrl_pdata->panel_data).panel_info.pdest == DISPLAY_1)
		off = 0x0580;
	else
		off = 0x0580 - 0x600;

	/* Regulator ctrl - CAL_PWD_CFG */
	MIPI_OUTP((ctrl_pdata->ctrl_base) + off + (4 * 0), 0x0);
	MIPI_OUTP((ctrl_pdata->ctrl_base) + off + (4 * 6), pd->regulator[6]);
	MIPI_OUTP((ctrl_pdata->ctrl_base) + off + (4 * 5), pd->regulator[5]);
	MIPI_OUTP((ctrl_pdata->ctrl_base) + off + (4 * 3), pd->regulator[3]);
	MIPI_OUTP((ctrl_pdata->ctrl_base) + off + (4 * 2), pd->regulator[2]);
	MIPI_OUTP((ctrl_pdata->ctrl_base) + off + (4 * 1), pd->regulator[1]);
	MIPI_OUTP((ctrl_pdata->ctrl_base) + off + (4 * 0), pd->regulator[0]);
	MIPI_OUTP((ctrl_pdata->ctrl_base) + off + (4 * 4), pd->regulator[4]);

	if ((ctrl_pdata->panel_data).panel_info.pdest == DISPLAY_1)
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x4dc, 0x00);
	else
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x4dc, 0x00);

	off = 0x0440;	/* phy timing ctrl 0 - 11 */
	for (i = 0; i < 12; i++) {
		MIPI_OUTP((ctrl_pdata->ctrl_base) + off, pd->timing[i]);
		wmb();
		off += 4;
	}

	/* MMSS_DSI_0_PHY_DSIPHY_CTRL_1 */
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0474, 0x00);
	/* MMSS_DSI_0_PHY_DSIPHY_CTRL_0 */
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0470, 0x5f);
	wmb();

	/* Strength ctrl 1 */
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0488, pd->strength[1]);
	wmb();

	/* 4 lanes + clk lane configuration */
	for (ln = 0; ln < 5; ln++) {
		off = 0x0300 + (ln * 0x40);
		for (i = 0; i < 9; i++) {
			offset = i + (ln * 9);
			MIPI_OUTP((ctrl_pdata->ctrl_base) + off,
				pd->lanecfg[offset]);
			wmb();
			off += 4;
		}
	}

	/* MMSS_DSI_0_PHY_DSIPHY_CTRL_0 */
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0470, 0x5f);
	wmb();

	/* DSI_0_PHY_DSIPHY_GLBL_TEST_CTRL */
	if ((ctrl_pdata->panel_data).panel_info.pdest == DISPLAY_1)
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x04d4, 0x01);
	else
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x04d4, 0x00);
	wmb();

	off = 0x04b4;	/* BIST Ctrl 0 - 5 */
	for (i = 0; i < 6; i++) {
		MIPI_OUTP((ctrl_pdata->ctrl_base) + off, pd->bistctrl[i]);
		wmb();
		off += 4;
	}
}

/* Enable/disable the eDP timing engine. */
void mdss_edp_timing_engine_ctrl(unsigned char *edp_base, int enable)
{
	edp_write(edp_base + 0x94, enable);
}

/* Enable/disable the eDP main link. */
void mdss_edp_mainlink_ctrl(unsigned char *edp_base, int enable)
{
	edp_write(edp_base + 0x04, enable);
}

/* Pulse the eDP main-link reset bit. */
void mdss_edp_mainlink_reset(unsigned char *edp_base)
{
	edp_write(edp_base + 0x04, 0x02);
	usleep(1000);
	edp_write(edp_base + 0x04, 0);
}

/* Pulse the eDP AUX-channel reset bit. */
void mdss_edp_aux_reset(unsigned char *edp_base)
{
	edp_write(edp_base + 0x300, BIT(1));
	usleep(1000);
	edp_write(edp_base + 0x300, 0);
}

/*
 * Set or clear the AUX-channel enable bit (bit 0 of 0x300).
 *
 * FIX: the disable path used "data |= ~0x01", which can never clear the
 * enable bit; use "&=" like mdss_edp_clock_synchrous() does.
 */
void mdss_edp_aux_ctrl(unsigned char *edp_base, int enable)
{
	u32 data;

	data = edp_read(edp_base + 0x300);
	if (enable)
		data |= 0x01;
	else
		data &= ~0x01;
	edp_write(edp_base + 0x300, data);
}

/* Pulse the eDP PHY PLL reset bits. */
void mdss_edp_phy_pll_reset(unsigned char *edp_base)
{
	edp_write(edp_base + 0x74, 0x005);
	usleep(1000);
	edp_write(edp_base + 0x74, 0x000);
}

/*
 * Poll (up to 10 times) for eDP PHY PLL lock.  Returns 1 when locked,
 * 0 on timeout.
 *
 * FIX: with "while (cnt--)", cnt is -1 after a full timeout and 0 when
 * lock is seen on the very last attempt, so the old "cnt == 0" test
 * reported both cases wrongly; test for cnt < 0 instead.
 */
int mdss_edp_phy_pll_ready(unsigned char *edp_base)
{
	int cnt;
	u32 status;

	cnt = 10;
	while (cnt--) {
		status = edp_read(edp_base + 0x6c0);
		if (status & 0x01)
			break;
		usleep(100);
	}
	if (cnt < 0) {
		pr_err("%s: PLL NOT ready\n", __func__);
		return 0;
	} else
		return 1;
}

/* Return 1 if the eDP PHY reports ready (bit 0 of 0x598). */
int mdss_edp_phy_ready(unsigned char *edp_base)
{
	u32 status;

	status = edp_read(edp_base + 0x598);
	status &= 0x01;

	return status;
}

/* Power the eDP PHY lanes and PLL up or down. */
void mdss_edp_phy_powerup(unsigned char *edp_base, int enable)
{
	if (enable) {
		/* EDP_PHY_EDPPHY_GLB_PD_CTL */
		edp_write(edp_base + 0x52c, 0x3f);
		/* EDP_PHY_EDPPHY_GLB_CFG */
		edp_write(edp_base + 0x528, 0x1);
		/* EDP_PHY_PLL_UNIPHY_PLL_GLB_CFG */
		edp_write(edp_base + 0x620, 0xf);
	} else {
		/* EDP_PHY_EDPPHY_GLB_PD_CTL */
		edp_write(edp_base + 0x52c, 0xc0);
	}
}

/*
 * Program the UNIPHY PLL for one of the two supported link rates
 * (810 MHz or 138.53 MHz); any other rate is rejected with an error.
 */
void mdss_edp_pll_configure(unsigned char *edp_base, int rate)
{
	if (rate == 810000000) {
		edp_write(edp_base + 0x60c, 0x18);
		edp_write(edp_base + 0x664, 0x5);
		edp_write(edp_base + 0x600, 0x0);
		edp_write(edp_base + 0x638, 0x36);
		edp_write(edp_base + 0x63c, 0x69);
		edp_write(edp_base + 0x640, 0xff);
		edp_write(edp_base + 0x644, 0x2f);
		edp_write(edp_base + 0x648, 0x0);
		edp_write(edp_base + 0x66c, 0x0a);
		edp_write(edp_base + 0x674, 0x01);
		edp_write(edp_base + 0x684, 0x5a);
		edp_write(edp_base + 0x688, 0x0);
		edp_write(edp_base + 0x68c, 0x60);
		edp_write(edp_base + 0x690, 0x0);
		edp_write(edp_base + 0x694, 0x2a);
		edp_write(edp_base + 0x698, 0x3);
		edp_write(edp_base + 0x65c, 0x10);
		edp_write(edp_base + 0x660, 0x1a);
		edp_write(edp_base + 0x604, 0x0);
		edp_write(edp_base + 0x624, 0x0);
		edp_write(edp_base + 0x628, 0x0);

		/* staged PLL enable sequence */
		edp_write(edp_base + 0x620, 0x1);
		edp_write(edp_base + 0x620, 0x5);
		edp_write(edp_base + 0x620, 0x7);
		edp_write(edp_base + 0x620, 0xf);
	} else if (rate == 138530000) {
		edp_write(edp_base + 0x664, 0x5); /* UNIPHY_PLL_LKDET_CFG2 */
		edp_write(edp_base + 0x600, 0x1); /* UNIPHY_PLL_REFCLK_CFG */
		edp_write(edp_base + 0x638, 0x36); /* UNIPHY_PLL_SDM_CFG0 */
		edp_write(edp_base + 0x63c, 0x62); /* UNIPHY_PLL_SDM_CFG1 */
		edp_write(edp_base + 0x640, 0x0); /* UNIPHY_PLL_SDM_CFG2 */
		edp_write(edp_base + 0x644, 0x28); /* UNIPHY_PLL_SDM_CFG3 */
		edp_write(edp_base + 0x648, 0x0); /* UNIPHY_PLL_SDM_CFG4 */
		edp_write(edp_base + 0x64c, 0x80); /* UNIPHY_PLL_SSC_CFG0 */
		edp_write(edp_base + 0x650, 0x0); /* UNIPHY_PLL_SSC_CFG1 */
		edp_write(edp_base + 0x654, 0x0); /* UNIPHY_PLL_SSC_CFG2 */
		edp_write(edp_base + 0x658, 0x0); /* UNIPHY_PLL_SSC_CFG3 */
		edp_write(edp_base + 0x66c, 0xa); /* UNIPHY_PLL_CAL_CFG0 */
		edp_write(edp_base + 0x674, 0x1); /* UNIPHY_PLL_CAL_CFG2 */
		edp_write(edp_base + 0x684, 0x5a); /* UNIPHY_PLL_CAL_CFG6 */
		edp_write(edp_base + 0x688, 0x0); /* UNIPHY_PLL_CAL_CFG7 */
		edp_write(edp_base + 0x68c, 0x60); /* UNIPHY_PLL_CAL_CFG8 */
		edp_write(edp_base + 0x690, 0x0); /* UNIPHY_PLL_CAL_CFG9 */
		edp_write(edp_base + 0x694, 0x46); /* UNIPHY_PLL_CAL_CFG10 */
		edp_write(edp_base + 0x698, 0x5); /* UNIPHY_PLL_CAL_CFG11 */
		edp_write(edp_base + 0x65c, 0x10); /* UNIPHY_PLL_LKDET_CFG0 */
		edp_write(edp_base + 0x660, 0x1a); /* UNIPHY_PLL_LKDET_CFG1 */
		edp_write(edp_base + 0x604, 0x0); /* UNIPHY_PLL_POSTDIV1_CFG */
		edp_write(edp_base + 0x624, 0x0); /* UNIPHY_PLL_POSTDIV2_CFG */
		edp_write(edp_base + 0x628, 0x0); /* UNIPHY_PLL_POSTDIV3_CFG */

		edp_write(edp_base + 0x620, 0x1); /* UNIPHY_PLL_GLB_CFG */
		edp_write(edp_base + 0x620, 0x5); /* UNIPHY_PLL_GLB_CFG */
		edp_write(edp_base + 0x620, 0x7); /* UNIPHY_PLL_GLB_CFG */
		edp_write(edp_base + 0x620, 0xf); /* UNIPHY_PLL_GLB_CFG */
	} else {
		pr_err("%s: rate=%d is NOT supported\n", __func__, rate);
	}
}

/* Reset and enable (or disable) the AUX channel and its interrupts. */
void mdss_edp_enable_aux(unsigned char *edp_base, int enable)
{
	if (!enable) {
		edp_write(edp_base + 0x300, 0); /* EDP_AUX_CTRL */
		return;
	}

	/* reset AUX */
	edp_write(edp_base + 0x300, BIT(1)); /* EDP_AUX_CTRL */
	edp_write(edp_base + 0x300, 0); /* EDP_AUX_CTRL */

	/* enable AUX */
	edp_write(edp_base + 0x300, BIT(0)); /* EDP_AUX_CTRL */

	/* EDP_AUX_TIMEOUT_COUNT, EDP_AUX_LIMITS */
	edp_write(edp_base + 0x550, 0x2c);
	/* clear and unmask interrupts */
	edp_write(edp_base + 0x308, 0xffffffff);
	edp_write(edp_base + 0x568, 0xff);
}

/* Enable or disable the eDP main link (bit 0 of 0x004). */
void mdss_edp_enable_mainlink(unsigned char *edp_base, int enable)
{
	u32 data;

	data = edp_read(edp_base + 0x004);
	data &= ~BIT(0);

	if (enable) {
		data |= 0x1;
		edp_write(edp_base + 0x004, data);
		edp_write(edp_base + 0x004, 0x1);
	} else {
		data |= 0x0;
		edp_write(edp_base + 0x004, data);
	}
}

/* Power the first @max_lane eDP lanes up (0) or down (0x7). */
void mdss_edp_lane_power_ctrl(unsigned char *edp_base, int max_lane, int up)
{
	int i, off;
	u32 data;

	if (up)
		data = 0;	/* power up */
	else
		data = 0x7;	/* power down */

	/* EDP_PHY_EDPPHY_LNn_PD_CTL */
	for (i = 0; i < max_lane; i++) {
		off = 0x40 * i;
		edp_write(edp_base + 0x404 + off , data);
	}
}

/* Release the eDP clock handles obtained by mdss_edp_clk_init(). */
void mdss_edp_clk_deinit(struct mdss_edp_drv_pdata *edp_drv)
{
	if (edp_drv->aux_clk)
		clk_put(edp_drv->aux_clk);
	if (edp_drv->pixel_clk)
		clk_put(edp_drv->pixel_clk);
	if (edp_drv->ahb_clk)
		clk_put(edp_drv->ahb_clk);
	if (edp_drv->link_clk)
		clk_put(edp_drv->link_clk);
}

/*
 * Look up the eDP clock handles (aux, pixel, ahb, link).  On failure
 * the handles obtained so far are released and -EPERM is returned.
 */
int mdss_edp_clk_init(struct mdss_edp_drv_pdata *edp_drv)
{
	struct device *dev = &(edp_drv->pdev->dev);

	edp_drv->aux_clk = clk_get(dev, "core_clk");
	if (IS_ERR(edp_drv->aux_clk)) {
		pr_err("%s: Can't find aux_clk", __func__);
		edp_drv->aux_clk = NULL;
		goto mdss_edp_clk_err;
	}

	edp_drv->pixel_clk = clk_get(dev, "pixel_clk");
	if (IS_ERR(edp_drv->pixel_clk)) {
		pr_err("%s: Can't find pixel_clk", __func__);
		edp_drv->pixel_clk = NULL;
		goto mdss_edp_clk_err;
	}

	edp_drv->ahb_clk = clk_get(dev, "iface_clk");
	if (IS_ERR(edp_drv->ahb_clk)) {
		pr_err("%s: Can't find ahb_clk", __func__);
		edp_drv->ahb_clk = NULL;
		goto mdss_edp_clk_err;
	}

	edp_drv->link_clk = clk_get(dev, "link_clk");
	if (IS_ERR(edp_drv->link_clk)) {
		pr_err("%s: Can't find link_clk", __func__);
		edp_drv->link_clk = NULL;
		goto mdss_edp_clk_err;
	}

	return 0;

mdss_edp_clk_err:
	mdss_edp_clk_deinit(edp_drv);
	return -EPERM;
}

/* Enable the AUX and AHB clocks needed for AUX-channel traffic. */
int mdss_edp_aux_clk_enable(struct mdss_edp_drv_pdata *edp_drv)
{
	int ret;

	if (clk_set_rate(edp_drv->aux_clk, 19200000) < 0)
		pr_err("%s: aux_clk - clk_set_rate failed\n", __func__);

	ret = clk_enable(edp_drv->aux_clk);
	if (ret) {
		pr_err("%s: Failed to enable aux clk\n", __func__);
		goto c2;
	}

	ret = clk_enable(edp_drv->ahb_clk);
	if (ret) {
		pr_err("%s: Failed to enable ahb clk\n", __func__);
		goto c1;
	}

	return 0;

c1:
	clk_disable(edp_drv->aux_clk);
c2:
	return ret;
}

/* Disable the AUX-channel clocks. */
void mdss_edp_aux_clk_disable(struct mdss_edp_drv_pdata *edp_drv)
{
	clk_disable(edp_drv->aux_clk);
	clk_disable(edp_drv->ahb_clk);
}

/*
 * Set rates and enable all eDP clocks (aux, pixel, ahb, link),
 * unwinding on failure; no-op when already on.
 */
int mdss_edp_clk_enable(struct mdss_edp_drv_pdata *edp_drv)
{
	int ret;

	if (edp_drv->clk_on) {
		pr_info("%s: edp clks are already ON\n", __func__);
		return 0;
	}

	if (clk_set_rate(edp_drv->aux_clk, 19200000) < 0)
		pr_err("%s: aux_clk - clk_set_rate failed\n", __func__);

	if (clk_set_rate(edp_drv->pixel_clk, 138500000) < 0)
		pr_err("%s: pixel_clk - clk_set_rate failed\n", __func__);

	if (clk_set_rate(edp_drv->link_clk, 270000000) < 0)
		pr_err("%s: link_clk - clk_set_rate failed\n", __func__);

	ret = clk_enable(edp_drv->aux_clk);
	if (ret) {
		pr_err("%s: Failed to enable aux clk\n", __func__);
		goto c4;
	}
	ret = clk_enable(edp_drv->pixel_clk);
	if (ret) {
		pr_err("%s: Failed to enable pixel clk\n", __func__);
		goto c3;
	}
	ret = clk_enable(edp_drv->ahb_clk);
	if (ret) {
		pr_err("%s: Failed to enable ahb clk\n", __func__);
		goto c2;
	}
	ret = clk_enable(edp_drv->link_clk);
	if (ret) {
		pr_err("%s: Failed to enable link clk\n", __func__);
		goto c1;
	}

	edp_drv->clk_on = 1;
	return 0;

c1:
	clk_disable(edp_drv->ahb_clk);
c2:
	clk_disable(edp_drv->pixel_clk);
c3:
	clk_disable(edp_drv->aux_clk);
c4:
	return ret;
}

/* Disable all eDP clocks; no-op when already off. */
void mdss_edp_clk_disable(struct mdss_edp_drv_pdata *edp_drv)
{
	if (edp_drv->clk_on == 0) {
		pr_info("%s: edp clks are already OFF\n", __func__);
		return;
	}

	clk_disable(edp_drv->aux_clk);
	clk_disable(edp_drv->pixel_clk);
	clk_disable(edp_drv->ahb_clk);
	clk_disable(edp_drv->link_clk);

	edp_drv->clk_on = 0;
}

/* Prepare the AUX-channel clocks. */
int mdss_edp_prepare_aux_clocks(struct mdss_edp_drv_pdata *edp_drv)
{
	int ret;

	ret = clk_prepare(edp_drv->aux_clk);
	if (ret) {
		pr_err("%s: Failed to prepare aux clk\n", __func__);
		goto c2;
	}
	ret = clk_prepare(edp_drv->ahb_clk);
	if (ret) {
		pr_err("%s: Failed to prepare ahb clk\n", __func__);
		goto c1;
	}

	return 0;

c1:
	clk_unprepare(edp_drv->aux_clk);
c2:
	return ret;
}

/* Unprepare the AUX-channel clocks. */
void mdss_edp_unprepare_aux_clocks(struct mdss_edp_drv_pdata *edp_drv)
{
	clk_unprepare(edp_drv->aux_clk);
	clk_unprepare(edp_drv->ahb_clk);
}

/* Prepare all eDP clocks, unwinding on failure. */
int mdss_edp_prepare_clocks(struct mdss_edp_drv_pdata *edp_drv)
{
	int ret;

	ret = clk_prepare(edp_drv->aux_clk);
	if (ret) {
		pr_err("%s: Failed to prepare aux clk\n", __func__);
		goto c4;
	}
	ret = clk_prepare(edp_drv->pixel_clk);
	if (ret) {
		pr_err("%s: Failed to prepare pixel clk\n", __func__);
		goto c3;
	}
	ret = clk_prepare(edp_drv->ahb_clk);
	if (ret) {
		pr_err("%s: Failed to prepare ahb clk\n", __func__);
		goto c2;
	}
	ret = clk_prepare(edp_drv->link_clk);
	if (ret) {
		pr_err("%s: Failed to prepare link clk\n", __func__);
		goto c1;
	}

	return 0;

c1:
	clk_unprepare(edp_drv->ahb_clk);
c2:
	clk_unprepare(edp_drv->pixel_clk);
c3:
	clk_unprepare(edp_drv->aux_clk);
c4:
	return ret;
}

/* Unprepare all eDP clocks. */
void mdss_edp_unprepare_clocks(struct mdss_edp_drv_pdata *edp_drv)
{
	clk_unprepare(edp_drv->aux_clk);
	clk_unprepare(edp_drv->pixel_clk);
	clk_unprepare(edp_drv->ahb_clk);
	clk_unprepare(edp_drv->link_clk);
}

/* Route and enable the eDP pixel clock in the MMSS clock controller. */
void mdss_edp_enable_pixel_clk(unsigned char *edp_base,
		unsigned char *mmss_cc_base, int enable)
{
	if (!enable) {
		edp_write(mmss_cc_base + 0x032c, 0); /* CBCR */
		return;
	}

	edp_write(edp_base + 0x624, 0x1); /* PostDiv2 */

	/* Configuring MND for Pixel */
	edp_write(mmss_cc_base + 0x00a8, 0x3f); /* M value */
	edp_write(mmss_cc_base + 0x00ac, 0xb); /* N value */
	edp_write(mmss_cc_base + 0x00b0, 0x0); /* D value */

	/* CFG RCGR */
	edp_write(mmss_cc_base + 0x00a4, (5 << 8) | (2 << 12));
	edp_write(mmss_cc_base + 0x00a0, 3); /* CMD RCGR */

	edp_write(mmss_cc_base + 0x032c, 1); /* CBCR */
}

/* Route and enable the eDP link clock in the MMSS clock controller. */
void mdss_edp_enable_link_clk(unsigned char *mmss_cc_base, int enable)
{
	if (!enable) {
		edp_write(mmss_cc_base + 0x0330, 0); /* CBCR */
		return;
	}

	edp_write(mmss_cc_base + 0x00c4, (4 << 8)); /* CFG RCGR */
	edp_write(mmss_cc_base + 0x00c0, 3); /* CMD RCGR */

	edp_write(mmss_cc_base + 0x0330, 1); /* CBCR */
}

/* Enable both eDP link and pixel clock routing. */
void mdss_edp_config_clk(unsigned char *edp_base, unsigned char *mmss_cc_base)
{
	mdss_edp_enable_link_clk(mmss_cc_base, 1);
	mdss_edp_enable_pixel_clk(edp_base, mmss_cc_base, 1);
}

/* Disable both eDP link and pixel clock routing. */
void mdss_edp_unconfig_clk(unsigned char *edp_base,
		unsigned char *mmss_cc_base)
{
	mdss_edp_enable_link_clk(mmss_cc_base, 0);
	mdss_edp_enable_pixel_clk(edp_base, mmss_cc_base, 0);
}

/* Set or clear the clock-synchronous bit in EDP_MISC1_MISC0 (0x02c). */
void mdss_edp_clock_synchrous(unsigned char *edp_base, int sync)
{
	u32 data;

	/* EDP_MISC1_MISC0 */
	data = edp_read(edp_base + 0x02c);

	if (sync)
		data |= 0x01;
	else
		data &= ~0x01;

	/* EDP_MISC1_MISC0 */
	edp_write(edp_base + 0x2c, data);
}

/* Program voltage-mode and pre-emphasis defaults for the eDP PHY. */
void mdss_edp_phy_vm_pe_init(unsigned char *edp_base)
{
	/* EDP_PHY_EDPPHY_GLB_VM_CFG0 */
	edp_write(edp_base + 0x510, 0x3);
	/* EDP_PHY_EDPPHY_GLB_VM_CFG1 */
	edp_write(edp_base + 0x514, 0x64);
	/* EDP_PHY_EDPPHY_GLB_MISC9 */
	edp_write(edp_base + 0x518, 0x6c);
}
gpl-2.0
jepler/odroid-linux
arch/sparc/kernel/smp_64.c
74
36495
/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>
#include <asm/timer.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/ldc.h>
#include <asm/hypervisor.h>
#include <asm/pcr.h>

#include "cpumap.h"

/* NOTE(review): presumably set during cpu probing elsewhere; only the
 * definition is visible in this file. */
int sparc64_multi_core __read_mostly;

/* Topology maps, filled in by smp_fill_in_sib_core_maps(). */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);

/* CPUs the master has released into the kernel (set in __cpu_up()). */
static cpumask_t smp_commenced_mask;

/* /proc/cpuinfo helper: one "online" line per running cpu. */
void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}

/* /proc/cpuinfo helper: report each online cpu's clock tick value. */
void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m, "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}

extern void setup_sparc64_timer(void);

/* Set by a freshly booted secondary to tell the master it arrived
 * (polled in smp_boot_one_cpu()). */
static volatile unsigned long callin_flag = 0;

/*
 * First C code run by a secondary cpu: set up per-cpu state, raise
 * callin_flag for the master, then spin until the master adds us to
 * smp_commenced_mask before going online and enabling interrupts.
 */
void __cpuinit smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	/* Wait until the master releases us (see __cpu_up()). */
	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		rmb();

	set_cpu_online(cpuid, true);
	local_irq_enable();

	/* idle thread is expected to have preempt disabled */
	preempt_disable();
}

/* A secondary cpu must never return from its idle loop. */
void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave. 
-DaveM */ #define MASTER 0 #define SLAVE (SMP_CACHE_BYTES/sizeof(unsigned long)) #define NUM_ROUNDS 64 /* magic value */ #define NUM_ITERS 5 /* likewise */ static DEFINE_SPINLOCK(itc_sync_lock); static unsigned long go[SLAVE + 1]; #define DEBUG_TICK_SYNC 0 static inline long get_delta (long *rt, long *master) { unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0; unsigned long tcenter, t0, t1, tm; unsigned long i; for (i = 0; i < NUM_ITERS; i++) { t0 = tick_ops->get_tick(); go[MASTER] = 1; membar_safe("#StoreLoad"); while (!(tm = go[SLAVE])) rmb(); go[SLAVE] = 0; wmb(); t1 = tick_ops->get_tick(); if (t1 - t0 < best_t1 - best_t0) best_t0 = t0, best_t1 = t1, best_tm = tm; } *rt = best_t1 - best_t0; *master = best_tm - best_t0; /* average best_t0 and best_t1 without overflow: */ tcenter = (best_t0/2 + best_t1/2); if (best_t0 % 2 + best_t1 % 2 == 2) tcenter++; return tcenter - best_tm; } void smp_synchronize_tick_client(void) { long i, delta, adj, adjust_latency = 0, done = 0; unsigned long flags, rt, master_time_stamp; #if DEBUG_TICK_SYNC struct { long rt; /* roundtrip time */ long master; /* master's timestamp */ long diff; /* difference between midpoint and master's timestamp */ long lat; /* estimate of itc adjustment latency */ } t[NUM_ROUNDS]; #endif go[MASTER] = 1; while (go[MASTER]) rmb(); local_irq_save(flags); { for (i = 0; i < NUM_ROUNDS; i++) { delta = get_delta(&rt, &master_time_stamp); if (delta == 0) done = 1; /* let's lock on to this... 
*/ if (!done) { if (i > 0) { adjust_latency += -delta; adj = -delta + adjust_latency/4; } else adj = -delta; tick_ops->add_tick(adj); } #if DEBUG_TICK_SYNC t[i].rt = rt; t[i].master = master_time_stamp; t[i].diff = delta; t[i].lat = adjust_latency/4; #endif } } local_irq_restore(flags); #if DEBUG_TICK_SYNC for (i = 0; i < NUM_ROUNDS; i++) printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n", t[i].rt, t[i].master, t[i].diff, t[i].lat); #endif printk(KERN_INFO "CPU %d: synchronized TICK with master CPU " "(last diff %ld cycles, maxerr %lu cycles)\n", smp_processor_id(), delta, rt); } static void smp_start_sync_tick_client(int cpu); static void smp_synchronize_one_tick(int cpu) { unsigned long flags, i; go[MASTER] = 0; smp_start_sync_tick_client(cpu); /* wait for client to be ready */ while (!go[MASTER]) rmb(); /* now let the client proceed into his loop */ go[MASTER] = 0; membar_safe("#StoreLoad"); spin_lock_irqsave(&itc_sync_lock, flags); { for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) { while (!go[MASTER]) rmb(); go[MASTER] = 0; wmb(); go[SLAVE] = tick_ops->get_tick(); membar_safe("#StoreLoad"); } } spin_unlock_irqrestore(&itc_sync_lock, flags); } #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU) /* XXX Put this in some common place. 
XXX */ static unsigned long kimage_addr_to_ra(void *p) { unsigned long val = (unsigned long) p; return kern_base + (val - KERNBASE); } static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, void **descrp) { extern unsigned long sparc64_ttable_tl0; extern unsigned long kern_locked_tte_data; struct hvtramp_descr *hdesc; unsigned long trampoline_ra; struct trap_per_cpu *tb; u64 tte_vaddr, tte_data; unsigned long hv_err; int i; hdesc = kzalloc(sizeof(*hdesc) + (sizeof(struct hvtramp_mapping) * num_kernel_image_mappings - 1), GFP_KERNEL); if (!hdesc) { printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate " "hvtramp_descr.\n"); return; } *descrp = hdesc; hdesc->cpu = cpu; hdesc->num_mappings = num_kernel_image_mappings; tb = &trap_block[cpu]; hdesc->fault_info_va = (unsigned long) &tb->fault_info; hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info); hdesc->thread_reg = thread_reg; tte_vaddr = (unsigned long) KERNBASE; tte_data = kern_locked_tte_data; for (i = 0; i < hdesc->num_mappings; i++) { hdesc->maps[i].vaddr = tte_vaddr; hdesc->maps[i].tte = tte_data; tte_vaddr += 0x400000; tte_data += 0x400000; } trampoline_ra = kimage_addr_to_ra(hv_cpu_startup); hv_err = sun4v_cpu_start(cpu, trampoline_ra, kimage_addr_to_ra(&sparc64_ttable_tl0), __pa(hdesc)); if (hv_err) printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() " "gives error %lu\n", hv_err); } #endif extern unsigned long sparc64_cpu_startup; /* The OBP cpu startup callback truncates the 3rd arg cookie to * 32-bits (I think) so to be safe we have it read the pointer * contained here so we work on >4GB machines. 
-DaveM */ static struct thread_info *cpu_new_thread = NULL; static int __cpuinit smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle) { unsigned long entry = (unsigned long)(&sparc64_cpu_startup); unsigned long cookie = (unsigned long)(&cpu_new_thread); void *descr = NULL; int timeout, ret; callin_flag = 0; cpu_new_thread = task_thread_info(idle); if (tlb_type == hypervisor) { #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU) if (ldom_domaining_enabled) ldom_startcpu_cpuid(cpu, (unsigned long) cpu_new_thread, &descr); else #endif prom_startcpu_cpuid(cpu, entry, cookie); } else { struct device_node *dp = of_find_node_by_cpuid(cpu); prom_startcpu(dp->phandle, entry, cookie); } for (timeout = 0; timeout < 50000; timeout++) { if (callin_flag) break; udelay(100); } if (callin_flag) { ret = 0; } else { printk("Processor %d is stuck.\n", cpu); ret = -ENODEV; } cpu_new_thread = NULL; kfree(descr); return ret; } static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu) { u64 result, target; int stuck, tmp; if (this_is_starfire) { /* map to real upaid */ cpu = (((cpu & 0x3c) << 1) | ((cpu & 0x40) >> 4) | (cpu & 0x3)); } target = (cpu << 14) | 0x70; again: /* Ok, this is the real Spitfire Errata #54. * One must read back from a UDB internal register * after writes to the UDB interrupt dispatch, but * before the membar Sync for that write. * So we use the high UDB control register (ASI 0x7f, * ADDR 0x20) for the dummy read. -DaveM */ tmp = 0x40; __asm__ __volatile__( "wrpr %1, %2, %%pstate\n\t" "stxa %4, [%0] %3\n\t" "stxa %5, [%0+%8] %3\n\t" "add %0, %8, %0\n\t" "stxa %6, [%0+%8] %3\n\t" "membar #Sync\n\t" "stxa %%g0, [%7] %3\n\t" "membar #Sync\n\t" "mov 0x20, %%g1\n\t" "ldxa [%%g1] 0x7f, %%g0\n\t" "membar #Sync" : "=r" (tmp) : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W), "r" (data0), "r" (data1), "r" (data2), "r" (target), "r" (0x10), "0" (tmp) : "g1"); /* NOTE: PSTATE_IE is still clear. 
*/ stuck = 100000; do { __asm__ __volatile__("ldxa [%%g0] %1, %0" : "=r" (result) : "i" (ASI_INTR_DISPATCH_STAT)); if (result == 0) { __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate)); return; } stuck -= 1; if (stuck == 0) break; } while (result & 0x1); __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate)); if (stuck == 0) { printk("CPU[%d]: mondo stuckage result[%016llx]\n", smp_processor_id(), result); } else { udelay(2); goto again; } } static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt) { u64 *mondo, data0, data1, data2; u16 *cpu_list; u64 pstate; int i; __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); cpu_list = __va(tb->cpu_list_pa); mondo = __va(tb->cpu_mondo_block_pa); data0 = mondo[0]; data1 = mondo[1]; data2 = mondo[2]; for (i = 0; i < cnt; i++) spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]); } /* Cheetah now allows to send the whole 64-bytes of data in the interrupt * packet, but we have no use for that. However we do take advantage of * the new pipelining feature (ie. dispatch to multiple cpus simultaneously). */ static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt) { int nack_busy_id, is_jbus, need_more; u64 *mondo, pstate, ver, busy_mask; u16 *cpu_list; cpu_list = __va(tb->cpu_list_pa); mondo = __va(tb->cpu_mondo_block_pa); /* Unfortunately, someone at Sun had the brilliant idea to make the * busy/nack fields hard-coded by ITID number for this Ultra-III * derivative processor. */ __asm__ ("rdpr %%ver, %0" : "=r" (ver)); is_jbus = ((ver >> 32) == __JALAPENO_ID || (ver >> 32) == __SERRANO_ID); __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); retry: need_more = 0; __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t" : : "r" (pstate), "i" (PSTATE_IE)); /* Setup the dispatch data registers. 
*/ __asm__ __volatile__("stxa %0, [%3] %6\n\t" "stxa %1, [%4] %6\n\t" "stxa %2, [%5] %6\n\t" "membar #Sync\n\t" : /* no outputs */ : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]), "r" (0x40), "r" (0x50), "r" (0x60), "i" (ASI_INTR_W)); nack_busy_id = 0; busy_mask = 0; { int i; for (i = 0; i < cnt; i++) { u64 target, nr; nr = cpu_list[i]; if (nr == 0xffff) continue; target = (nr << 14) | 0x70; if (is_jbus) { busy_mask |= (0x1UL << (nr * 2)); } else { target |= (nack_busy_id << 24); busy_mask |= (0x1UL << (nack_busy_id * 2)); } __asm__ __volatile__( "stxa %%g0, [%0] %1\n\t" "membar #Sync\n\t" : /* no outputs */ : "r" (target), "i" (ASI_INTR_W)); nack_busy_id++; if (nack_busy_id == 32) { need_more = 1; break; } } } /* Now, poll for completion. */ { u64 dispatch_stat, nack_mask; long stuck; stuck = 100000 * nack_busy_id; nack_mask = busy_mask << 1; do { __asm__ __volatile__("ldxa [%%g0] %1, %0" : "=r" (dispatch_stat) : "i" (ASI_INTR_DISPATCH_STAT)); if (!(dispatch_stat & (busy_mask | nack_mask))) { __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate)); if (unlikely(need_more)) { int i, this_cnt = 0; for (i = 0; i < cnt; i++) { if (cpu_list[i] == 0xffff) continue; cpu_list[i] = 0xffff; this_cnt++; if (this_cnt == 32) break; } goto retry; } return; } if (!--stuck) break; } while (dispatch_stat & busy_mask); __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate)); if (dispatch_stat & busy_mask) { /* Busy bits will not clear, continue instead * of freezing up on this cpu. */ printk("CPU[%d]: mondo stuckage result[%016llx]\n", smp_processor_id(), dispatch_stat); } else { int i, this_busy_nack = 0; /* Delay some random time with interrupts enabled * to prevent deadlock. */ udelay(2 * nack_busy_id); /* Clear out the mask bits for cpus which did not * NACK us. 
*/ for (i = 0; i < cnt; i++) { u64 check_mask, nr; nr = cpu_list[i]; if (nr == 0xffff) continue; if (is_jbus) check_mask = (0x2UL << (2*nr)); else check_mask = (0x2UL << this_busy_nack); if ((dispatch_stat & check_mask) == 0) cpu_list[i] = 0xffff; this_busy_nack += 2; if (this_busy_nack == 64) break; } goto retry; } } } /* Multi-cpu list version. */ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt) { int retries, this_cpu, prev_sent, i, saw_cpu_error; unsigned long status; u16 *cpu_list; this_cpu = smp_processor_id(); cpu_list = __va(tb->cpu_list_pa); saw_cpu_error = 0; retries = 0; prev_sent = 0; do { int forward_progress, n_sent; status = sun4v_cpu_mondo_send(cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa); /* HV_EOK means all cpus received the xcall, we're done. */ if (likely(status == HV_EOK)) break; /* First, see if we made any forward progress. * * The hypervisor indicates successful sends by setting * cpu list entries to the value 0xffff. */ n_sent = 0; for (i = 0; i < cnt; i++) { if (likely(cpu_list[i] == 0xffff)) n_sent++; } forward_progress = 0; if (n_sent > prev_sent) forward_progress = 1; prev_sent = n_sent; /* If we get a HV_ECPUERROR, then one or more of the cpus * in the list are in error state. Use the cpu_state() * hypervisor call to find out which cpus are in error state. */ if (unlikely(status == HV_ECPUERROR)) { for (i = 0; i < cnt; i++) { long err; u16 cpu; cpu = cpu_list[i]; if (cpu == 0xffff) continue; err = sun4v_cpu_state(cpu); if (err == HV_CPU_STATE_ERROR) { saw_cpu_error = (cpu + 1); cpu_list[i] = 0xffff; } } } else if (unlikely(status != HV_EWOULDBLOCK)) goto fatal_mondo_error; /* Don't bother rewriting the CPU list, just leave the * 0xffff and non-0xffff entries in there and the * hypervisor will do the right thing. * * Only advance timeout state if we didn't make any * forward progress. 
*/ if (unlikely(!forward_progress)) { if (unlikely(++retries > 10000)) goto fatal_mondo_timeout; /* Delay a little bit to let other cpus catch up * on their cpu mondo queue work. */ udelay(2 * cnt); } } while (1); if (unlikely(saw_cpu_error)) goto fatal_mondo_cpu_error; return; fatal_mondo_cpu_error: printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus " "(including %d) were in error state\n", this_cpu, saw_cpu_error - 1); return; fatal_mondo_timeout: printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward " " progress after %d retries.\n", this_cpu, retries); goto dump_cpu_list_and_out; fatal_mondo_error: printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n", this_cpu, status); printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) " "mondo_block_pa(%lx)\n", this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa); dump_cpu_list_and_out: printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu); for (i = 0; i < cnt; i++) printk("%u ", cpu_list[i]); printk("]\n"); } static void (*xcall_deliver_impl)(struct trap_per_cpu *, int); static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask) { struct trap_per_cpu *tb; int this_cpu, i, cnt; unsigned long flags; u16 *cpu_list; u64 *mondo; /* We have to do this whole thing with interrupts fully disabled. * Otherwise if we send an xcall from interrupt context it will * corrupt both our mondo block and cpu list state. * * One consequence of this is that we cannot use timeout mechanisms * that depend upon interrupts being delivered locally. So, for * example, we cannot sample jiffies and expect it to advance. * * Fortunately, udelay() uses %stick/%tick so we can use that. */ local_irq_save(flags); this_cpu = smp_processor_id(); tb = &trap_block[this_cpu]; mondo = __va(tb->cpu_mondo_block_pa); mondo[0] = data0; mondo[1] = data1; mondo[2] = data2; wmb(); cpu_list = __va(tb->cpu_list_pa); /* Setup the initial cpu list. 
*/ cnt = 0; for_each_cpu(i, mask) { if (i == this_cpu || !cpu_online(i)) continue; cpu_list[cnt++] = i; } if (cnt) xcall_deliver_impl(tb, cnt); local_irq_restore(flags); } /* Send cross call to all processors mentioned in MASK_P * except self. Really, there are only two cases currently, * "cpu_online_mask" and "mm_cpumask(mm)". */ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask) { u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff)); xcall_deliver(data0, data1, data2, mask); } /* Send cross call to all processors except self. */ static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2) { smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask); } extern unsigned long xcall_sync_tick; static void smp_start_sync_tick_client(int cpu) { xcall_deliver((u64) &xcall_sync_tick, 0, 0, cpumask_of(cpu)); } extern unsigned long xcall_call_function; void arch_send_call_function_ipi_mask(const struct cpumask *mask) { xcall_deliver((u64) &xcall_call_function, 0, 0, mask); } extern unsigned long xcall_call_function_single; void arch_send_call_function_single_ipi(int cpu) { xcall_deliver((u64) &xcall_call_function_single, 0, 0, cpumask_of(cpu)); } void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs) { clear_softint(1 << irq); generic_smp_call_function_interrupt(); } void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs) { clear_softint(1 << irq); generic_smp_call_function_single_interrupt(); } static void tsb_sync(void *info) { struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()]; struct mm_struct *mm = info; /* It is not valid to test "current->active_mm == mm" here. * * The value of "current" is not changed atomically with * switch_mm(). But that's OK, we just need to check the * current cpu's trap block PGD physical address. 
*/ if (tp->pgd_paddr == __pa(mm->pgd)) tsb_context_switch(mm); } void smp_tsb_sync(struct mm_struct *mm) { smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1); } extern unsigned long xcall_flush_tlb_mm; extern unsigned long xcall_flush_tlb_page; extern unsigned long xcall_flush_tlb_kernel_range; extern unsigned long xcall_fetch_glob_regs; extern unsigned long xcall_fetch_glob_pmu; extern unsigned long xcall_fetch_glob_pmu_n4; extern unsigned long xcall_receive_signal; extern unsigned long xcall_new_mmu_context_version; #ifdef CONFIG_KGDB extern unsigned long xcall_kgdb_capture; #endif #ifdef DCACHE_ALIASING_POSSIBLE extern unsigned long xcall_flush_dcache_page_cheetah; #endif extern unsigned long xcall_flush_dcache_page_spitfire; #ifdef CONFIG_DEBUG_DCFLUSH extern atomic_t dcpage_flushes; extern atomic_t dcpage_flushes_xcall; #endif static inline void __local_flush_dcache_page(struct page *page) { #ifdef DCACHE_ALIASING_POSSIBLE __flush_dcache_page(page_address(page), ((tlb_type == spitfire) && page_mapping(page) != NULL)); #else if (page_mapping(page) != NULL && tlb_type == spitfire) __flush_icache_page(__pa(page_address(page))); #endif } void smp_flush_dcache_page_impl(struct page *page, int cpu) { int this_cpu; if (tlb_type == hypervisor) return; #ifdef CONFIG_DEBUG_DCFLUSH atomic_inc(&dcpage_flushes); #endif this_cpu = get_cpu(); if (cpu == this_cpu) { __local_flush_dcache_page(page); } else if (cpu_online(cpu)) { void *pg_addr = page_address(page); u64 data0 = 0; if (tlb_type == spitfire) { data0 = ((u64)&xcall_flush_dcache_page_spitfire); if (page_mapping(page) != NULL) data0 |= ((u64)1 << 32); } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { #ifdef DCACHE_ALIASING_POSSIBLE data0 = ((u64)&xcall_flush_dcache_page_cheetah); #endif } if (data0) { xcall_deliver(data0, __pa(pg_addr), (u64) pg_addr, cpumask_of(cpu)); #ifdef CONFIG_DEBUG_DCFLUSH atomic_inc(&dcpage_flushes_xcall); #endif } } put_cpu(); } void flush_dcache_page_all(struct mm_struct 
*mm, struct page *page) { void *pg_addr; u64 data0; if (tlb_type == hypervisor) return; preempt_disable(); #ifdef CONFIG_DEBUG_DCFLUSH atomic_inc(&dcpage_flushes); #endif data0 = 0; pg_addr = page_address(page); if (tlb_type == spitfire) { data0 = ((u64)&xcall_flush_dcache_page_spitfire); if (page_mapping(page) != NULL) data0 |= ((u64)1 << 32); } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { #ifdef DCACHE_ALIASING_POSSIBLE data0 = ((u64)&xcall_flush_dcache_page_cheetah); #endif } if (data0) { xcall_deliver(data0, __pa(pg_addr), (u64) pg_addr, cpu_online_mask); #ifdef CONFIG_DEBUG_DCFLUSH atomic_inc(&dcpage_flushes_xcall); #endif } __local_flush_dcache_page(page); preempt_enable(); } void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) { struct mm_struct *mm; unsigned long flags; clear_softint(1 << irq); /* See if we need to allocate a new TLB context because * the version of the one we are using is now out of date. */ mm = current->active_mm; if (unlikely(!mm || (mm == &init_mm))) return; spin_lock_irqsave(&mm->context.lock, flags); if (unlikely(!CTX_VALID(mm->context))) get_new_mmu_context(mm); spin_unlock_irqrestore(&mm->context.lock, flags); load_secondary_context(mm); __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); } void smp_new_mmu_context_version(void) { smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0); } #ifdef CONFIG_KGDB void kgdb_roundup_cpus(unsigned long flags) { smp_cross_call(&xcall_kgdb_capture, 0, 0, 0); } #endif void smp_fetch_global_regs(void) { smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0); } void smp_fetch_global_pmu(void) { if (tlb_type == hypervisor && sun4v_chip_type >= SUN4V_CHIP_NIAGARA4) smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0); else smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0); } /* We know that the window frames of the user have been flushed * to the stack before we get here because all callers of us * are flush_tlb_*() routines, and these run after 
flush_cache_*() * which performs the flushw. * * The SMP TLB coherency scheme we use works as follows: * * 1) mm->cpu_vm_mask is a bit mask of which cpus an address * space has (potentially) executed on, this is the heuristic * we use to avoid doing cross calls. * * Also, for flushing from kswapd and also for clones, we * use cpu_vm_mask as the list of cpus to make run the TLB. * * 2) TLB context numbers are shared globally across all processors * in the system, this allows us to play several games to avoid * cross calls. * * One invariant is that when a cpu switches to a process, and * that processes tsk->active_mm->cpu_vm_mask does not have the * current cpu's bit set, that tlb context is flushed locally. * * If the address space is non-shared (ie. mm->count == 1) we avoid * cross calls when we want to flush the currently running process's * tlb state. This is done by clearing all cpu bits except the current * processor's in current->mm->cpu_vm_mask and performing the * flush locally only. This will force any subsequent cpus which run * this task to flush the context from the local tlb if the process * migrates to another cpu (again). * * 3) For shared address spaces (threads) and swapping we bite the * bullet for most cases and perform the cross call (but only to * the cpus listed in cpu_vm_mask). * * The performance gain from "optimizing" away the cross call for threads is * questionable (in theory the big win for threads is the massive sharing of * address space state across processors). */ /* This currently is only used by the hugetlb arch pre-fault * hook on UltraSPARC-III+ and later when changing the pagesize * bits of the context register for an address space. 
*/
/*
 * Flush an address space's TLB context on every cpu it has run on.
 * With a single user we instead shrink mm_cpumask() to this cpu and
 * flush locally only (see the strategy comment above).
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm_cpumask(mm));

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

/* Argument bundle passed to tlb_pending_func() on remote cpus. */
struct tlb_pending_info {
	unsigned long ctx;
	unsigned long nr;
	unsigned long *vaddrs;
};

/* Remote half of smp_flush_tlb_pending(). */
static void tlb_pending_func(void *info)
{
	struct tlb_pending_info *t = info;

	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
}

/* Flush a batch of nr pending vaddrs for mm: cross-call where needed,
 * always flush locally. */
void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr,
			   unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	struct tlb_pending_info info;
	int cpu = get_cpu();

	info.ctx = ctx;
	info.nr = nr;
	info.vaddrs = vaddrs;

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
				       &info, 1);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

/* Flush a single user page for mm, remotely where needed and locally. */
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	unsigned long context = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_cross_call_masked(&xcall_flush_tlb_page,
				      context, vaddr, 0,
				      mm_cpumask(mm));

	__flush_tlb_page(context, vaddr);

	put_cpu();
}

/* Flush a kernel virtual range (page aligned) on all cpus, then here. */
void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}

/* CPU capture.
 */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

/* Nesting depth of capture requests / count of captured cpus. */
static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
/* Non-zero while captured cpus must stay parked in the jailcell. */
static unsigned long penguins_are_doing_time;

/*
 * Park every other online cpu in smp_penguin_jailcell().  Only the
 * outermost smp_capture() does the work; nesting is tracked in
 * smp_capture_depth.  Waits until all online cpus have checked in.
 */
void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

/* Release cpus parked by smp_capture() when the last capturer is done. */
void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n", smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_safe("#StoreLoad");
		atomic_dec(&smp_capture_registry);
	}
}

/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
 * set, so they can service tlb flush xcalls...
 */
extern void prom_world(int);

/* IRQ handler a captured cpu spins in until smp_release() frees it. */
void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_safe("#StoreLoad");
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}

/* /proc/profile writes can call this, don't __init it please. 
*/ int setup_profiling_timer(unsigned int multiplier) { return -EINVAL; } void __init smp_prepare_cpus(unsigned int max_cpus) { } void smp_prepare_boot_cpu(void) { } void __init smp_setup_processor_id(void) { if (tlb_type == spitfire) xcall_deliver_impl = spitfire_xcall_deliver; else if (tlb_type == cheetah || tlb_type == cheetah_plus) xcall_deliver_impl = cheetah_xcall_deliver; else xcall_deliver_impl = hypervisor_xcall_deliver; } void smp_fill_in_sib_core_maps(void) { unsigned int i; for_each_present_cpu(i) { unsigned int j; cpumask_clear(&cpu_core_map[i]); if (cpu_data(i).core_id == 0) { cpumask_set_cpu(i, &cpu_core_map[i]); continue; } for_each_present_cpu(j) { if (cpu_data(i).core_id == cpu_data(j).core_id) cpumask_set_cpu(j, &cpu_core_map[i]); } } for_each_present_cpu(i) { unsigned int j; cpumask_clear(&per_cpu(cpu_sibling_map, i)); if (cpu_data(i).proc_id == -1) { cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i)); continue; } for_each_present_cpu(j) { if (cpu_data(i).proc_id == cpu_data(j).proc_id) cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i)); } } } int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) { int ret = smp_boot_one_cpu(cpu, tidle); if (!ret) { cpumask_set_cpu(cpu, &smp_commenced_mask); while (!cpu_online(cpu)) mb(); if (!cpu_online(cpu)) { ret = -ENODEV; } else { /* On SUN4V, writes to %tick and %stick are * not allowed. 
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/* Final resting place of an offlined cpu: unhook its sun4v mondo
 * queues, leave the commenced set, then spin forever with interrupts
 * hard-disabled (PSTATE_IE cleared).
 */
void cpu_play_dead(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	idle_task_exit();

	if (tlb_type == hypervisor) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		/* Unregister all four mondo/error queues with the
		 * hypervisor (size 0 == tear down).
		 */
		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
				tb->cpu_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
				tb->dev_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
				tb->resum_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
				tb->nonresum_mondo_pa, 0);
	}

	cpumask_clear_cpu(cpu, &smp_commenced_mask);
	membar_safe("#Sync");

	local_irq_disable();

	__asm__ __volatile__(
		"rdpr %%pstate, %0\n\t"
		"wrpr %0, %1, %%pstate"
		: "=r" (pstate)
		: "i" (PSTATE_IE));

	while (1)
		barrier();
}

/* Detach the calling cpu from the topology maps in preparation for
 * taking it offline.
 */
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	cpuinfo_sparc *c;
	int i;

	/* Remove this cpu from every core/sibling map that lists it,
	 * then empty its own maps.
	 */
	for_each_cpu(i, &cpu_core_map[cpu])
		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
	cpumask_clear(&cpu_core_map[cpu]);

	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));

	c = &cpu_data(cpu);

	c->core_id = 0;
	c->proc_id = -1;

	smp_wmb();

	/* Make sure no interrupts point to this cpu.
	 */
	fixup_irqs();

	/* Briefly re-enable interrupts so any already-pending ones are
	 * serviced before we go offline.
	 */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	set_cpu_online(cpu, false);

	cpu_map_rebuild();

	return 0;
}

/* Wait (up to 100 x 100ms) for a dying cpu to drop out of the
 * commenced mask; on LDOM-capable machines additionally ask the
 * hypervisor to stop it and mark it not-present.
 */
void __cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
			break;
		msleep(100);
	}
	if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
#if defined(CONFIG_SUN_LDOMS)
		unsigned long hv_err;
		int limit = 100;

		/* Retry the hypervisor stop call a bounded number of
		 * times; it may transiently fail while the cpu winds
		 * down.
		 */
		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
				set_cpu_present(cpu, false);
				break;
			}
		} while (--limit > 0);
		if (limit <= 0) {
			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
			       hv_err);
		}
#endif
	}
}
#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
	pcr_arch_init();
}

/* Kick @cpu with the reschedule software interrupt. */
void smp_send_reschedule(int cpu)
{
	xcall_deliver((u64) &xcall_receive_signal,
		      0, 0, cpumask_of(cpu));
}

/* Softint handler for the reschedule IPI sent above. */
void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	scheduler_ipi();
}

/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
					size_t align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		/* Node has no usable memory: fall back to any-node
		 * bootmem allocation.
		 */
		ptr = __alloc_bootmem(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node(NODE_DATA(node),
					   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem(size, align, goal);
#endif
}

/* Release an area previously handed out by pcpu_alloc_bootmem(). */
static void __init pcpu_free_bootmem(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

/* Distance metric for the percpu first-chunk allocator: cpus on the
 * same node are LOCAL, everything else REMOTE.
 */
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

/* Ensure the kernel page tables can map @addr by allocating any
 * missing pud/pmd levels from bootmem.
 */
static void __init pcpu_populate_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		pmd_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, new);
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd)) {
		pte_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, new);
	}
}

/* Set up the per-cpu areas: try the embed first-chunk allocator
 * first (unless the page allocator was chosen explicitly), falling
 * back to page-granular mapping; panic if both fail.
 */
void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc = -EINVAL;

	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
					    PERCPU_DYNAMIC_RESERVE, 4 << 20,
					    pcpu_cpu_distance,
					    pcpu_alloc_bootmem,
					    pcpu_free_bootmem);
		if (rc)
			pr_warning("PERCPU: %s allocator failed (%d), "
				   "falling back to page size\n",
				   pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
					   pcpu_alloc_bootmem,
					   pcpu_free_bootmem,
					   pcpu_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta =
(unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	/* Record each cpu's offset into its percpu unit. */
	for_each_possible_cpu(cpu)
		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];

	/* Setup %g5 for the boot cpu. */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());

	of_fill_in_cpu_data();
	if (tlb_type == hypervisor)
		mdesc_fill_in_cpu_data(cpu_all_mask);
}
gpl-2.0
georgewhr/dbwrt
arch/arm/mach-prima2/common.c
330
1721
/*
 * Defines machines for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/sizes.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include "common.h"

/* Late init hook shared by all SiRF machines: power-management setup. */
void __init sirfsoc_init_late(void)
{
	sirfsoc_pm_init();
}

/* Early static I/O mappings: low-level debug UART and the SCU. */
static __init void sirfsoc_map_io(void)
{
	sirfsoc_map_lluart();
	sirfsoc_map_scu();
}

#ifdef CONFIG_ARCH_ATLAS6
/* Device-tree compatibles matched by the ATLAS6 machine descriptor. */
static const char *atlas6_dt_match[] __initdata = {
	"sirf,atlas6",
	NULL
};

DT_MACHINE_START(ATLAS6_DT, "Generic ATLAS6 (Flattened Device Tree)")
	/* Maintainer: Barry Song <baohua.song@csr.com> */
	.map_io		= sirfsoc_map_io,
	.init_late	= sirfsoc_init_late,
	.dt_compat	= atlas6_dt_match,
	.restart	= sirfsoc_restart,
MACHINE_END
#endif

#ifdef CONFIG_ARCH_PRIMA2
/* Device-tree compatibles matched by the PRIMA2 machine descriptor. */
static const char *prima2_dt_match[] __initdata = {
	"sirf,prima2",
	NULL
};

DT_MACHINE_START(PRIMA2_DT, "Generic PRIMA2 (Flattened Device Tree)")
	/* Maintainer: Barry Song <baohua.song@csr.com> */
	.map_io		= sirfsoc_map_io,
	/* NOTE(review): presumably caps the DMA zone at 256MB on
	 * PRIMA2 — confirm against the mach framework.
	 */
	.dma_zone_size	= SZ_256M,
	.init_late	= sirfsoc_init_late,
	.dt_compat	= prima2_dt_match,
	.restart	= sirfsoc_restart,
MACHINE_END
#endif

#ifdef CONFIG_ARCH_MARCO
/* Device-tree compatibles matched by the MARCO machine descriptor. */
static const char *marco_dt_match[] __initdata = {
	"sirf,marco",
	NULL
};

DT_MACHINE_START(MARCO_DT, "Generic MARCO (Flattened Device Tree)")
	/* Maintainer: Barry Song <baohua.song@csr.com> */
	.smp		= smp_ops(sirfsoc_smp_ops),
	.map_io		= sirfsoc_map_io,
	.init_late	= sirfsoc_init_late,
	.dt_compat	= marco_dt_match,
	.restart	= sirfsoc_restart,
MACHINE_END
#endif
gpl-2.0
AOSP-TEAM/android_kernel_samsung_i9100g
drivers/char/vme_scc.c
1098
30995
/* * drivers/char/vme_scc.c: MVME147, MVME162, BVME6000 SCC serial ports * implementation. * Copyright 1999 Richard Hirst <richard@sleepie.demon.co.uk> * * Based on atari_SCC.c which was * Copyright 1994-95 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de> * Partially based on PC-Linux serial.c by Linus Torvalds and Theodore Ts'o * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. * */ #include <linux/module.h> #include <linux/kdev_t.h> #include <asm/io.h> #include <linux/kernel.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/mm.h> #include <linux/serial.h> #include <linux/fcntl.h> #include <linux/major.h> #include <linux/delay.h> #include <linux/miscdevice.h> #include <linux/console.h> #include <linux/init.h> #include <asm/setup.h> #include <asm/bootinfo.h> #ifdef CONFIG_MVME147_SCC #include <asm/mvme147hw.h> #endif #ifdef CONFIG_MVME162_SCC #include <asm/mvme16xhw.h> #endif #ifdef CONFIG_BVME6000_SCC #include <asm/bvme6000hw.h> #endif #include <linux/generic_serial.h> #include "scc.h" #define CHANNEL_A 0 #define CHANNEL_B 1 #define SCC_MINOR_BASE 64 /* Shadows for all SCC write registers */ static unsigned char scc_shadow[2][16]; /* Location to access for SCC register access delay */ static volatile unsigned char *scc_del = NULL; /* To keep track of STATUS_REG state for detection of Ext/Status int source */ static unsigned char scc_last_status_reg[2]; /***************************** Prototypes *****************************/ /* Function prototypes */ static void scc_disable_tx_interrupts(void * ptr); static void scc_enable_tx_interrupts(void * ptr); static void scc_disable_rx_interrupts(void * ptr); static void scc_enable_rx_interrupts(void * ptr); static int scc_carrier_raised(struct tty_port *port); static void scc_shutdown_port(void * ptr); 
static int scc_set_real_termios(void *ptr); static void scc_hungup(void *ptr); static void scc_close(void *ptr); static int scc_chars_in_buffer(void * ptr); static int scc_open(struct tty_struct * tty, struct file * filp); static int scc_ioctl(struct tty_struct * tty, struct file * filp, unsigned int cmd, unsigned long arg); static void scc_throttle(struct tty_struct *tty); static void scc_unthrottle(struct tty_struct *tty); static irqreturn_t scc_tx_int(int irq, void *data); static irqreturn_t scc_rx_int(int irq, void *data); static irqreturn_t scc_stat_int(int irq, void *data); static irqreturn_t scc_spcond_int(int irq, void *data); static void scc_setsignals(struct scc_port *port, int dtr, int rts); static int scc_break_ctl(struct tty_struct *tty, int break_state); static struct tty_driver *scc_driver; static struct scc_port scc_ports[2]; /*--------------------------------------------------------------------------- * Interface from generic_serial.c back here *--------------------------------------------------------------------------*/ static struct real_driver scc_real_driver = { scc_disable_tx_interrupts, scc_enable_tx_interrupts, scc_disable_rx_interrupts, scc_enable_rx_interrupts, scc_shutdown_port, scc_set_real_termios, scc_chars_in_buffer, scc_close, scc_hungup, NULL }; static const struct tty_operations scc_ops = { .open = scc_open, .close = gs_close, .write = gs_write, .put_char = gs_put_char, .flush_chars = gs_flush_chars, .write_room = gs_write_room, .chars_in_buffer = gs_chars_in_buffer, .flush_buffer = gs_flush_buffer, .ioctl = scc_ioctl, .throttle = scc_throttle, .unthrottle = scc_unthrottle, .set_termios = gs_set_termios, .stop = gs_stop, .start = gs_start, .hangup = gs_hangup, .break_ctl = scc_break_ctl, }; static const struct tty_port_operations scc_port_ops = { .carrier_raised = scc_carrier_raised, }; /*---------------------------------------------------------------------------- * vme_scc_init() and support functions 
*---------------------------------------------------------------------------*/ static int __init scc_init_drivers(void) { int error; scc_driver = alloc_tty_driver(2); if (!scc_driver) return -ENOMEM; scc_driver->owner = THIS_MODULE; scc_driver->driver_name = "scc"; scc_driver->name = "ttyS"; scc_driver->major = TTY_MAJOR; scc_driver->minor_start = SCC_MINOR_BASE; scc_driver->type = TTY_DRIVER_TYPE_SERIAL; scc_driver->subtype = SERIAL_TYPE_NORMAL; scc_driver->init_termios = tty_std_termios; scc_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; scc_driver->init_termios.c_ispeed = 9600; scc_driver->init_termios.c_ospeed = 9600; scc_driver->flags = TTY_DRIVER_REAL_RAW; tty_set_operations(scc_driver, &scc_ops); if ((error = tty_register_driver(scc_driver))) { printk(KERN_ERR "scc: Couldn't register scc driver, error = %d\n", error); put_tty_driver(scc_driver); return 1; } return 0; } /* ports[] array is indexed by line no (i.e. [0] for ttyS0, [1] for ttyS1). */ static void __init scc_init_portstructs(void) { struct scc_port *port; int i; for (i = 0; i < 2; i++) { port = scc_ports + i; tty_port_init(&port->gs.port); port->gs.port.ops = &scc_port_ops; port->gs.magic = SCC_MAGIC; port->gs.close_delay = HZ/2; port->gs.closing_wait = 30 * HZ; port->gs.rd = &scc_real_driver; #ifdef NEW_WRITE_LOCKING port->gs.port_write_mutex = MUTEX; #endif init_waitqueue_head(&port->gs.port.open_wait); init_waitqueue_head(&port->gs.port.close_wait); } } #ifdef CONFIG_MVME147_SCC static int __init mvme147_scc_init(void) { struct scc_port *port; int error; printk(KERN_INFO "SCC: MVME147 Serial Driver\n"); /* Init channel A */ port = &scc_ports[0]; port->channel = CHANNEL_A; port->ctrlp = (volatile unsigned char *)M147_SCC_A_ADDR; port->datap = port->ctrlp + 1; port->port_a = &scc_ports[0]; port->port_b = &scc_ports[1]; error = request_irq(MVME147_IRQ_SCCA_TX, scc_tx_int, IRQF_DISABLED, "SCC-A TX", port); if (error) goto fail; error = request_irq(MVME147_IRQ_SCCA_STAT, 
scc_stat_int, IRQF_DISABLED, "SCC-A status", port); if (error) goto fail_free_a_tx; error = request_irq(MVME147_IRQ_SCCA_RX, scc_rx_int, IRQF_DISABLED, "SCC-A RX", port); if (error) goto fail_free_a_stat; error = request_irq(MVME147_IRQ_SCCA_SPCOND, scc_spcond_int, IRQF_DISABLED, "SCC-A special cond", port); if (error) goto fail_free_a_rx; { SCC_ACCESS_INIT(port); /* disable interrupts for this channel */ SCCwrite(INT_AND_DMA_REG, 0); /* Set the interrupt vector */ SCCwrite(INT_VECTOR_REG, MVME147_IRQ_SCC_BASE); /* Interrupt parameters: vector includes status, status low */ SCCwrite(MASTER_INT_CTRL, MIC_VEC_INCL_STAT); SCCmod(MASTER_INT_CTRL, 0xff, MIC_MASTER_INT_ENAB); } /* Init channel B */ port = &scc_ports[1]; port->channel = CHANNEL_B; port->ctrlp = (volatile unsigned char *)M147_SCC_B_ADDR; port->datap = port->ctrlp + 1; port->port_a = &scc_ports[0]; port->port_b = &scc_ports[1]; error = request_irq(MVME147_IRQ_SCCB_TX, scc_tx_int, IRQF_DISABLED, "SCC-B TX", port); if (error) goto fail_free_a_spcond; error = request_irq(MVME147_IRQ_SCCB_STAT, scc_stat_int, IRQF_DISABLED, "SCC-B status", port); if (error) goto fail_free_b_tx; error = request_irq(MVME147_IRQ_SCCB_RX, scc_rx_int, IRQF_DISABLED, "SCC-B RX", port); if (error) goto fail_free_b_stat; error = request_irq(MVME147_IRQ_SCCB_SPCOND, scc_spcond_int, IRQF_DISABLED, "SCC-B special cond", port); if (error) goto fail_free_b_rx; { SCC_ACCESS_INIT(port); /* disable interrupts for this channel */ SCCwrite(INT_AND_DMA_REG, 0); } /* Ensure interrupts are enabled in the PCC chip */ m147_pcc->serial_cntrl=PCC_LEVEL_SERIAL|PCC_INT_ENAB; /* Initialise the tty driver structures and register */ scc_init_portstructs(); scc_init_drivers(); return 0; fail_free_b_rx: free_irq(MVME147_IRQ_SCCB_RX, port); fail_free_b_stat: free_irq(MVME147_IRQ_SCCB_STAT, port); fail_free_b_tx: free_irq(MVME147_IRQ_SCCB_TX, port); fail_free_a_spcond: free_irq(MVME147_IRQ_SCCA_SPCOND, port); fail_free_a_rx: free_irq(MVME147_IRQ_SCCA_RX, port); 
fail_free_a_stat: free_irq(MVME147_IRQ_SCCA_STAT, port); fail_free_a_tx: free_irq(MVME147_IRQ_SCCA_TX, port); fail: return error; } #endif #ifdef CONFIG_MVME162_SCC static int __init mvme162_scc_init(void) { struct scc_port *port; int error; if (!(mvme16x_config & MVME16x_CONFIG_GOT_SCCA)) return (-ENODEV); printk(KERN_INFO "SCC: MVME162 Serial Driver\n"); /* Init channel A */ port = &scc_ports[0]; port->channel = CHANNEL_A; port->ctrlp = (volatile unsigned char *)MVME_SCC_A_ADDR; port->datap = port->ctrlp + 2; port->port_a = &scc_ports[0]; port->port_b = &scc_ports[1]; error = request_irq(MVME162_IRQ_SCCA_TX, scc_tx_int, IRQF_DISABLED, "SCC-A TX", port); if (error) goto fail; error = request_irq(MVME162_IRQ_SCCA_STAT, scc_stat_int, IRQF_DISABLED, "SCC-A status", port); if (error) goto fail_free_a_tx; error = request_irq(MVME162_IRQ_SCCA_RX, scc_rx_int, IRQF_DISABLED, "SCC-A RX", port); if (error) goto fail_free_a_stat; error = request_irq(MVME162_IRQ_SCCA_SPCOND, scc_spcond_int, IRQF_DISABLED, "SCC-A special cond", port); if (error) goto fail_free_a_rx; { SCC_ACCESS_INIT(port); /* disable interrupts for this channel */ SCCwrite(INT_AND_DMA_REG, 0); /* Set the interrupt vector */ SCCwrite(INT_VECTOR_REG, MVME162_IRQ_SCC_BASE); /* Interrupt parameters: vector includes status, status low */ SCCwrite(MASTER_INT_CTRL, MIC_VEC_INCL_STAT); SCCmod(MASTER_INT_CTRL, 0xff, MIC_MASTER_INT_ENAB); } /* Init channel B */ port = &scc_ports[1]; port->channel = CHANNEL_B; port->ctrlp = (volatile unsigned char *)MVME_SCC_B_ADDR; port->datap = port->ctrlp + 2; port->port_a = &scc_ports[0]; port->port_b = &scc_ports[1]; error = request_irq(MVME162_IRQ_SCCB_TX, scc_tx_int, IRQF_DISABLED, "SCC-B TX", port); if (error) goto fail_free_a_spcond; error = request_irq(MVME162_IRQ_SCCB_STAT, scc_stat_int, IRQF_DISABLED, "SCC-B status", port); if (error) goto fail_free_b_tx; error = request_irq(MVME162_IRQ_SCCB_RX, scc_rx_int, IRQF_DISABLED, "SCC-B RX", port); if (error) goto fail_free_b_stat; 
error = request_irq(MVME162_IRQ_SCCB_SPCOND, scc_spcond_int, IRQF_DISABLED, "SCC-B special cond", port); if (error) goto fail_free_b_rx; { SCC_ACCESS_INIT(port); /* Either channel will do */ /* disable interrupts for this channel */ SCCwrite(INT_AND_DMA_REG, 0); } /* Ensure interrupts are enabled in the MC2 chip */ *(volatile char *)0xfff4201d = 0x14; /* Initialise the tty driver structures and register */ scc_init_portstructs(); scc_init_drivers(); return 0; fail_free_b_rx: free_irq(MVME162_IRQ_SCCB_RX, port); fail_free_b_stat: free_irq(MVME162_IRQ_SCCB_STAT, port); fail_free_b_tx: free_irq(MVME162_IRQ_SCCB_TX, port); fail_free_a_spcond: free_irq(MVME162_IRQ_SCCA_SPCOND, port); fail_free_a_rx: free_irq(MVME162_IRQ_SCCA_RX, port); fail_free_a_stat: free_irq(MVME162_IRQ_SCCA_STAT, port); fail_free_a_tx: free_irq(MVME162_IRQ_SCCA_TX, port); fail: return error; } #endif #ifdef CONFIG_BVME6000_SCC static int __init bvme6000_scc_init(void) { struct scc_port *port; int error; printk(KERN_INFO "SCC: BVME6000 Serial Driver\n"); /* Init channel A */ port = &scc_ports[0]; port->channel = CHANNEL_A; port->ctrlp = (volatile unsigned char *)BVME_SCC_A_ADDR; port->datap = port->ctrlp + 4; port->port_a = &scc_ports[0]; port->port_b = &scc_ports[1]; error = request_irq(BVME_IRQ_SCCA_TX, scc_tx_int, IRQF_DISABLED, "SCC-A TX", port); if (error) goto fail; error = request_irq(BVME_IRQ_SCCA_STAT, scc_stat_int, IRQF_DISABLED, "SCC-A status", port); if (error) goto fail_free_a_tx; error = request_irq(BVME_IRQ_SCCA_RX, scc_rx_int, IRQF_DISABLED, "SCC-A RX", port); if (error) goto fail_free_a_stat; error = request_irq(BVME_IRQ_SCCA_SPCOND, scc_spcond_int, IRQF_DISABLED, "SCC-A special cond", port); if (error) goto fail_free_a_rx; { SCC_ACCESS_INIT(port); /* disable interrupts for this channel */ SCCwrite(INT_AND_DMA_REG, 0); /* Set the interrupt vector */ SCCwrite(INT_VECTOR_REG, BVME_IRQ_SCC_BASE); /* Interrupt parameters: vector includes status, status low */ SCCwrite(MASTER_INT_CTRL, 
MIC_VEC_INCL_STAT); SCCmod(MASTER_INT_CTRL, 0xff, MIC_MASTER_INT_ENAB); } /* Init channel B */ port = &scc_ports[1]; port->channel = CHANNEL_B; port->ctrlp = (volatile unsigned char *)BVME_SCC_B_ADDR; port->datap = port->ctrlp + 4; port->port_a = &scc_ports[0]; port->port_b = &scc_ports[1]; error = request_irq(BVME_IRQ_SCCB_TX, scc_tx_int, IRQF_DISABLED, "SCC-B TX", port); if (error) goto fail_free_a_spcond; error = request_irq(BVME_IRQ_SCCB_STAT, scc_stat_int, IRQF_DISABLED, "SCC-B status", port); if (error) goto fail_free_b_tx; error = request_irq(BVME_IRQ_SCCB_RX, scc_rx_int, IRQF_DISABLED, "SCC-B RX", port); if (error) goto fail_free_b_stat; error = request_irq(BVME_IRQ_SCCB_SPCOND, scc_spcond_int, IRQF_DISABLED, "SCC-B special cond", port); if (error) goto fail_free_b_rx; { SCC_ACCESS_INIT(port); /* Either channel will do */ /* disable interrupts for this channel */ SCCwrite(INT_AND_DMA_REG, 0); } /* Initialise the tty driver structures and register */ scc_init_portstructs(); scc_init_drivers(); return 0; fail: free_irq(BVME_IRQ_SCCA_STAT, port); fail_free_a_tx: free_irq(BVME_IRQ_SCCA_RX, port); fail_free_a_stat: free_irq(BVME_IRQ_SCCA_SPCOND, port); fail_free_a_rx: free_irq(BVME_IRQ_SCCB_TX, port); fail_free_a_spcond: free_irq(BVME_IRQ_SCCB_STAT, port); fail_free_b_tx: free_irq(BVME_IRQ_SCCB_RX, port); fail_free_b_stat: free_irq(BVME_IRQ_SCCB_SPCOND, port); fail_free_b_rx: return error; } #endif static int __init vme_scc_init(void) { int res = -ENODEV; #ifdef CONFIG_MVME147_SCC if (MACH_IS_MVME147) res = mvme147_scc_init(); #endif #ifdef CONFIG_MVME162_SCC if (MACH_IS_MVME16x) res = mvme162_scc_init(); #endif #ifdef CONFIG_BVME6000_SCC if (MACH_IS_BVME6000) res = bvme6000_scc_init(); #endif return res; } module_init(vme_scc_init); /*--------------------------------------------------------------------------- * Interrupt handlers *--------------------------------------------------------------------------*/ static irqreturn_t scc_rx_int(int irq, void *data) { 
unsigned char ch; struct scc_port *port = data; struct tty_struct *tty = port->gs.port.tty; SCC_ACCESS_INIT(port); ch = SCCread_NB(RX_DATA_REG); if (!tty) { printk(KERN_WARNING "scc_rx_int with NULL tty!\n"); SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET); return IRQ_HANDLED; } tty_insert_flip_char(tty, ch, 0); /* Check if another character is already ready; in that case, the * spcond_int() function must be used, because this character may have an * error condition that isn't signalled by the interrupt vector used! */ if (SCCread(INT_PENDING_REG) & (port->channel == CHANNEL_A ? IPR_A_RX : IPR_B_RX)) { scc_spcond_int (irq, data); return IRQ_HANDLED; } SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET); tty_flip_buffer_push(tty); return IRQ_HANDLED; } static irqreturn_t scc_spcond_int(int irq, void *data) { struct scc_port *port = data; struct tty_struct *tty = port->gs.port.tty; unsigned char stat, ch, err; int int_pending_mask = port->channel == CHANNEL_A ? IPR_A_RX : IPR_B_RX; SCC_ACCESS_INIT(port); if (!tty) { printk(KERN_WARNING "scc_spcond_int with NULL tty!\n"); SCCwrite(COMMAND_REG, CR_ERROR_RESET); SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET); return IRQ_HANDLED; } do { stat = SCCread(SPCOND_STATUS_REG); ch = SCCread_NB(RX_DATA_REG); if (stat & SCSR_RX_OVERRUN) err = TTY_OVERRUN; else if (stat & SCSR_PARITY_ERR) err = TTY_PARITY; else if (stat & SCSR_CRC_FRAME_ERR) err = TTY_FRAME; else err = 0; tty_insert_flip_char(tty, ch, err); /* ++TeSche: *All* errors have to be cleared manually, * else the condition persists for the next chars */ if (err) SCCwrite(COMMAND_REG, CR_ERROR_RESET); } while(SCCread(INT_PENDING_REG) & int_pending_mask); SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET); tty_flip_buffer_push(tty); return IRQ_HANDLED; } static irqreturn_t scc_tx_int(int irq, void *data) { struct scc_port *port = data; SCC_ACCESS_INIT(port); if (!port->gs.port.tty) { printk(KERN_WARNING "scc_tx_int with NULL tty!\n"); SCCmod (INT_AND_DMA_REG, ~IDR_TX_INT_ENAB, 0); 
SCCwrite(COMMAND_REG, CR_TX_PENDING_RESET); SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET); return IRQ_HANDLED; } while ((SCCread_NB(STATUS_REG) & SR_TX_BUF_EMPTY)) { if (port->x_char) { SCCwrite(TX_DATA_REG, port->x_char); port->x_char = 0; } else if ((port->gs.xmit_cnt <= 0) || port->gs.port.tty->stopped || port->gs.port.tty->hw_stopped) break; else { SCCwrite(TX_DATA_REG, port->gs.xmit_buf[port->gs.xmit_tail++]); port->gs.xmit_tail = port->gs.xmit_tail & (SERIAL_XMIT_SIZE-1); if (--port->gs.xmit_cnt <= 0) break; } } if ((port->gs.xmit_cnt <= 0) || port->gs.port.tty->stopped || port->gs.port.tty->hw_stopped) { /* disable tx interrupts */ SCCmod (INT_AND_DMA_REG, ~IDR_TX_INT_ENAB, 0); SCCwrite(COMMAND_REG, CR_TX_PENDING_RESET); /* disable tx_int on next tx underrun? */ port->gs.port.flags &= ~GS_TX_INTEN; } if (port->gs.port.tty && port->gs.xmit_cnt <= port->gs.wakeup_chars) tty_wakeup(port->gs.port.tty); SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET); return IRQ_HANDLED; } static irqreturn_t scc_stat_int(int irq, void *data) { struct scc_port *port = data; unsigned channel = port->channel; unsigned char last_sr, sr, changed; SCC_ACCESS_INIT(port); last_sr = scc_last_status_reg[channel]; sr = scc_last_status_reg[channel] = SCCread_NB(STATUS_REG); changed = last_sr ^ sr; if (changed & SR_DCD) { port->c_dcd = !!(sr & SR_DCD); if (!(port->gs.port.flags & ASYNC_CHECK_CD)) ; /* Don't report DCD changes */ else if (port->c_dcd) { wake_up_interruptible(&port->gs.port.open_wait); } else { if (port->gs.port.tty) tty_hangup (port->gs.port.tty); } } SCCwrite(COMMAND_REG, CR_EXTSTAT_RESET); SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET); return IRQ_HANDLED; } /*--------------------------------------------------------------------------- * generic_serial.c callback funtions *--------------------------------------------------------------------------*/ static void scc_disable_tx_interrupts(void *ptr) { struct scc_port *port = ptr; unsigned long flags; SCC_ACCESS_INIT(port); 
local_irq_save(flags); SCCmod(INT_AND_DMA_REG, ~IDR_TX_INT_ENAB, 0); port->gs.port.flags &= ~GS_TX_INTEN; local_irq_restore(flags); } static void scc_enable_tx_interrupts(void *ptr) { struct scc_port *port = ptr; unsigned long flags; SCC_ACCESS_INIT(port); local_irq_save(flags); SCCmod(INT_AND_DMA_REG, 0xff, IDR_TX_INT_ENAB); /* restart the transmitter */ scc_tx_int (0, port); local_irq_restore(flags); } static void scc_disable_rx_interrupts(void *ptr) { struct scc_port *port = ptr; unsigned long flags; SCC_ACCESS_INIT(port); local_irq_save(flags); SCCmod(INT_AND_DMA_REG, ~(IDR_RX_INT_MASK|IDR_PARERR_AS_SPCOND|IDR_EXTSTAT_INT_ENAB), 0); local_irq_restore(flags); } static void scc_enable_rx_interrupts(void *ptr) { struct scc_port *port = ptr; unsigned long flags; SCC_ACCESS_INIT(port); local_irq_save(flags); SCCmod(INT_AND_DMA_REG, 0xff, IDR_EXTSTAT_INT_ENAB|IDR_PARERR_AS_SPCOND|IDR_RX_INT_ALL); local_irq_restore(flags); } static int scc_carrier_raised(struct tty_port *port) { struct scc_port *sc = container_of(port, struct scc_port, gs.port); unsigned channel = sc->channel; return !!(scc_last_status_reg[channel] & SR_DCD); } static void scc_shutdown_port(void *ptr) { struct scc_port *port = ptr; port->gs.port.flags &= ~ GS_ACTIVE; if (port->gs.port.tty && (port->gs.port.tty->termios->c_cflag & HUPCL)) { scc_setsignals (port, 0, 0); } } static int scc_set_real_termios (void *ptr) { /* the SCC has char sizes 5,7,6,8 in that order! 
*/ static int chsize_map[4] = { 0, 2, 1, 3 }; unsigned cflag, baud, chsize, channel, brgval = 0; unsigned long flags; struct scc_port *port = ptr; SCC_ACCESS_INIT(port); if (!port->gs.port.tty || !port->gs.port.tty->termios) return 0; channel = port->channel; if (channel == CHANNEL_A) return 0; /* Settings controlled by boot PROM */ cflag = port->gs.port.tty->termios->c_cflag; baud = port->gs.baud; chsize = (cflag & CSIZE) >> 4; if (baud == 0) { /* speed == 0 -> drop DTR */ local_irq_save(flags); SCCmod(TX_CTRL_REG, ~TCR_DTR, 0); local_irq_restore(flags); return 0; } else if ((MACH_IS_MVME16x && (baud < 50 || baud > 38400)) || (MACH_IS_MVME147 && (baud < 50 || baud > 19200)) || (MACH_IS_BVME6000 &&(baud < 50 || baud > 76800))) { printk(KERN_NOTICE "SCC: Bad speed requested, %d\n", baud); return 0; } if (cflag & CLOCAL) port->gs.port.flags &= ~ASYNC_CHECK_CD; else port->gs.port.flags |= ASYNC_CHECK_CD; #ifdef CONFIG_MVME147_SCC if (MACH_IS_MVME147) brgval = (M147_SCC_PCLK + baud/2) / (16 * 2 * baud) - 2; #endif #ifdef CONFIG_MVME162_SCC if (MACH_IS_MVME16x) brgval = (MVME_SCC_PCLK + baud/2) / (16 * 2 * baud) - 2; #endif #ifdef CONFIG_BVME6000_SCC if (MACH_IS_BVME6000) brgval = (BVME_SCC_RTxC + baud/2) / (16 * 2 * baud) - 2; #endif /* Now we have all parameters and can go to set them: */ local_irq_save(flags); /* receiver's character size and auto-enables */ SCCmod(RX_CTRL_REG, ~(RCR_CHSIZE_MASK|RCR_AUTO_ENAB_MODE), (chsize_map[chsize] << 6) | ((cflag & CRTSCTS) ? RCR_AUTO_ENAB_MODE : 0)); /* parity and stop bits (both, Tx and Rx), clock mode never changes */ SCCmod (AUX1_CTRL_REG, ~(A1CR_PARITY_MASK | A1CR_MODE_MASK), ((cflag & PARENB ? (cflag & PARODD ? A1CR_PARITY_ODD : A1CR_PARITY_EVEN) : A1CR_PARITY_NONE) | (cflag & CSTOPB ? 
A1CR_MODE_ASYNC_2 : A1CR_MODE_ASYNC_1))); /* sender's character size, set DTR for valid baud rate */ SCCmod(TX_CTRL_REG, ~TCR_CHSIZE_MASK, chsize_map[chsize] << 5 | TCR_DTR); /* clock sources never change */ /* disable BRG before changing the value */ SCCmod(DPLL_CTRL_REG, ~DCR_BRG_ENAB, 0); /* BRG value */ SCCwrite(TIMER_LOW_REG, brgval & 0xff); SCCwrite(TIMER_HIGH_REG, (brgval >> 8) & 0xff); /* BRG enable, and clock source never changes */ SCCmod(DPLL_CTRL_REG, 0xff, DCR_BRG_ENAB); local_irq_restore(flags); return 0; } static int scc_chars_in_buffer (void *ptr) { struct scc_port *port = ptr; SCC_ACCESS_INIT(port); return (SCCread (SPCOND_STATUS_REG) & SCSR_ALL_SENT) ? 0 : 1; } /* Comment taken from sx.c (2.4.0): I haven't the foggiest why the decrement use count has to happen here. The whole linux serial drivers stuff needs to be redesigned. My guess is that this is a hack to minimize the impact of a bug elsewhere. Thinking about it some more. (try it sometime) Try running minicom on a serial port that is driven by a modularized driver. Have the modem hangup. Then remove the driver module. Then exit minicom. I expect an "oops". -- REW */ static void scc_hungup(void *ptr) { scc_disable_tx_interrupts(ptr); scc_disable_rx_interrupts(ptr); } static void scc_close(void *ptr) { scc_disable_tx_interrupts(ptr); scc_disable_rx_interrupts(ptr); } /*--------------------------------------------------------------------------- * Internal support functions *--------------------------------------------------------------------------*/ static void scc_setsignals(struct scc_port *port, int dtr, int rts) { unsigned long flags; unsigned char t; SCC_ACCESS_INIT(port); local_irq_save(flags); t = SCCread(TX_CTRL_REG); if (dtr >= 0) t = dtr? (t | TCR_DTR): (t & ~TCR_DTR); if (rts >= 0) t = rts? 
(t | TCR_RTS): (t & ~TCR_RTS); SCCwrite(TX_CTRL_REG, t); local_irq_restore(flags); } static void scc_send_xchar(struct tty_struct *tty, char ch) { struct scc_port *port = tty->driver_data; port->x_char = ch; if (ch) scc_enable_tx_interrupts(port); } /*--------------------------------------------------------------------------- * Driver entrypoints referenced from above *--------------------------------------------------------------------------*/ static int scc_open (struct tty_struct * tty, struct file * filp) { int line = tty->index; int retval; struct scc_port *port = &scc_ports[line]; int i, channel = port->channel; unsigned long flags; SCC_ACCESS_INIT(port); #if defined(CONFIG_MVME162_SCC) || defined(CONFIG_MVME147_SCC) static const struct { unsigned reg, val; } mvme_init_tab[] = { /* Values for MVME162 and MVME147 */ /* no parity, 1 stop bit, async, 1:16 */ { AUX1_CTRL_REG, A1CR_PARITY_NONE|A1CR_MODE_ASYNC_1|A1CR_CLKMODE_x16 }, /* parity error is special cond, ints disabled, no DMA */ { INT_AND_DMA_REG, IDR_PARERR_AS_SPCOND | IDR_RX_INT_DISAB }, /* Rx 8 bits/char, no auto enable, Rx off */ { RX_CTRL_REG, RCR_CHSIZE_8 }, /* DTR off, Tx 8 bits/char, RTS off, Tx off */ { TX_CTRL_REG, TCR_CHSIZE_8 }, /* special features off */ { AUX2_CTRL_REG, 0 }, { CLK_CTRL_REG, CCR_RXCLK_BRG | CCR_TXCLK_BRG }, { DPLL_CTRL_REG, DCR_BRG_ENAB | DCR_BRG_USE_PCLK }, /* Start Rx */ { RX_CTRL_REG, RCR_RX_ENAB | RCR_CHSIZE_8 }, /* Start Tx */ { TX_CTRL_REG, TCR_TX_ENAB | TCR_RTS | TCR_DTR | TCR_CHSIZE_8 }, /* Ext/Stat ints: DCD only */ { INT_CTRL_REG, ICR_ENAB_DCD_INT }, /* Reset Ext/Stat ints */ { COMMAND_REG, CR_EXTSTAT_RESET }, /* ...again */ { COMMAND_REG, CR_EXTSTAT_RESET }, }; #endif #if defined(CONFIG_BVME6000_SCC) static const struct { unsigned reg, val; } bvme_init_tab[] = { /* Values for BVME6000 */ /* no parity, 1 stop bit, async, 1:16 */ { AUX1_CTRL_REG, A1CR_PARITY_NONE|A1CR_MODE_ASYNC_1|A1CR_CLKMODE_x16 }, /* parity error is special cond, ints disabled, no DMA */ { 
INT_AND_DMA_REG, IDR_PARERR_AS_SPCOND | IDR_RX_INT_DISAB }, /* Rx 8 bits/char, no auto enable, Rx off */ { RX_CTRL_REG, RCR_CHSIZE_8 }, /* DTR off, Tx 8 bits/char, RTS off, Tx off */ { TX_CTRL_REG, TCR_CHSIZE_8 }, /* special features off */ { AUX2_CTRL_REG, 0 }, { CLK_CTRL_REG, CCR_RTxC_XTAL | CCR_RXCLK_BRG | CCR_TXCLK_BRG }, { DPLL_CTRL_REG, DCR_BRG_ENAB }, /* Start Rx */ { RX_CTRL_REG, RCR_RX_ENAB | RCR_CHSIZE_8 }, /* Start Tx */ { TX_CTRL_REG, TCR_TX_ENAB | TCR_RTS | TCR_DTR | TCR_CHSIZE_8 }, /* Ext/Stat ints: DCD only */ { INT_CTRL_REG, ICR_ENAB_DCD_INT }, /* Reset Ext/Stat ints */ { COMMAND_REG, CR_EXTSTAT_RESET }, /* ...again */ { COMMAND_REG, CR_EXTSTAT_RESET }, }; #endif if (!(port->gs.port.flags & ASYNC_INITIALIZED)) { local_irq_save(flags); #if defined(CONFIG_MVME147_SCC) || defined(CONFIG_MVME162_SCC) if (MACH_IS_MVME147 || MACH_IS_MVME16x) { for (i = 0; i < ARRAY_SIZE(mvme_init_tab); ++i) SCCwrite(mvme_init_tab[i].reg, mvme_init_tab[i].val); } #endif #if defined(CONFIG_BVME6000_SCC) if (MACH_IS_BVME6000) { for (i = 0; i < ARRAY_SIZE(bvme_init_tab); ++i) SCCwrite(bvme_init_tab[i].reg, bvme_init_tab[i].val); } #endif /* remember status register for detection of DCD and CTS changes */ scc_last_status_reg[channel] = SCCread(STATUS_REG); port->c_dcd = 0; /* Prevent initial 1->0 interrupt */ scc_setsignals (port, 1,1); local_irq_restore(flags); } tty->driver_data = port; port->gs.port.tty = tty; port->gs.port.count++; retval = gs_init_port(&port->gs); if (retval) { port->gs.port.count--; return retval; } port->gs.port.flags |= GS_ACTIVE; retval = gs_block_til_ready(port, filp); if (retval) { port->gs.port.count--; return retval; } port->c_dcd = tty_port_carrier_raised(&port->gs.port); scc_enable_rx_interrupts(port); return 0; } static void scc_throttle (struct tty_struct * tty) { struct scc_port *port = tty->driver_data; unsigned long flags; SCC_ACCESS_INIT(port); if (tty->termios->c_cflag & CRTSCTS) { local_irq_save(flags); SCCmod(TX_CTRL_REG, ~TCR_RTS, 0); 
local_irq_restore(flags); } if (I_IXOFF(tty)) scc_send_xchar(tty, STOP_CHAR(tty)); } static void scc_unthrottle (struct tty_struct * tty) { struct scc_port *port = tty->driver_data; unsigned long flags; SCC_ACCESS_INIT(port); if (tty->termios->c_cflag & CRTSCTS) { local_irq_save(flags); SCCmod(TX_CTRL_REG, 0xff, TCR_RTS); local_irq_restore(flags); } if (I_IXOFF(tty)) scc_send_xchar(tty, START_CHAR(tty)); } static int scc_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg) { return -ENOIOCTLCMD; } static int scc_break_ctl(struct tty_struct *tty, int break_state) { struct scc_port *port = tty->driver_data; unsigned long flags; SCC_ACCESS_INIT(port); local_irq_save(flags); SCCmod(TX_CTRL_REG, ~TCR_SEND_BREAK, break_state ? TCR_SEND_BREAK : 0); local_irq_restore(flags); return 0; } /*--------------------------------------------------------------------------- * Serial console stuff... *--------------------------------------------------------------------------*/ #define scc_delay() do { __asm__ __volatile__ (" nop; nop"); } while (0) static void scc_ch_write (char ch) { volatile char *p = NULL; #ifdef CONFIG_MVME147_SCC if (MACH_IS_MVME147) p = (volatile char *)M147_SCC_A_ADDR; #endif #ifdef CONFIG_MVME162_SCC if (MACH_IS_MVME16x) p = (volatile char *)MVME_SCC_A_ADDR; #endif #ifdef CONFIG_BVME6000_SCC if (MACH_IS_BVME6000) p = (volatile char *)BVME_SCC_A_ADDR; #endif do { scc_delay(); } while (!(*p & 4)); scc_delay(); *p = 8; scc_delay(); *p = ch; } /* The console must be locked when we get here. 
*/ static void scc_console_write (struct console *co, const char *str, unsigned count) { unsigned long flags; local_irq_save(flags); while (count--) { if (*str == '\n') scc_ch_write ('\r'); scc_ch_write (*str++); } local_irq_restore(flags); } static struct tty_driver *scc_console_device(struct console *c, int *index) { *index = c->index; return scc_driver; } static struct console sercons = { .name = "ttyS", .write = scc_console_write, .device = scc_console_device, .flags = CON_PRINTBUFFER, .index = -1, }; static int __init vme_scc_console_init(void) { if (vme_brdtype == VME_TYPE_MVME147 || vme_brdtype == VME_TYPE_MVME162 || vme_brdtype == VME_TYPE_MVME172 || vme_brdtype == VME_TYPE_BVME4000 || vme_brdtype == VME_TYPE_BVME6000) register_console(&sercons); return 0; } console_initcall(vme_scc_console_init);
gpl-2.0
ravikwow/jordan-kernel
crypto/twofish.c
1610
6425
/* * Twofish for CryptoAPI * * Originally Twofish for GPG * By Matthew Skala <mskala@ansuz.sooke.bc.ca>, July 26, 1998 * 256-bit key length added March 20, 1999 * Some modifications to reduce the text size by Werner Koch, April, 1998 * Ported to the kerneli patch by Marc Mutz <Marc@Mutz.com> * Ported to CryptoAPI by Colin Slater <hoho@tacomeat.net> * * The original author has disclaimed all copyright interest in this * code and thus put it in the public domain. The subsequent authors * have put this under the GNU General Public License. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA * * This code is a "clean room" implementation, written from the paper * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey, * Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available * through http://www.counterpane.com/twofish.html * * For background information on multiplication in finite fields, used for * the matrix operations in the key schedule, see the book _Contemporary * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the * Third Edition. 
*/ #include <asm/byteorder.h> #include <crypto/twofish.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/crypto.h> #include <linux/bitops.h> /* Macros to compute the g() function in the encryption and decryption * rounds. G1 is the straight g() function; G2 includes the 8-bit * rotation for the high 32-bit word. */ #define G1(a) \ (ctx->s[0][(a) & 0xFF]) ^ (ctx->s[1][((a) >> 8) & 0xFF]) \ ^ (ctx->s[2][((a) >> 16) & 0xFF]) ^ (ctx->s[3][(a) >> 24]) #define G2(b) \ (ctx->s[1][(b) & 0xFF]) ^ (ctx->s[2][((b) >> 8) & 0xFF]) \ ^ (ctx->s[3][((b) >> 16) & 0xFF]) ^ (ctx->s[0][(b) >> 24]) /* Encryption and decryption Feistel rounds. Each one calls the two g() * macros, does the PHT, and performs the XOR and the appropriate bit * rotations. The parameters are the round number (used to select subkeys), * and the four 32-bit chunks of the text. */ #define ENCROUND(n, a, b, c, d) \ x = G1 (a); y = G2 (b); \ x += y; y += x + ctx->k[2 * (n) + 1]; \ (c) ^= x + ctx->k[2 * (n)]; \ (c) = ror32((c), 1); \ (d) = rol32((d), 1) ^ y #define DECROUND(n, a, b, c, d) \ x = G1 (a); y = G2 (b); \ x += y; y += x; \ (d) ^= y + ctx->k[2 * (n) + 1]; \ (d) = ror32((d), 1); \ (c) = rol32((c), 1); \ (c) ^= (x + ctx->k[2 * (n)]) /* Encryption and decryption cycles; each one is simply two Feistel rounds * with the 32-bit chunks re-ordered to simulate the "swap" */ #define ENCCYCLE(n) \ ENCROUND (2 * (n), a, b, c, d); \ ENCROUND (2 * (n) + 1, c, d, a, b) #define DECCYCLE(n) \ DECROUND (2 * (n) + 1, c, d, a, b); \ DECROUND (2 * (n), a, b, c, d) /* Macros to convert the input and output bytes into 32-bit words, * and simultaneously perform the whitening step. INPACK packs word * number n into the variable named by x, using whitening subkey number m. * OUTUNPACK unpacks word number n from the variable named by x, using * whitening subkey number m. 
*/ #define INPACK(n, x, m) \ x = le32_to_cpu(src[n]) ^ ctx->w[m] #define OUTUNPACK(n, x, m) \ x ^= ctx->w[m]; \ dst[n] = cpu_to_le32(x) /* Encrypt one block. in and out may be the same. */ static void twofish_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { struct twofish_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *src = (const __le32 *)in; __le32 *dst = (__le32 *)out; /* The four 32-bit chunks of the text. */ u32 a, b, c, d; /* Temporaries used by the round function. */ u32 x, y; /* Input whitening and packing. */ INPACK (0, a, 0); INPACK (1, b, 1); INPACK (2, c, 2); INPACK (3, d, 3); /* Encryption Feistel cycles. */ ENCCYCLE (0); ENCCYCLE (1); ENCCYCLE (2); ENCCYCLE (3); ENCCYCLE (4); ENCCYCLE (5); ENCCYCLE (6); ENCCYCLE (7); /* Output whitening and unpacking. */ OUTUNPACK (0, c, 4); OUTUNPACK (1, d, 5); OUTUNPACK (2, a, 6); OUTUNPACK (3, b, 7); } /* Decrypt one block. in and out may be the same. */ static void twofish_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { struct twofish_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *src = (const __le32 *)in; __le32 *dst = (__le32 *)out; /* The four 32-bit chunks of the text. */ u32 a, b, c, d; /* Temporaries used by the round function. */ u32 x, y; /* Input whitening and packing. */ INPACK (0, c, 4); INPACK (1, d, 5); INPACK (2, a, 6); INPACK (3, b, 7); /* Encryption Feistel cycles. */ DECCYCLE (7); DECCYCLE (6); DECCYCLE (5); DECCYCLE (4); DECCYCLE (3); DECCYCLE (2); DECCYCLE (1); DECCYCLE (0); /* Output whitening and unpacking. 
*/ OUTUNPACK (0, a, 0); OUTUNPACK (1, b, 1); OUTUNPACK (2, c, 2); OUTUNPACK (3, d, 3); } static struct crypto_alg alg = { .cra_name = "twofish", .cra_driver_name = "twofish-generic", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = TF_BLOCK_SIZE, .cra_ctxsize = sizeof(struct twofish_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(alg.cra_list), .cra_u = { .cipher = { .cia_min_keysize = TF_MIN_KEY_SIZE, .cia_max_keysize = TF_MAX_KEY_SIZE, .cia_setkey = twofish_setkey, .cia_encrypt = twofish_encrypt, .cia_decrypt = twofish_decrypt } } }; static int __init twofish_mod_init(void) { return crypto_register_alg(&alg); } static void __exit twofish_mod_fini(void) { crypto_unregister_alg(&alg); } module_init(twofish_mod_init); module_exit(twofish_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION ("Twofish Cipher Algorithm");
gpl-2.0
eckucukoglu/sober-kernel
net/ipv4/tcp_vegas.c
1610
9853
/* * TCP Vegas congestion control * * This is based on the congestion detection/avoidance scheme described in * Lawrence S. Brakmo and Larry L. Peterson. * "TCP Vegas: End to end congestion avoidance on a global internet." * IEEE Journal on Selected Areas in Communication, 13(8):1465--1480, * October 1995. Available from: * ftp://ftp.cs.arizona.edu/xkernel/Papers/jsac.ps * * See http://www.cs.arizona.edu/xkernel/ for their implementation. * The main aspects that distinguish this implementation from the * Arizona Vegas implementation are: * o We do not change the loss detection or recovery mechanisms of * Linux in any way. Linux already recovers from losses quite well, * using fine-grained timers, NewReno, and FACK. * o To avoid the performance penalty imposed by increasing cwnd * only every-other RTT during slow start, we increase during * every RTT during slow start, just like Reno. * o Largely to allow continuous cwnd growth during slow start, * we use the rate at which ACKs come back as the "actual" * rate, rather than the rate at which data is sent. * o To speed convergence to the right rate, we set the cwnd * to achieve the right ("actual") rate when we exit slow start. * o To filter out the noise caused by delayed ACKs, we use the * minimum RTT sample observed during the last RTT to calculate * the actual rate. * o When the sender re-starts from idle, it waits until it has * received ACKs for an entire flight of new data before making * a cwnd adjustment decision. The original Vegas implementation * assumed senders never went idle. 
*/ #include <linux/mm.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/inet_diag.h> #include <net/tcp.h> #include "tcp_vegas.h" static int alpha = 2; static int beta = 4; static int gamma = 1; module_param(alpha, int, 0644); MODULE_PARM_DESC(alpha, "lower bound of packets in network"); module_param(beta, int, 0644); MODULE_PARM_DESC(beta, "upper bound of packets in network"); module_param(gamma, int, 0644); MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)"); /* There are several situations when we must "re-start" Vegas: * * o when a connection is established * o after an RTO * o after fast recovery * o when we send a packet and there is no outstanding * unacknowledged data (restarting an idle connection) * * In these circumstances we cannot do a Vegas calculation at the * end of the first RTT, because any calculation we do is using * stale info -- both the saved cwnd and congestion feedback are * stale. * * Instead we must wait until the completion of an RTT during * which we actually receive ACKs. */ static void vegas_enable(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); struct vegas *vegas = inet_csk_ca(sk); /* Begin taking Vegas samples next time we send something. */ vegas->doing_vegas_now = 1; /* Set the beginning of the next send window. */ vegas->beg_snd_nxt = tp->snd_nxt; vegas->cntRTT = 0; vegas->minRTT = 0x7fffffff; } /* Stop taking Vegas samples for now. */ static inline void vegas_disable(struct sock *sk) { struct vegas *vegas = inet_csk_ca(sk); vegas->doing_vegas_now = 0; } void tcp_vegas_init(struct sock *sk) { struct vegas *vegas = inet_csk_ca(sk); vegas->baseRTT = 0x7fffffff; vegas_enable(sk); } EXPORT_SYMBOL_GPL(tcp_vegas_init); /* Do RTT sampling needed for Vegas. 
* Basically we: * o min-filter RTT samples from within an RTT to get the current * propagation delay + queuing delay (we are min-filtering to try to * avoid the effects of delayed ACKs) * o min-filter RTT samples from a much longer window (forever for now) * to find the propagation delay (baseRTT) */ void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us) { struct vegas *vegas = inet_csk_ca(sk); u32 vrtt; if (rtt_us < 0) return; /* Never allow zero rtt or baseRTT */ vrtt = rtt_us + 1; /* Filter to find propagation delay: */ if (vrtt < vegas->baseRTT) vegas->baseRTT = vrtt; /* Find the min RTT during the last RTT to find * the current prop. delay + queuing delay: */ vegas->minRTT = min(vegas->minRTT, vrtt); vegas->cntRTT++; } EXPORT_SYMBOL_GPL(tcp_vegas_pkts_acked); void tcp_vegas_state(struct sock *sk, u8 ca_state) { if (ca_state == TCP_CA_Open) vegas_enable(sk); else vegas_disable(sk); } EXPORT_SYMBOL_GPL(tcp_vegas_state); /* * If the connection is idle and we are restarting, * then we don't want to do any Vegas calculations * until we get fresh RTT samples. So when we * restart, we reset our Vegas state to a clean * slate. After we get acks for this flight of * packets, _then_ we can make Vegas calculations * again. */ void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event) { if (event == CA_EVENT_CWND_RESTART || event == CA_EVENT_TX_START) tcp_vegas_init(sk); } EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event); static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp) { return min(tp->snd_ssthresh, tp->snd_cwnd-1); } static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) { struct tcp_sock *tp = tcp_sk(sk); struct vegas *vegas = inet_csk_ca(sk); if (!vegas->doing_vegas_now) { tcp_reno_cong_avoid(sk, ack, in_flight); return; } if (after(ack, vegas->beg_snd_nxt)) { /* Do the Vegas once-per-RTT cwnd adjustment. */ /* Save the extent of the current window so we can use this * at the end of the next RTT. 
*/ vegas->beg_snd_nxt = tp->snd_nxt; /* We do the Vegas calculations only if we got enough RTT * samples that we can be reasonably sure that we got * at least one RTT sample that wasn't from a delayed ACK. * If we only had 2 samples total, * then that means we're getting only 1 ACK per RTT, which * means they're almost certainly delayed ACKs. * If we have 3 samples, we should be OK. */ if (vegas->cntRTT <= 2) { /* We don't have enough RTT samples to do the Vegas * calculation, so we'll behave like Reno. */ tcp_reno_cong_avoid(sk, ack, in_flight); } else { u32 rtt, diff; u64 target_cwnd; /* We have enough RTT samples, so, using the Vegas * algorithm, we determine if we should increase or * decrease cwnd, and by how much. */ /* Pluck out the RTT we are using for the Vegas * calculations. This is the min RTT seen during the * last RTT. Taking the min filters out the effects * of delayed ACKs, at the cost of noticing congestion * a bit later. */ rtt = vegas->minRTT; /* Calculate the cwnd we should have, if we weren't * going too fast. * * This is: * (actual rate in segments) * baseRTT */ target_cwnd = (u64)tp->snd_cwnd * vegas->baseRTT; do_div(target_cwnd, rtt); /* Calculate the difference between the window we had, * and the window we would like to have. This quantity * is the "Diff" from the Arizona Vegas papers. */ diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT; if (diff > gamma && tp->snd_cwnd <= tp->snd_ssthresh) { /* Going too fast. Time to slow down * and switch to congestion avoidance. */ /* Set cwnd to match the actual rate * exactly: * cwnd = (actual rate) * baseRTT * Then we add 1 because the integer * truncation robs us of full link * utilization. */ tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1); tp->snd_ssthresh = tcp_vegas_ssthresh(tp); } else if (tp->snd_cwnd <= tp->snd_ssthresh) { /* Slow start. */ tcp_slow_start(tp); } else { /* Congestion avoidance. */ /* Figure out where we would like cwnd * to be. 
*/ if (diff > beta) { /* The old window was too fast, so * we slow down. */ tp->snd_cwnd--; tp->snd_ssthresh = tcp_vegas_ssthresh(tp); } else if (diff < alpha) { /* We don't have enough extra packets * in the network, so speed up. */ tp->snd_cwnd++; } else { /* Sending just as fast as we * should be. */ } } if (tp->snd_cwnd < 2) tp->snd_cwnd = 2; else if (tp->snd_cwnd > tp->snd_cwnd_clamp) tp->snd_cwnd = tp->snd_cwnd_clamp; tp->snd_ssthresh = tcp_current_ssthresh(sk); } /* Wipe the slate clean for the next RTT. */ vegas->cntRTT = 0; vegas->minRTT = 0x7fffffff; } /* Use normal slow start */ else if (tp->snd_cwnd <= tp->snd_ssthresh) tcp_slow_start(tp); } /* Extract info for Tcp socket info provided via netlink. */ void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb) { const struct vegas *ca = inet_csk_ca(sk); if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) { struct tcpvegas_info info = { .tcpv_enabled = ca->doing_vegas_now, .tcpv_rttcnt = ca->cntRTT, .tcpv_rtt = ca->baseRTT, .tcpv_minrtt = ca->minRTT, }; nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info); } } EXPORT_SYMBOL_GPL(tcp_vegas_get_info); static struct tcp_congestion_ops tcp_vegas __read_mostly = { .flags = TCP_CONG_RTT_STAMP, .init = tcp_vegas_init, .ssthresh = tcp_reno_ssthresh, .cong_avoid = tcp_vegas_cong_avoid, .min_cwnd = tcp_reno_min_cwnd, .pkts_acked = tcp_vegas_pkts_acked, .set_state = tcp_vegas_state, .cwnd_event = tcp_vegas_cwnd_event, .get_info = tcp_vegas_get_info, .owner = THIS_MODULE, .name = "vegas", }; static int __init tcp_vegas_register(void) { BUILD_BUG_ON(sizeof(struct vegas) > ICSK_CA_PRIV_SIZE); tcp_register_congestion_control(&tcp_vegas); return 0; } static void __exit tcp_vegas_unregister(void) { tcp_unregister_congestion_control(&tcp_vegas); } module_init(tcp_vegas_register); module_exit(tcp_vegas_unregister); MODULE_AUTHOR("Stephen Hemminger"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("TCP Vegas");
gpl-2.0
flingone/rk3066-kernel
sound/soc/blackfin/bf5xx-ad73311.c
2378
6228
/* * File: sound/soc/blackfin/bf5xx-ad73311.c * Author: Cliff Cai <Cliff.Cai@analog.com> * * Created: Thur Sep 25 2008 * Description: Board driver for ad73311 sound chip * * Modified: * Copyright 2008 Analog Devices Inc. * * Bugs: Enter bugs at http://blackfin.uclinux.org/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/gpio.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <sound/pcm_params.h> #include <asm/blackfin.h> #include <asm/cacheflush.h> #include <asm/irq.h> #include <asm/dma.h> #include <asm/portmux.h> #include "../codecs/ad73311.h" #include "bf5xx-sport.h" #include "bf5xx-i2s-pcm.h" #if CONFIG_SND_BF5XX_SPORT_NUM == 0 #define bfin_write_SPORT_TCR1 bfin_write_SPORT0_TCR1 #define bfin_read_SPORT_TCR1 bfin_read_SPORT0_TCR1 #define bfin_write_SPORT_TCR2 bfin_write_SPORT0_TCR2 #define bfin_write_SPORT_TX16 bfin_write_SPORT0_TX16 #define bfin_read_SPORT_STAT bfin_read_SPORT0_STAT #else #define bfin_write_SPORT_TCR1 bfin_write_SPORT1_TCR1 #define bfin_read_SPORT_TCR1 bfin_read_SPORT1_TCR1 #define bfin_write_SPORT_TCR2 bfin_write_SPORT1_TCR2 #define bfin_write_SPORT_TX16 bfin_write_SPORT1_TX16 #define bfin_read_SPORT_STAT 
bfin_read_SPORT1_STAT #endif #define GPIO_SE CONFIG_SND_BFIN_AD73311_SE static struct snd_soc_card bf5xx_ad73311; static int snd_ad73311_startup(void) { pr_debug("%s enter\n", __func__); /* Pull up SE pin on AD73311L */ gpio_set_value(GPIO_SE, 1); return 0; } static int snd_ad73311_configure(void) { unsigned short ctrl_regs[6]; unsigned short status = 0; int count = 0; /* DMCLK = MCLK = 16.384 MHz * SCLK = DMCLK/8 = 2.048 MHz * Sample Rate = DMCLK/2048 = 8 KHz */ ctrl_regs[0] = AD_CONTROL | AD_WRITE | CTRL_REG_B | REGB_MCDIV(0) | \ REGB_SCDIV(0) | REGB_DIRATE(0); ctrl_regs[1] = AD_CONTROL | AD_WRITE | CTRL_REG_C | REGC_PUDEV | \ REGC_PUADC | REGC_PUDAC | REGC_PUREF | REGC_REFUSE ; ctrl_regs[2] = AD_CONTROL | AD_WRITE | CTRL_REG_D | REGD_OGS(2) | \ REGD_IGS(2); ctrl_regs[3] = AD_CONTROL | AD_WRITE | CTRL_REG_E | REGE_DA(0x1f); ctrl_regs[4] = AD_CONTROL | AD_WRITE | CTRL_REG_F | REGF_SEEN ; ctrl_regs[5] = AD_CONTROL | AD_WRITE | CTRL_REG_A | REGA_MODE_DATA; local_irq_disable(); snd_ad73311_startup(); udelay(1); bfin_write_SPORT_TCR1(TFSR); bfin_write_SPORT_TCR2(0xF); SSYNC(); /* SPORT Tx Register is a 8 x 16 FIFO, all the data can be put to * FIFO before enable SPORT to transfer the data */ for (count = 0; count < 6; count++) bfin_write_SPORT_TX16(ctrl_regs[count]); SSYNC(); bfin_write_SPORT_TCR1(bfin_read_SPORT_TCR1() | TSPEN); SSYNC(); /* When TUVF is set, the data is already send out */ while (!(status & TUVF) && ++count < 10000) { udelay(1); status = bfin_read_SPORT_STAT(); SSYNC(); } bfin_write_SPORT_TCR1(bfin_read_SPORT_TCR1() & ~TSPEN); SSYNC(); local_irq_enable(); if (count >= 10000) { printk(KERN_ERR "ad73311: failed to configure codec\n"); return -1; } return 0; } static int bf5xx_probe(struct platform_device *pdev) { int err; if (gpio_request(GPIO_SE, "AD73311_SE")) { printk(KERN_ERR "%s: Failed ro request GPIO_%d\n", __func__, GPIO_SE); return -EBUSY; } gpio_direction_output(GPIO_SE, 0); err = snd_ad73311_configure(); if (err < 0) return -EFAULT; return 
0; } static int bf5xx_ad73311_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; int ret = 0; pr_debug("%s rate %d format %x\n", __func__, params_rate(params), params_format(params)); /* set cpu DAI configuration */ ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM); if (ret < 0) return ret; return 0; } static struct snd_soc_ops bf5xx_ad73311_ops = { .hw_params = bf5xx_ad73311_hw_params, }; static struct snd_soc_dai_link bf5xx_ad73311_dai[] = { { .name = "ad73311", .stream_name = "AD73311", .cpu_dai_name = "bfin-i2s.0", .codec_dai_name = "ad73311-hifi", .platform_name = "bfin-i2s-pcm-audio", .codec_name = "ad73311", .ops = &bf5xx_ad73311_ops, }, { .name = "ad73311", .stream_name = "AD73311", .cpu_dai_name = "bfin-i2s.1", .codec_dai_name = "ad73311-hifi", .platform_name = "bfin-i2s-pcm-audio", .codec_name = "ad73311", .ops = &bf5xx_ad73311_ops, }, }; static struct snd_soc_card bf5xx_ad73311 = { .name = "bfin-ad73311", .probe = bf5xx_probe, .dai_link = &bf5xx_ad73311_dai[CONFIG_SND_BF5XX_SPORT_NUM], .num_links = 1, }; static struct platform_device *bf5xx_ad73311_snd_device; static int __init bf5xx_ad73311_init(void) { int ret; pr_debug("%s enter\n", __func__); bf5xx_ad73311_snd_device = platform_device_alloc("soc-audio", -1); if (!bf5xx_ad73311_snd_device) return -ENOMEM; platform_set_drvdata(bf5xx_ad73311_snd_device, &bf5xx_ad73311); ret = platform_device_add(bf5xx_ad73311_snd_device); if (ret) platform_device_put(bf5xx_ad73311_snd_device); return ret; } static void __exit bf5xx_ad73311_exit(void) { pr_debug("%s enter\n", __func__); platform_device_unregister(bf5xx_ad73311_snd_device); } module_init(bf5xx_ad73311_init); module_exit(bf5xx_ad73311_exit); /* Module information */ MODULE_AUTHOR("Cliff Cai"); MODULE_DESCRIPTION("ALSA SoC AD73311 Blackfin"); MODULE_LICENSE("GPL");
gpl-2.0
ivanich/wireless-testing_torvalds
arch/sparc/kernel/pcic.c
2634
24783
/* * pcic.c: MicroSPARC-IIep PCI controller support * * Copyright (C) 1998 V. Roganov and G. Raiko * * Code is derived from Ultra/PCI PSYCHO controller support, see that * for author info. * * Support for diverse IIep based platforms by Pete Zaitcev. * CP-1200 by Eric Brower. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <asm/swift.h> /* for cache flushing. */ #include <asm/io.h> #include <linux/ctype.h> #include <linux/pci.h> #include <linux/time.h> #include <linux/timex.h> #include <linux/interrupt.h> #include <linux/export.h> #include <asm/irq.h> #include <asm/oplib.h> #include <asm/prom.h> #include <asm/pcic.h> #include <asm/timex.h> #include <asm/timer.h> #include <asm/uaccess.h> #include <asm/irq_regs.h> #include "irq.h" /* * I studied different documents and many live PROMs both from 2.30 * family and 3.xx versions. I came to the amazing conclusion: there is * absolutely no way to route interrupts in IIep systems relying on * information which PROM presents. We must hardcode interrupt routing * schematics. And this actually sucks. -- zaitcev 1999/05/12 * * To find irq for a device we determine which routing map * is in effect or, in other words, on which machine we are running. * We use PROM name for this although other techniques may be used * in special cases (Gleb reports a PROMless IIep based system). * Once we know the map we take device configuration address and * find PCIC pin number where INT line goes. Then we may either program * preferred irq into the PCIC or supply the preexisting irq to the device. 
*/ struct pcic_ca2irq { unsigned char busno; /* PCI bus number */ unsigned char devfn; /* Configuration address */ unsigned char pin; /* PCIC external interrupt pin */ unsigned char irq; /* Preferred IRQ (mappable in PCIC) */ unsigned int force; /* Enforce preferred IRQ */ }; struct pcic_sn2list { char *sysname; struct pcic_ca2irq *intmap; int mapdim; }; /* * JavaEngine-1 apparently has different versions. * * According to communications with Sun folks, for P2 build 501-4628-03: * pin 0 - parallel, audio; * pin 1 - Ethernet; * pin 2 - su; * pin 3 - PS/2 kbd and mouse. * * OEM manual (805-1486): * pin 0: Ethernet * pin 1: All EBus * pin 2: IGA (unused) * pin 3: Not connected * OEM manual says that 501-4628 & 501-4811 are the same thing, * only the latter has NAND flash in place. * * So far unofficial Sun wins over the OEM manual. Poor OEMs... */ static struct pcic_ca2irq pcic_i_je1a[] = { /* 501-4811-03 */ { 0, 0x00, 2, 12, 0 }, /* EBus: hogs all */ { 0, 0x01, 1, 6, 1 }, /* Happy Meal */ { 0, 0x80, 0, 7, 0 }, /* IGA (unused) */ }; /* XXX JS-E entry is incomplete - PCI Slot 2 address (pin 7)? */ static struct pcic_ca2irq pcic_i_jse[] = { { 0, 0x00, 0, 13, 0 }, /* Ebus - serial and keyboard */ { 0, 0x01, 1, 6, 0 }, /* hme */ { 0, 0x08, 2, 9, 0 }, /* VGA - we hope not used :) */ { 0, 0x10, 6, 8, 0 }, /* PCI INTA# in Slot 1 */ { 0, 0x18, 7, 12, 0 }, /* PCI INTA# in Slot 2, shared w. RTC */ { 0, 0x38, 4, 9, 0 }, /* All ISA devices. Read 8259. */ { 0, 0x80, 5, 11, 0 }, /* EIDE */ /* {0,0x88, 0,0,0} - unknown device... PMU? Probably no interrupt. */ { 0, 0xA0, 4, 9, 0 }, /* USB */ /* * Some pins belong to non-PCI devices, we hardcode them in drivers. * sun4m timers - irq 10, 14 * PC style RTC - pin 7, irq 4 ? * Smart card, Parallel - pin 4 shared with USB, ISA * audio - pin 3, irq 5 ? */ }; /* SPARCengine-6 was the original release name of CP1200. 
* The documentation differs between the two versions */ static struct pcic_ca2irq pcic_i_se6[] = { { 0, 0x08, 0, 2, 0 }, /* SCSI */ { 0, 0x01, 1, 6, 0 }, /* HME */ { 0, 0x00, 3, 13, 0 }, /* EBus */ }; /* * Krups (courtesy of Varol Kaptan) * No documentation available, but it was easy to guess * because it was very similar to Espresso. * * pin 0 - kbd, mouse, serial; * pin 1 - Ethernet; * pin 2 - igs (we do not use it); * pin 3 - audio; * pin 4,5,6 - unused; * pin 7 - RTC (from P2 onwards as David B. says). */ static struct pcic_ca2irq pcic_i_jk[] = { { 0, 0x00, 0, 13, 0 }, /* Ebus - serial and keyboard */ { 0, 0x01, 1, 6, 0 }, /* hme */ }; /* * Several entries in this list may point to the same routing map * as several PROMs may be installed on the same physical board. */ #define SN2L_INIT(name, map) \ { name, map, ARRAY_SIZE(map) } static struct pcic_sn2list pcic_known_sysnames[] = { SN2L_INIT("SUNW,JavaEngine1", pcic_i_je1a), /* JE1, PROM 2.32 */ SN2L_INIT("SUNW,JS-E", pcic_i_jse), /* PROLL JavaStation-E */ SN2L_INIT("SUNW,SPARCengine-6", pcic_i_se6), /* SPARCengine-6/CP-1200 */ SN2L_INIT("SUNW,JS-NC", pcic_i_jk), /* PROLL JavaStation-NC */ SN2L_INIT("SUNW,JSIIep", pcic_i_jk), /* OBP JavaStation-NC */ { NULL, NULL, 0 } }; /* * Only one PCIC per IIep, * and since we have no SMP IIep, only one per system. 
*/ static int pcic0_up; static struct linux_pcic pcic0; void __iomem *pcic_regs; volatile int pcic_speculative; volatile int pcic_trapped; /* forward */ unsigned int pcic_build_device_irq(struct platform_device *op, unsigned int real_irq); #define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (((unsigned int)bus) << 16) | (((unsigned int)device_fn) << 8) | (where & ~3)) static int pcic_read_config_dword(unsigned int busno, unsigned int devfn, int where, u32 *value) { struct linux_pcic *pcic; unsigned long flags; pcic = &pcic0; local_irq_save(flags); #if 0 /* does not fail here */ pcic_speculative = 1; pcic_trapped = 0; #endif writel(CONFIG_CMD(busno, devfn, where), pcic->pcic_config_space_addr); #if 0 /* does not fail here */ nop(); if (pcic_trapped) { local_irq_restore(flags); *value = ~0; return 0; } #endif pcic_speculative = 2; pcic_trapped = 0; *value = readl(pcic->pcic_config_space_data + (where&4)); nop(); if (pcic_trapped) { pcic_speculative = 0; local_irq_restore(flags); *value = ~0; return 0; } pcic_speculative = 0; local_irq_restore(flags); return 0; } static int pcic_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { unsigned int v; if (bus->number != 0) return -EINVAL; switch (size) { case 1: pcic_read_config_dword(bus->number, devfn, where&~3, &v); *val = 0xff & (v >> (8*(where & 3))); return 0; case 2: if (where&1) return -EINVAL; pcic_read_config_dword(bus->number, devfn, where&~3, &v); *val = 0xffff & (v >> (8*(where & 3))); return 0; case 4: if (where&3) return -EINVAL; pcic_read_config_dword(bus->number, devfn, where&~3, val); return 0; } return -EINVAL; } static int pcic_write_config_dword(unsigned int busno, unsigned int devfn, int where, u32 value) { struct linux_pcic *pcic; unsigned long flags; pcic = &pcic0; local_irq_save(flags); writel(CONFIG_CMD(busno, devfn, where), pcic->pcic_config_space_addr); writel(value, pcic->pcic_config_space_data + (where&4)); local_irq_restore(flags); return 0; } static int 
pcic_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { unsigned int v; if (bus->number != 0) return -EINVAL; switch (size) { case 1: pcic_read_config_dword(bus->number, devfn, where&~3, &v); v = (v & ~(0xff << (8*(where&3)))) | ((0xff&val) << (8*(where&3))); return pcic_write_config_dword(bus->number, devfn, where&~3, v); case 2: if (where&1) return -EINVAL; pcic_read_config_dword(bus->number, devfn, where&~3, &v); v = (v & ~(0xffff << (8*(where&3)))) | ((0xffff&val) << (8*(where&3))); return pcic_write_config_dword(bus->number, devfn, where&~3, v); case 4: if (where&3) return -EINVAL; return pcic_write_config_dword(bus->number, devfn, where, val); } return -EINVAL; } static struct pci_ops pcic_ops = { .read = pcic_read_config, .write = pcic_write_config, }; /* * On sparc64 pcibios_init() calls pci_controller_probe(). * We want PCIC probed little ahead so that interrupt controller * would be operational. */ int __init pcic_probe(void) { struct linux_pcic *pcic; struct linux_prom_registers regs[PROMREG_MAX]; struct linux_pbm_info* pbm; char namebuf[64]; phandle node; int err; if (pcic0_up) { prom_printf("PCIC: called twice!\n"); prom_halt(); } pcic = &pcic0; node = prom_getchild (prom_root_node); node = prom_searchsiblings (node, "pci"); if (node == 0) return -ENODEV; /* * Map in PCIC register set, config space, and IO base */ err = prom_getproperty(node, "reg", (char*)regs, sizeof(regs)); if (err == 0 || err == -1) { prom_printf("PCIC: Error, cannot get PCIC registers " "from PROM.\n"); prom_halt(); } pcic0_up = 1; pcic->pcic_res_regs.name = "pcic_registers"; pcic->pcic_regs = ioremap(regs[0].phys_addr, regs[0].reg_size); if (!pcic->pcic_regs) { prom_printf("PCIC: Error, cannot map PCIC registers.\n"); prom_halt(); } pcic->pcic_res_io.name = "pcic_io"; if ((pcic->pcic_io = (unsigned long) ioremap(regs[1].phys_addr, 0x10000)) == 0) { prom_printf("PCIC: Error, cannot map PCIC IO Base.\n"); prom_halt(); } 
pcic->pcic_res_cfg_addr.name = "pcic_cfg_addr"; if ((pcic->pcic_config_space_addr = ioremap(regs[2].phys_addr, regs[2].reg_size * 2)) == 0) { prom_printf("PCIC: Error, cannot map " "PCI Configuration Space Address.\n"); prom_halt(); } /* * Docs say three least significant bits in address and data * must be the same. Thus, we need adjust size of data. */ pcic->pcic_res_cfg_data.name = "pcic_cfg_data"; if ((pcic->pcic_config_space_data = ioremap(regs[3].phys_addr, regs[3].reg_size * 2)) == 0) { prom_printf("PCIC: Error, cannot map " "PCI Configuration Space Data.\n"); prom_halt(); } pbm = &pcic->pbm; pbm->prom_node = node; prom_getstring(node, "name", namebuf, 63); namebuf[63] = 0; strcpy(pbm->prom_name, namebuf); { extern volatile int t_nmi[4]; extern int pcic_nmi_trap_patch[4]; t_nmi[0] = pcic_nmi_trap_patch[0]; t_nmi[1] = pcic_nmi_trap_patch[1]; t_nmi[2] = pcic_nmi_trap_patch[2]; t_nmi[3] = pcic_nmi_trap_patch[3]; swift_flush_dcache(); pcic_regs = pcic->pcic_regs; } prom_getstring(prom_root_node, "name", namebuf, 63); namebuf[63] = 0; { struct pcic_sn2list *p; for (p = pcic_known_sysnames; p->sysname != NULL; p++) { if (strcmp(namebuf, p->sysname) == 0) break; } pcic->pcic_imap = p->intmap; pcic->pcic_imdim = p->mapdim; } if (pcic->pcic_imap == NULL) { /* * We do not panic here for the sake of embedded systems. */ printk("PCIC: System %s is unknown, cannot route interrupts\n", namebuf); } return 0; } static void __init pcic_pbm_scan_bus(struct linux_pcic *pcic) { struct linux_pbm_info *pbm = &pcic->pbm; pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, &pcic_ops, pbm); #if 0 /* deadwood transplanted from sparc64 */ pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node); pci_record_assignments(pbm, pbm->pci_bus); pci_assign_unassigned(pbm, pbm->pci_bus); pci_fixup_irq(pbm, pbm->pci_bus); #endif } /* * Main entry point from the PCI subsystem. */ static int __init pcic_init(void) { struct linux_pcic *pcic; /* * PCIC should be initialized at start of the timer. 
* So, here we report the presence of PCIC and do some magic passes. */ if(!pcic0_up) return 0; pcic = &pcic0; /* * Switch off IOTLB translation. */ writeb(PCI_DVMA_CONTROL_IOTLB_DISABLE, pcic->pcic_regs+PCI_DVMA_CONTROL); /* * Increase mapped size for PCI memory space (DMA access). * Should be done in that order (size first, address second). * Why we couldn't set up 4GB and forget about it? XXX */ writel(0xF0000000UL, pcic->pcic_regs+PCI_SIZE_0); writel(0+PCI_BASE_ADDRESS_SPACE_MEMORY, pcic->pcic_regs+PCI_BASE_ADDRESS_0); pcic_pbm_scan_bus(pcic); return 0; } int pcic_present(void) { return pcic0_up; } static int pdev_to_pnode(struct linux_pbm_info *pbm, struct pci_dev *pdev) { struct linux_prom_pci_registers regs[PROMREG_MAX]; int err; phandle node = prom_getchild(pbm->prom_node); while(node) { err = prom_getproperty(node, "reg", (char *)&regs[0], sizeof(regs)); if(err != 0 && err != -1) { unsigned long devfn = (regs[0].which_io >> 8) & 0xff; if(devfn == pdev->devfn) return node; } node = prom_getsibling(node); } return 0; } static inline struct pcidev_cookie *pci_devcookie_alloc(void) { return kmalloc(sizeof(struct pcidev_cookie), GFP_ATOMIC); } static void pcic_map_pci_device(struct linux_pcic *pcic, struct pci_dev *dev, int node) { char namebuf[64]; unsigned long address; unsigned long flags; int j; if (node == 0 || node == -1) { strcpy(namebuf, "???"); } else { prom_getstring(node, "name", namebuf, 63); namebuf[63] = 0; } for (j = 0; j < 6; j++) { address = dev->resource[j].start; if (address == 0) break; /* are sequential */ flags = dev->resource[j].flags; if ((flags & IORESOURCE_IO) != 0) { if (address < 0x10000) { /* * A device responds to I/O cycles on PCI. * We generate these cycles with memory * access into the fixed map (phys 0x30000000). * * Since a device driver does not want to * do ioremap() before accessing PC-style I/O, * we supply virtual, ready to access address. * * Note that request_region() * works for these devices. 
* * XXX Neat trick, but it's a *bad* idea * to shit into regions like that. * What if we want to allocate one more * PCI base address... */ dev->resource[j].start = pcic->pcic_io + address; dev->resource[j].end = 1; /* XXX */ dev->resource[j].flags = (flags & ~IORESOURCE_IO) | IORESOURCE_MEM; } else { /* * OOPS... PCI Spec allows this. Sun does * not have any devices getting above 64K * so it must be user with a weird I/O * board in a PCI slot. We must remap it * under 64K but it is not done yet. XXX */ printk("PCIC: Skipping I/O space at 0x%lx, " "this will Oops if a driver attaches " "device '%s' at %02x:%02x)\n", address, namebuf, dev->bus->number, dev->devfn); } } } } static void pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node) { struct pcic_ca2irq *p; unsigned int real_irq; int i, ivec; char namebuf[64]; if (node == 0 || node == -1) { strcpy(namebuf, "???"); } else { prom_getstring(node, "name", namebuf, sizeof(namebuf)); } if ((p = pcic->pcic_imap) == 0) { dev->irq = 0; return; } for (i = 0; i < pcic->pcic_imdim; i++) { if (p->busno == dev->bus->number && p->devfn == dev->devfn) break; p++; } if (i >= pcic->pcic_imdim) { printk("PCIC: device %s devfn %02x:%02x not found in %d\n", namebuf, dev->bus->number, dev->devfn, pcic->pcic_imdim); dev->irq = 0; return; } i = p->pin; if (i >= 0 && i < 4) { ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO); real_irq = ivec >> (i << 2) & 0xF; } else if (i >= 4 && i < 8) { ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI); real_irq = ivec >> ((i-4) << 2) & 0xF; } else { /* Corrupted map */ printk("PCIC: BAD PIN %d\n", i); for (;;) {} } /* P3 */ /* printk("PCIC: device %s pin %d ivec 0x%x irq %x\n", namebuf, i, ivec, dev->irq); */ /* real_irq means PROM did not bother to program the upper * half of PCIC. This happens on JS-E with PROM 3.11, for instance. 
*/ if (real_irq == 0 || p->force) { if (p->irq == 0 || p->irq >= 15) { /* Corrupted map */ printk("PCIC: BAD IRQ %d\n", p->irq); for (;;) {} } printk("PCIC: setting irq %d at pin %d for device %02x:%02x\n", p->irq, p->pin, dev->bus->number, dev->devfn); real_irq = p->irq; i = p->pin; if (i >= 4) { ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI); ivec &= ~(0xF << ((i - 4) << 2)); ivec |= p->irq << ((i - 4) << 2); writew(ivec, pcic->pcic_regs+PCI_INT_SELECT_HI); } else { ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO); ivec &= ~(0xF << (i << 2)); ivec |= p->irq << (i << 2); writew(ivec, pcic->pcic_regs+PCI_INT_SELECT_LO); } } dev->irq = pcic_build_device_irq(NULL, real_irq); } /* * Normally called from {do_}pci_scan_bus... */ void pcibios_fixup_bus(struct pci_bus *bus) { struct pci_dev *dev; int i, has_io, has_mem; unsigned int cmd; struct linux_pcic *pcic; /* struct linux_pbm_info* pbm = &pcic->pbm; */ int node; struct pcidev_cookie *pcp; if (!pcic0_up) { printk("pcibios_fixup_bus: no PCIC\n"); return; } pcic = &pcic0; /* * Next crud is an equivalent of pbm = pcic_bus_to_pbm(bus); */ if (bus->number != 0) { printk("pcibios_fixup_bus: nonzero bus 0x%x\n", bus->number); return; } list_for_each_entry(dev, &bus->devices, bus_list) { /* * Comment from i386 branch: * There are buggy BIOSes that forget to enable I/O and memory * access to PCI devices. We try to fix this, but we need to * be sure that the BIOS didn't forget to assign an address * to the device. 
[mj] * OBP is a case of such BIOS :-) */ has_io = has_mem = 0; for(i=0; i<6; i++) { unsigned long f = dev->resource[i].flags; if (f & IORESOURCE_IO) { has_io = 1; } else if (f & IORESOURCE_MEM) has_mem = 1; } pcic_read_config(dev->bus, dev->devfn, PCI_COMMAND, 2, &cmd); if (has_io && !(cmd & PCI_COMMAND_IO)) { printk("PCIC: Enabling I/O for device %02x:%02x\n", dev->bus->number, dev->devfn); cmd |= PCI_COMMAND_IO; pcic_write_config(dev->bus, dev->devfn, PCI_COMMAND, 2, cmd); } if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) { printk("PCIC: Enabling memory for device %02x:%02x\n", dev->bus->number, dev->devfn); cmd |= PCI_COMMAND_MEMORY; pcic_write_config(dev->bus, dev->devfn, PCI_COMMAND, 2, cmd); } node = pdev_to_pnode(&pcic->pbm, dev); if(node == 0) node = -1; /* cookies */ pcp = pci_devcookie_alloc(); pcp->pbm = &pcic->pbm; pcp->prom_node = of_find_node_by_phandle(node); dev->sysdata = pcp; /* fixing I/O to look like memory */ if ((dev->class>>16) != PCI_BASE_CLASS_BRIDGE) pcic_map_pci_device(pcic, dev, node); pcic_fill_irq(pcic, dev, node); } } /* * pcic_pin_to_irq() is exported to bus probing code */ unsigned int pcic_pin_to_irq(unsigned int pin, const char *name) { struct linux_pcic *pcic = &pcic0; unsigned int irq; unsigned int ivec; if (pin < 4) { ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO); irq = ivec >> (pin << 2) & 0xF; } else if (pin < 8) { ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI); irq = ivec >> ((pin-4) << 2) & 0xF; } else { /* Corrupted map */ printk("PCIC: BAD PIN %d FOR %s\n", pin, name); for (;;) {} /* XXX Cannot panic properly in case of PROLL */ } /* P3 */ /* printk("PCIC: dev %s pin %d ivec 0x%x irq %x\n", name, pin, ivec, irq); */ return irq; } /* Makes compiler happy */ static volatile int pcic_timer_dummy; static void pcic_clear_clock_irq(void) { pcic_timer_dummy = readl(pcic0.pcic_regs+PCI_SYS_LIMIT); } /* CPU frequency is 100 MHz, timer increments every 4 CPU clocks */ #define USECS_PER_JIFFY (1000000 / HZ) #define TICK_TIMER_LIMIT 
((100 * 1000000 / 4) / HZ) static unsigned int pcic_cycles_offset(void) { u32 value, count; value = readl(pcic0.pcic_regs + PCI_SYS_COUNTER); count = value & ~PCI_SYS_COUNTER_OVERFLOW; if (value & PCI_SYS_COUNTER_OVERFLOW) count += TICK_TIMER_LIMIT; /* * We divide all by HZ * to have microsecond resolution and to avoid overflow */ count = ((count / HZ) * USECS_PER_JIFFY) / (TICK_TIMER_LIMIT / HZ); /* Coordinate with the sparc_config.clock_rate setting */ return count * 2; } void __init pci_time_init(void) { struct linux_pcic *pcic = &pcic0; unsigned long v; int timer_irq, irq; int err; #ifndef CONFIG_SMP /* * The clock_rate is in SBUS dimension. * We take into account this in pcic_cycles_offset() */ sparc_config.clock_rate = SBUS_CLOCK_RATE / HZ; sparc_config.features |= FEAT_L10_CLOCKEVENT; #endif sparc_config.features |= FEAT_L10_CLOCKSOURCE; sparc_config.get_cycles_offset = pcic_cycles_offset; writel (TICK_TIMER_LIMIT, pcic->pcic_regs+PCI_SYS_LIMIT); /* PROM should set appropriate irq */ v = readb(pcic->pcic_regs+PCI_COUNTER_IRQ); timer_irq = PCI_COUNTER_IRQ_SYS(v); writel (PCI_COUNTER_IRQ_SET(timer_irq, 0), pcic->pcic_regs+PCI_COUNTER_IRQ); irq = pcic_build_device_irq(NULL, timer_irq); err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL); if (err) { prom_printf("time_init: unable to attach IRQ%d\n", timer_irq); prom_halt(); } local_irq_enable(); } #if 0 static void watchdog_reset() { writeb(0, pcic->pcic_regs+PCI_SYS_STATUS); } #endif resource_size_t pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t align) { return res->start; } int pcibios_enable_device(struct pci_dev *pdev, int mask) { return 0; } /* * NMI */ void pcic_nmi(unsigned int pend, struct pt_regs *regs) { pend = flip_dword(pend); if (!pcic_speculative || (pend & PCI_SYS_INT_PENDING_PIO) == 0) { /* * XXX On CP-1200 PCI #SERR may happen, we do not know * what to do about it yet. 
*/ printk("Aiee, NMI pend 0x%x pc 0x%x spec %d, hanging\n", pend, (int)regs->pc, pcic_speculative); for (;;) { } } pcic_speculative = 0; pcic_trapped = 1; regs->pc = regs->npc; regs->npc += 4; } static inline unsigned long get_irqmask(int irq_nr) { return 1 << irq_nr; } static void pcic_mask_irq(struct irq_data *data) { unsigned long mask, flags; mask = (unsigned long)data->chip_data; local_irq_save(flags); writel(mask, pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_SET); local_irq_restore(flags); } static void pcic_unmask_irq(struct irq_data *data) { unsigned long mask, flags; mask = (unsigned long)data->chip_data; local_irq_save(flags); writel(mask, pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_CLEAR); local_irq_restore(flags); } static unsigned int pcic_startup_irq(struct irq_data *data) { irq_link(data->irq); pcic_unmask_irq(data); return 0; } static struct irq_chip pcic_irq = { .name = "pcic", .irq_startup = pcic_startup_irq, .irq_mask = pcic_mask_irq, .irq_unmask = pcic_unmask_irq, }; unsigned int pcic_build_device_irq(struct platform_device *op, unsigned int real_irq) { unsigned int irq; unsigned long mask; irq = 0; mask = get_irqmask(real_irq); if (mask == 0) goto out; irq = irq_alloc(real_irq, real_irq); if (irq == 0) goto out; irq_set_chip_and_handler_name(irq, &pcic_irq, handle_level_irq, "PCIC"); irq_set_chip_data(irq, (void *)mask); out: return irq; } static void pcic_load_profile_irq(int cpu, unsigned int limit) { printk("PCIC: unimplemented code: FILE=%s LINE=%d", __FILE__, __LINE__); } void __init sun4m_pci_init_IRQ(void) { sparc_config.build_device_irq = pcic_build_device_irq; sparc_config.clear_clock_irq = pcic_clear_clock_irq; sparc_config.load_profile_irq = pcic_load_profile_irq; } /* * This probably belongs here rather than ioport.c because * we do not want this crud linked into SBus kernels. * Also, think for a moment about likes of floppy.c that * include architecture specific parts. They may want to redefine ins/outs. 
* * We do not use horrible macros here because we want to * advance pointer by sizeof(size). */ void outsb(unsigned long addr, const void *src, unsigned long count) { while (count) { count -= 1; outb(*(const char *)src, addr); src += 1; /* addr += 1; */ } } EXPORT_SYMBOL(outsb); void outsw(unsigned long addr, const void *src, unsigned long count) { while (count) { count -= 2; outw(*(const short *)src, addr); src += 2; /* addr += 2; */ } } EXPORT_SYMBOL(outsw); void outsl(unsigned long addr, const void *src, unsigned long count) { while (count) { count -= 4; outl(*(const long *)src, addr); src += 4; /* addr += 4; */ } } EXPORT_SYMBOL(outsl); void insb(unsigned long addr, void *dst, unsigned long count) { while (count) { count -= 1; *(unsigned char *)dst = inb(addr); dst += 1; /* addr += 1; */ } } EXPORT_SYMBOL(insb); void insw(unsigned long addr, void *dst, unsigned long count) { while (count) { count -= 2; *(unsigned short *)dst = inw(addr); dst += 2; /* addr += 2; */ } } EXPORT_SYMBOL(insw); void insl(unsigned long addr, void *dst, unsigned long count) { while (count) { count -= 4; /* * XXX I am sure we are in for an unaligned trap here. */ *(unsigned long *)dst = inl(addr); dst += 4; /* addr += 4; */ } } EXPORT_SYMBOL(insl); subsys_initcall(pcic_init);
gpl-2.0
jwchen1259/nevermore
drivers/base/power/domain_governor.c
3402
6706
/* * drivers/base/power/domain_governor.c - Governors for device PM domains. * * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. * * This file is released under the GPLv2. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/pm_domain.h> #include <linux/pm_qos.h> #include <linux/hrtimer.h> #ifdef CONFIG_PM_RUNTIME static int dev_update_qos_constraint(struct device *dev, void *data) { s64 *constraint_ns_p = data; s32 constraint_ns = -1; if (dev->power.subsys_data && dev->power.subsys_data->domain_data) constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns; if (constraint_ns < 0) { constraint_ns = dev_pm_qos_read_value(dev); constraint_ns *= NSEC_PER_USEC; } if (constraint_ns == 0) return 0; /* * constraint_ns cannot be negative here, because the device has been * suspended. */ if (constraint_ns < *constraint_ns_p || *constraint_ns_p == 0) *constraint_ns_p = constraint_ns; return 0; } /** * default_stop_ok - Default PM domain governor routine for stopping devices. * @dev: Device to check. */ bool default_stop_ok(struct device *dev) { struct gpd_timing_data *td = &dev_gpd_data(dev)->td; unsigned long flags; s64 constraint_ns; dev_dbg(dev, "%s()\n", __func__); spin_lock_irqsave(&dev->power.lock, flags); if (!td->constraint_changed) { bool ret = td->cached_stop_ok; spin_unlock_irqrestore(&dev->power.lock, flags); return ret; } td->constraint_changed = false; td->cached_stop_ok = false; td->effective_constraint_ns = -1; constraint_ns = __dev_pm_qos_read_value(dev); spin_unlock_irqrestore(&dev->power.lock, flags); if (constraint_ns < 0) return false; constraint_ns *= NSEC_PER_USEC; /* * We can walk the children without any additional locking, because * they all have been suspended at this point and their * effective_constraint_ns fields won't be modified in parallel with us. 
*/ if (!dev->power.ignore_children) device_for_each_child(dev, &constraint_ns, dev_update_qos_constraint); if (constraint_ns > 0) { constraint_ns -= td->start_latency_ns; if (constraint_ns == 0) return false; } td->effective_constraint_ns = constraint_ns; td->cached_stop_ok = constraint_ns > td->stop_latency_ns || constraint_ns == 0; /* * The children have been suspended already, so we don't need to take * their stop latencies into account here. */ return td->cached_stop_ok; } /** * default_power_down_ok - Default generic PM domain power off governor routine. * @pd: PM domain to check. * * This routine must be executed under the PM domain's lock. */ static bool default_power_down_ok(struct dev_pm_domain *pd) { struct generic_pm_domain *genpd = pd_to_genpd(pd); struct gpd_link *link; struct pm_domain_data *pdd; s64 min_off_time_ns; s64 off_on_time_ns; if (genpd->max_off_time_changed) { struct gpd_link *link; /* * We have to invalidate the cached results for the masters, so * use the observation that default_power_down_ok() is not * going to be called for any master until this instance * returns. */ list_for_each_entry(link, &genpd->slave_links, slave_node) link->master->max_off_time_changed = true; genpd->max_off_time_changed = false; genpd->cached_power_down_ok = false; genpd->max_off_time_ns = -1; } else { return genpd->cached_power_down_ok; } off_on_time_ns = genpd->power_off_latency_ns + genpd->power_on_latency_ns; /* * It doesn't make sense to remove power from the domain if saving * the state of all devices in it and the power off/power on operations * take too much time. * * All devices in this domain have been stopped already at this point. */ list_for_each_entry(pdd, &genpd->dev_list, list_node) { if (pdd->dev->driver) off_on_time_ns += to_gpd_data(pdd)->td.save_state_latency_ns; } min_off_time_ns = -1; /* * Check if subdomains can be off for enough time. * * All subdomains have been powered off already at this point. 
*/ list_for_each_entry(link, &genpd->master_links, master_node) { struct generic_pm_domain *sd = link->slave; s64 sd_max_off_ns = sd->max_off_time_ns; if (sd_max_off_ns < 0) continue; /* * Check if the subdomain is allowed to be off long enough for * the current domain to turn off and on (that's how much time * it will have to wait worst case). */ if (sd_max_off_ns <= off_on_time_ns) return false; if (min_off_time_ns > sd_max_off_ns || min_off_time_ns < 0) min_off_time_ns = sd_max_off_ns; } /* * Check if the devices in the domain can be off enough time. */ list_for_each_entry(pdd, &genpd->dev_list, list_node) { struct gpd_timing_data *td; s64 constraint_ns; if (!pdd->dev->driver) continue; /* * Check if the device is allowed to be off long enough for the * domain to turn off and on (that's how much time it will * have to wait worst case). */ td = &to_gpd_data(pdd)->td; constraint_ns = td->effective_constraint_ns; /* default_stop_ok() need not be called before us. */ if (constraint_ns < 0) { constraint_ns = dev_pm_qos_read_value(pdd->dev); constraint_ns *= NSEC_PER_USEC; } if (constraint_ns == 0) continue; /* * constraint_ns cannot be negative here, because the device has * been suspended. */ constraint_ns -= td->restore_state_latency_ns; if (constraint_ns <= off_on_time_ns) return false; if (min_off_time_ns > constraint_ns || min_off_time_ns < 0) min_off_time_ns = constraint_ns; } genpd->cached_power_down_ok = true; /* * If the computed minimum device off time is negative, there are no * latency constraints, so the domain can spend arbitrary time in the * "off" state. */ if (min_off_time_ns < 0) return true; /* * The difference between the computed minimum subdomain or device off * time and the time needed to turn the domain on is the maximum * theoretical time this domain can spend in the "off" state. 
*/ genpd->max_off_time_ns = min_off_time_ns - genpd->power_on_latency_ns; return true; } static bool always_on_power_down_ok(struct dev_pm_domain *domain) { return false; } #else /* !CONFIG_PM_RUNTIME */ bool default_stop_ok(struct device *dev) { return false; } #define default_power_down_ok NULL #define always_on_power_down_ok NULL #endif /* !CONFIG_PM_RUNTIME */ struct dev_power_governor simple_qos_governor = { .stop_ok = default_stop_ok, .power_down_ok = default_power_down_ok, }; /** * pm_genpd_gov_always_on - A governor implementing an always-on policy */ struct dev_power_governor pm_domain_always_on_gov = { .power_down_ok = always_on_power_down_ok, .stop_ok = default_stop_ok, };
gpl-2.0
FrozenCow/FIRE-ICE
arch/arm/mach-s3c24xx/bast-irq.c
3658
3538
/* linux/arch/arm/mach-s3c2410/bast-irq.c * * Copyright 2003-2005 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * http://www.simtec.co.uk/products/EB2410ITX/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/module.h> #include <linux/ioport.h> #include <linux/device.h> #include <linux/io.h> #include <asm/irq.h> #include <asm/mach-types.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <mach/regs-irq.h> #include "bast.h" #define irqdbf(x...) #define irqdbf2(x...) /* handle PC104 ISA interrupts from the system CPLD */ /* table of ISA irq nos to the relevant mask... 
zero means * the irq is not implemented */ static unsigned char bast_pc104_irqmasks[] = { 0, /* 0 */ 0, /* 1 */ 0, /* 2 */ 1, /* 3 */ 0, /* 4 */ 2, /* 5 */ 0, /* 6 */ 4, /* 7 */ 0, /* 8 */ 0, /* 9 */ 8, /* 10 */ 0, /* 11 */ 0, /* 12 */ 0, /* 13 */ 0, /* 14 */ 0, /* 15 */ }; static unsigned char bast_pc104_irqs[] = { 3, 5, 7, 10 }; static void bast_pc104_mask(struct irq_data *data) { unsigned long temp; temp = __raw_readb(BAST_VA_PC104_IRQMASK); temp &= ~bast_pc104_irqmasks[data->irq]; __raw_writeb(temp, BAST_VA_PC104_IRQMASK); } static void bast_pc104_maskack(struct irq_data *data) { struct irq_desc *desc = irq_desc + BAST_IRQ_ISA; bast_pc104_mask(data); desc->irq_data.chip->irq_ack(&desc->irq_data); } static void bast_pc104_unmask(struct irq_data *data) { unsigned long temp; temp = __raw_readb(BAST_VA_PC104_IRQMASK); temp |= bast_pc104_irqmasks[data->irq]; __raw_writeb(temp, BAST_VA_PC104_IRQMASK); } static struct irq_chip bast_pc104_chip = { .irq_mask = bast_pc104_mask, .irq_unmask = bast_pc104_unmask, .irq_ack = bast_pc104_maskack }; static void bast_irq_pc104_demux(unsigned int irq, struct irq_desc *desc) { unsigned int stat; unsigned int irqno; int i; stat = __raw_readb(BAST_VA_PC104_IRQREQ) & 0xf; if (unlikely(stat == 0)) { /* ack if we get an irq with nothing (ie, startup) */ desc = irq_desc + BAST_IRQ_ISA; desc->irq_data.chip->irq_ack(&desc->irq_data); } else { /* handle the IRQ */ for (i = 0; stat != 0; i++, stat >>= 1) { if (stat & 1) { irqno = bast_pc104_irqs[i]; generic_handle_irq(irqno); } } } } static __init int bast_irq_init(void) { unsigned int i; if (machine_is_bast()) { printk(KERN_INFO "BAST PC104 IRQ routing, Copyright 2005 Simtec Electronics\n"); /* zap all the IRQs */ __raw_writeb(0x0, BAST_VA_PC104_IRQMASK); irq_set_chained_handler(BAST_IRQ_ISA, bast_irq_pc104_demux); /* register our IRQs */ for (i = 0; i < 4; i++) { unsigned int irqno = bast_pc104_irqs[i]; irq_set_chip_and_handler(irqno, &bast_pc104_chip, handle_level_irq); 
set_irq_flags(irqno, IRQF_VALID); } } return 0; } arch_initcall(bast_irq_init);
gpl-2.0
qs2d/linux-3.10
fs/logfs/super.c
4170
16726
/* * fs/logfs/super.c * * As should be obvious for Linux kernel code, license is GPLv2 * * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org> * * Generally contains mount/umount code and also serves as a dump area for * any functions that don't fit elsewhere and neither justify a file of their * own. */ #include "logfs.h" #include <linux/bio.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/module.h> #include <linux/mtd/mtd.h> #include <linux/statfs.h> #include <linux/buffer_head.h> static DEFINE_MUTEX(emergency_mutex); static struct page *emergency_page; struct page *emergency_read_begin(struct address_space *mapping, pgoff_t index) { filler_t *filler = (filler_t *)mapping->a_ops->readpage; struct page *page; int err; page = read_cache_page(mapping, index, filler, NULL); if (page) return page; /* No more pages available, switch to emergency page */ printk(KERN_INFO"Logfs: Using emergency page\n"); mutex_lock(&emergency_mutex); err = filler(NULL, emergency_page); if (err) { mutex_unlock(&emergency_mutex); printk(KERN_EMERG"Logfs: Error reading emergency page\n"); return ERR_PTR(err); } return emergency_page; } void emergency_read_end(struct page *page) { if (page == emergency_page) mutex_unlock(&emergency_mutex); else page_cache_release(page); } static void dump_segfile(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); struct logfs_segment_entry se; u32 segno; for (segno = 0; segno < super->s_no_segs; segno++) { logfs_get_segment_entry(sb, segno, &se); printk("%3x: %6x %8x", segno, be32_to_cpu(se.ec_level), be32_to_cpu(se.valid)); if (++segno < super->s_no_segs) { logfs_get_segment_entry(sb, segno, &se); printk(" %6x %8x", be32_to_cpu(se.ec_level), be32_to_cpu(se.valid)); } if (++segno < super->s_no_segs) { logfs_get_segment_entry(sb, segno, &se); printk(" %6x %8x", be32_to_cpu(se.ec_level), be32_to_cpu(se.valid)); } if (++segno < super->s_no_segs) { logfs_get_segment_entry(sb, segno, &se); printk(" %6x %8x", 
be32_to_cpu(se.ec_level), be32_to_cpu(se.valid)); } printk("\n"); } } /* * logfs_crash_dump - dump debug information to device * * The LogFS superblock only occupies part of a segment. This function will * write as much debug information as it can gather into the spare space. */ void logfs_crash_dump(struct super_block *sb) { dump_segfile(sb); } /* * FIXME: There should be a reserve for root, similar to ext2. */ int logfs_statfs(struct dentry *dentry, struct kstatfs *stats) { struct super_block *sb = dentry->d_sb; struct logfs_super *super = logfs_super(sb); stats->f_type = LOGFS_MAGIC_U32; stats->f_bsize = sb->s_blocksize; stats->f_blocks = super->s_size >> LOGFS_BLOCK_BITS >> 3; stats->f_bfree = super->s_free_bytes >> sb->s_blocksize_bits; stats->f_bavail = super->s_free_bytes >> sb->s_blocksize_bits; stats->f_files = 0; stats->f_ffree = 0; stats->f_namelen = LOGFS_MAX_NAMELEN; return 0; } static int logfs_sb_set(struct super_block *sb, void *_super) { struct logfs_super *super = _super; sb->s_fs_info = super; sb->s_mtd = super->s_mtd; sb->s_bdev = super->s_bdev; #ifdef CONFIG_BLOCK if (sb->s_bdev) sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info; #endif #ifdef CONFIG_MTD if (sb->s_mtd) sb->s_bdi = sb->s_mtd->backing_dev_info; #endif return 0; } static int logfs_sb_test(struct super_block *sb, void *_super) { struct logfs_super *super = _super; struct mtd_info *mtd = super->s_mtd; if (mtd && sb->s_mtd == mtd) return 1; if (super->s_bdev && sb->s_bdev == super->s_bdev) return 1; return 0; } static void set_segment_header(struct logfs_segment_header *sh, u8 type, u8 level, u32 segno, u32 ec) { sh->pad = 0; sh->type = type; sh->level = level; sh->segno = cpu_to_be32(segno); sh->ec = cpu_to_be32(ec); sh->gec = cpu_to_be64(segno); sh->crc = logfs_crc32(sh, LOGFS_SEGMENT_HEADERSIZE, 4); } static void logfs_write_ds(struct super_block *sb, struct logfs_disk_super *ds, u32 segno, u32 ec) { struct logfs_super *super = logfs_super(sb); struct logfs_segment_header 
*sh = &ds->ds_sh; int i; memset(ds, 0, sizeof(*ds)); set_segment_header(sh, SEG_SUPER, 0, segno, ec); ds->ds_ifile_levels = super->s_ifile_levels; ds->ds_iblock_levels = super->s_iblock_levels; ds->ds_data_levels = super->s_data_levels; /* XXX: Remove */ ds->ds_segment_shift = super->s_segshift; ds->ds_block_shift = sb->s_blocksize_bits; ds->ds_write_shift = super->s_writeshift; ds->ds_filesystem_size = cpu_to_be64(super->s_size); ds->ds_segment_size = cpu_to_be32(super->s_segsize); ds->ds_bad_seg_reserve = cpu_to_be32(super->s_bad_seg_reserve); ds->ds_feature_incompat = cpu_to_be64(super->s_feature_incompat); ds->ds_feature_ro_compat= cpu_to_be64(super->s_feature_ro_compat); ds->ds_feature_compat = cpu_to_be64(super->s_feature_compat); ds->ds_feature_flags = cpu_to_be64(super->s_feature_flags); ds->ds_root_reserve = cpu_to_be64(super->s_root_reserve); ds->ds_speed_reserve = cpu_to_be64(super->s_speed_reserve); journal_for_each(i) ds->ds_journal_seg[i] = cpu_to_be32(super->s_journal_seg[i]); ds->ds_magic = cpu_to_be64(LOGFS_MAGIC); ds->ds_crc = logfs_crc32(ds, sizeof(*ds), LOGFS_SEGMENT_HEADERSIZE + 12); } static int write_one_sb(struct super_block *sb, struct page *(*find_sb)(struct super_block *sb, u64 *ofs)) { struct logfs_super *super = logfs_super(sb); struct logfs_disk_super *ds; struct logfs_segment_entry se; struct page *page; u64 ofs; u32 ec, segno; int err; page = find_sb(sb, &ofs); if (!page) return -EIO; ds = page_address(page); segno = seg_no(sb, ofs); logfs_get_segment_entry(sb, segno, &se); ec = be32_to_cpu(se.ec_level) >> 4; ec++; logfs_set_segment_erased(sb, segno, ec, 0); logfs_write_ds(sb, ds, segno, ec); err = super->s_devops->write_sb(sb, page); page_cache_release(page); return err; } int logfs_write_sb(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); int err; /* First superblock */ err = write_one_sb(sb, super->s_devops->find_first_sb); if (err) return err; /* Last superblock */ err = write_one_sb(sb, 
super->s_devops->find_last_sb); if (err) return err; return 0; } static int ds_cmp(const void *ds0, const void *ds1) { size_t len = sizeof(struct logfs_disk_super); /* We know the segment headers differ, so ignore them */ len -= LOGFS_SEGMENT_HEADERSIZE; ds0 += LOGFS_SEGMENT_HEADERSIZE; ds1 += LOGFS_SEGMENT_HEADERSIZE; return memcmp(ds0, ds1, len); } static int logfs_recover_sb(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); struct logfs_disk_super _ds0, *ds0 = &_ds0; struct logfs_disk_super _ds1, *ds1 = &_ds1; int err, valid0, valid1; /* read first superblock */ err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0); if (err) return err; /* read last superblock */ err = wbuf_read(sb, super->s_sb_ofs[1], sizeof(*ds1), ds1); if (err) return err; valid0 = logfs_check_ds(ds0) == 0; valid1 = logfs_check_ds(ds1) == 0; if (!valid0 && valid1) { printk(KERN_INFO"First superblock is invalid - fixing.\n"); return write_one_sb(sb, super->s_devops->find_first_sb); } if (valid0 && !valid1) { printk(KERN_INFO"Last superblock is invalid - fixing.\n"); return write_one_sb(sb, super->s_devops->find_last_sb); } if (valid0 && valid1 && ds_cmp(ds0, ds1)) { printk(KERN_INFO"Superblocks don't match - fixing.\n"); return logfs_write_sb(sb); } /* If neither is valid now, something's wrong. Didn't we properly * check them before?!? 
*/ BUG_ON(!valid0 && !valid1); return 0; } static int logfs_make_writeable(struct super_block *sb) { int err; err = logfs_open_segfile(sb); if (err) return err; /* Repair any broken superblock copies */ err = logfs_recover_sb(sb); if (err) return err; /* Check areas for trailing unaccounted data */ err = logfs_check_areas(sb); if (err) return err; /* Do one GC pass before any data gets dirtied */ logfs_gc_pass(sb); /* after all initializations are done, replay the journal * for rw-mounts, if necessary */ err = logfs_replay_journal(sb); if (err) return err; return 0; } static int logfs_get_sb_final(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); struct inode *rootdir; int err; /* root dir */ rootdir = logfs_iget(sb, LOGFS_INO_ROOT); if (IS_ERR(rootdir)) goto fail; sb->s_root = d_make_root(rootdir); if (!sb->s_root) goto fail; /* at that point we know that ->put_super() will be called */ super->s_erase_page = alloc_pages(GFP_KERNEL, 0); if (!super->s_erase_page) return -ENOMEM; memset(page_address(super->s_erase_page), 0xFF, PAGE_SIZE); /* FIXME: check for read-only mounts */ err = logfs_make_writeable(sb); if (err) { __free_page(super->s_erase_page); return err; } log_super("LogFS: Finished mounting\n"); return 0; fail: iput(super->s_master_inode); iput(super->s_segfile_inode); iput(super->s_mapping_inode); return -EIO; } int logfs_check_ds(struct logfs_disk_super *ds) { struct logfs_segment_header *sh = &ds->ds_sh; if (ds->ds_magic != cpu_to_be64(LOGFS_MAGIC)) return -EINVAL; if (sh->crc != logfs_crc32(sh, LOGFS_SEGMENT_HEADERSIZE, 4)) return -EINVAL; if (ds->ds_crc != logfs_crc32(ds, sizeof(*ds), LOGFS_SEGMENT_HEADERSIZE + 12)) return -EINVAL; return 0; } static struct page *find_super_block(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); struct page *first, *last; first = super->s_devops->find_first_sb(sb, &super->s_sb_ofs[0]); if (!first || IS_ERR(first)) return NULL; last = super->s_devops->find_last_sb(sb, 
&super->s_sb_ofs[1]); if (!last || IS_ERR(last)) { page_cache_release(first); return NULL; } if (!logfs_check_ds(page_address(first))) { page_cache_release(last); return first; } /* First one didn't work, try the second superblock */ if (!logfs_check_ds(page_address(last))) { page_cache_release(first); return last; } /* Neither worked, sorry folks */ page_cache_release(first); page_cache_release(last); return NULL; } static int __logfs_read_sb(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); struct page *page; struct logfs_disk_super *ds; int i; page = find_super_block(sb); if (!page) return -EINVAL; ds = page_address(page); super->s_size = be64_to_cpu(ds->ds_filesystem_size); super->s_root_reserve = be64_to_cpu(ds->ds_root_reserve); super->s_speed_reserve = be64_to_cpu(ds->ds_speed_reserve); super->s_bad_seg_reserve = be32_to_cpu(ds->ds_bad_seg_reserve); super->s_segsize = 1 << ds->ds_segment_shift; super->s_segmask = (1 << ds->ds_segment_shift) - 1; super->s_segshift = ds->ds_segment_shift; sb->s_blocksize = 1 << ds->ds_block_shift; sb->s_blocksize_bits = ds->ds_block_shift; super->s_writesize = 1 << ds->ds_write_shift; super->s_writeshift = ds->ds_write_shift; super->s_no_segs = super->s_size >> super->s_segshift; super->s_no_blocks = super->s_segsize >> sb->s_blocksize_bits; super->s_feature_incompat = be64_to_cpu(ds->ds_feature_incompat); super->s_feature_ro_compat = be64_to_cpu(ds->ds_feature_ro_compat); super->s_feature_compat = be64_to_cpu(ds->ds_feature_compat); super->s_feature_flags = be64_to_cpu(ds->ds_feature_flags); journal_for_each(i) super->s_journal_seg[i] = be32_to_cpu(ds->ds_journal_seg[i]); super->s_ifile_levels = ds->ds_ifile_levels; super->s_iblock_levels = ds->ds_iblock_levels; super->s_data_levels = ds->ds_data_levels; super->s_total_levels = super->s_ifile_levels + super->s_iblock_levels + super->s_data_levels; page_cache_release(page); return 0; } static int logfs_read_sb(struct super_block *sb, int read_only) { 
struct logfs_super *super = logfs_super(sb); int ret; super->s_btree_pool = mempool_create(32, btree_alloc, btree_free, NULL); if (!super->s_btree_pool) return -ENOMEM; btree_init_mempool64(&super->s_shadow_tree.new, super->s_btree_pool); btree_init_mempool64(&super->s_shadow_tree.old, super->s_btree_pool); btree_init_mempool32(&super->s_shadow_tree.segment_map, super->s_btree_pool); ret = logfs_init_mapping(sb); if (ret) return ret; ret = __logfs_read_sb(sb); if (ret) return ret; if (super->s_feature_incompat & ~LOGFS_FEATURES_INCOMPAT) return -EIO; if ((super->s_feature_ro_compat & ~LOGFS_FEATURES_RO_COMPAT) && !read_only) return -EIO; ret = logfs_init_rw(sb); if (ret) return ret; ret = logfs_init_areas(sb); if (ret) return ret; ret = logfs_init_gc(sb); if (ret) return ret; ret = logfs_init_journal(sb); if (ret) return ret; return 0; } static void logfs_kill_sb(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); log_super("LogFS: Start unmounting\n"); /* Alias entries slow down mount, so evict as many as possible */ sync_filesystem(sb); logfs_write_anchor(sb); free_areas(sb); /* * From this point on alias entries are simply dropped - and any * writes to the object store are considered bugs. 
*/ log_super("LogFS: Now in shutdown\n"); generic_shutdown_super(sb); super->s_flags |= LOGFS_SB_FLAG_SHUTDOWN; BUG_ON(super->s_dirty_used_bytes || super->s_dirty_free_bytes); logfs_cleanup_gc(sb); logfs_cleanup_journal(sb); logfs_cleanup_areas(sb); logfs_cleanup_rw(sb); if (super->s_erase_page) __free_page(super->s_erase_page); super->s_devops->put_device(super); logfs_mempool_destroy(super->s_btree_pool); logfs_mempool_destroy(super->s_alias_pool); kfree(super); log_super("LogFS: Finished unmounting\n"); } static struct dentry *logfs_get_sb_device(struct logfs_super *super, struct file_system_type *type, int flags) { struct super_block *sb; int err = -ENOMEM; static int mount_count; log_super("LogFS: Start mount %x\n", mount_count++); err = -EINVAL; sb = sget(type, logfs_sb_test, logfs_sb_set, flags | MS_NOATIME, super); if (IS_ERR(sb)) { super->s_devops->put_device(super); kfree(super); return ERR_CAST(sb); } if (sb->s_root) { /* Device is already in use */ super->s_devops->put_device(super); kfree(super); return dget(sb->s_root); } /* * sb->s_maxbytes is limited to 8TB. On 32bit systems, the page cache * only covers 16TB and the upper 8TB are used for indirect blocks. * On 64bit system we could bump up the limit, but that would make * the filesystem incompatible with 32bit systems. 
*/ sb->s_maxbytes = (1ull << 43) - 1; sb->s_max_links = LOGFS_LINK_MAX; sb->s_op = &logfs_super_operations; err = logfs_read_sb(sb, sb->s_flags & MS_RDONLY); if (err) goto err1; sb->s_flags |= MS_ACTIVE; err = logfs_get_sb_final(sb); if (err) { deactivate_locked_super(sb); return ERR_PTR(err); } return dget(sb->s_root); err1: /* no ->s_root, no ->put_super() */ iput(super->s_master_inode); iput(super->s_segfile_inode); iput(super->s_mapping_inode); deactivate_locked_super(sb); return ERR_PTR(err); } static struct dentry *logfs_mount(struct file_system_type *type, int flags, const char *devname, void *data) { ulong mtdnr; struct logfs_super *super; int err; super = kzalloc(sizeof(*super), GFP_KERNEL); if (!super) return ERR_PTR(-ENOMEM); mutex_init(&super->s_dirop_mutex); mutex_init(&super->s_object_alias_mutex); INIT_LIST_HEAD(&super->s_freeing_list); if (!devname) err = logfs_get_sb_bdev(super, type, devname); else if (strncmp(devname, "mtd", 3)) err = logfs_get_sb_bdev(super, type, devname); else { char *garbage; mtdnr = simple_strtoul(devname+3, &garbage, 0); if (*garbage) err = -EINVAL; else err = logfs_get_sb_mtd(super, mtdnr); } if (err) { kfree(super); return ERR_PTR(err); } return logfs_get_sb_device(super, type, flags); } static struct file_system_type logfs_fs_type = { .owner = THIS_MODULE, .name = "logfs", .mount = logfs_mount, .kill_sb = logfs_kill_sb, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("logfs"); static int __init logfs_init(void) { int ret; emergency_page = alloc_pages(GFP_KERNEL, 0); if (!emergency_page) return -ENOMEM; ret = logfs_compr_init(); if (ret) goto out1; ret = logfs_init_inode_cache(); if (ret) goto out2; ret = register_filesystem(&logfs_fs_type); if (!ret) return 0; logfs_destroy_inode_cache(); out2: logfs_compr_exit(); out1: __free_pages(emergency_page, 0); return ret; } static void __exit logfs_exit(void) { unregister_filesystem(&logfs_fs_type); logfs_destroy_inode_cache(); logfs_compr_exit(); __free_pages(emergency_page, 
0); } module_init(logfs_init); module_exit(logfs_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Joern Engel <joern@logfs.org>"); MODULE_DESCRIPTION("scalable flash filesystem");
gpl-2.0
BlissRom/kernel_oneplus_msm8974
sound/soc/omap/zoom2.c
4938
5957
/*
 * zoom2.c -- SoC audio for Zoom2
 *
 * Author: Misael Lopez Cruz <x0052729@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/clk.h>
#include <linux/platform_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>

#include <asm/mach-types.h>
#include <mach/hardware.h>
#include <mach/gpio.h>
#include <mach/board-zoom.h>
#include <plat/mcbsp.h>

/* Register descriptions for twl4030 codec part */
#include <linux/mfd/twl4030-audio.h>
#include <linux/module.h>

#include "omap-mcbsp.h"
#include "omap-pcm.h"

/* Board GPIO that switches the headset output mux on Zoom2 */
#define ZOOM2_HEADSET_MUX_GPIO		(OMAP_MAX_GPIO_LINES + 15)

/*
 * hw_params hook shared by both DAI links: feed the TWL4030 codec
 * system clock from the 26 MHz board reference for DAC and ADC.
 */
static int zoom2_hw_params(struct snd_pcm_substream *substream,
			   struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	int ret;

	/* Set the codec system clock for DAC and ADC */
	ret = snd_soc_dai_set_sysclk(codec_dai, 0, 26000000,
				     SND_SOC_CLOCK_IN);
	if (ret < 0) {
		printk(KERN_ERR "can't set codec system clock\n");
		return ret;
	}

	return 0;
}

static struct snd_soc_ops zoom2_ops = {
	.hw_params = zoom2_hw_params,
};

/* Zoom2 machine DAPM */
static const struct snd_soc_dapm_widget zoom2_twl4030_dapm_widgets[] = {
	SND_SOC_DAPM_MIC("Ext Mic", NULL),
	SND_SOC_DAPM_SPK("Ext Spk", NULL),
	SND_SOC_DAPM_MIC("Headset Mic", NULL),
	SND_SOC_DAPM_HP("Headset Stereophone", NULL),
	SND_SOC_DAPM_LINE("Aux In", NULL),
};

/* Routes from board-level widgets to TWL4030 codec pins */
static const struct snd_soc_dapm_route audio_map[] = {
	/* External Mics: MAINMIC, SUBMIC with bias*/
	{"MAINMIC", NULL, "Mic Bias 1"},
	{"SUBMIC", NULL, "Mic Bias 2"},
	{"Mic Bias 1", NULL, "Ext Mic"},
	{"Mic Bias 2", NULL, "Ext Mic"},

	/* External Speakers: HFL, HFR */
	{"Ext Spk", NULL, "HFL"},
	{"Ext Spk", NULL, "HFR"},

	/* Headset Stereophone:  HSOL, HSOR */
	{"Headset Stereophone", NULL, "HSOL"},
	{"Headset Stereophone", NULL, "HSOR"},

	/* Headset Mic: HSMIC with bias */
	{"HSMIC", NULL, "Headset Mic Bias"},
	{"Headset Mic Bias", NULL, "Headset Mic"},

	/* Aux In: AUXL, AUXR */
	{"Aux In", NULL, "AUXL"},
	{"Aux In", NULL, "AUXR"},
};

/* Mark TWL4030 codec pins that are not wired up on the Zoom2 board. */
static int zoom2_twl4030_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec;
	struct snd_soc_dapm_context *dapm = &codec->dapm;

	/* TWL4030 not connected pins */
	snd_soc_dapm_nc_pin(dapm, "CARKITMIC");
	snd_soc_dapm_nc_pin(dapm, "DIGIMIC0");
	snd_soc_dapm_nc_pin(dapm, "DIGIMIC1");
	snd_soc_dapm_nc_pin(dapm, "EARPIECE");
	snd_soc_dapm_nc_pin(dapm, "PREDRIVEL");
	snd_soc_dapm_nc_pin(dapm, "PREDRIVER");
	snd_soc_dapm_nc_pin(dapm, "CARKITL");
	snd_soc_dapm_nc_pin(dapm, "CARKITR");

	return 0;
}

/*
 * Enable the voice (PCM) interface of the TWL4030.
 * NOTE(review): this pokes the codec register through the driver's raw
 * read/write ops rather than snd_soc_read/write; kept as-is because the
 * codec register-cache semantics are not visible from this file.
 */
static int zoom2_twl4030_voice_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec;
	unsigned short reg;

	/* Enable voice interface */
	reg = codec->driver->read(codec, TWL4030_REG_VOICE_IF);
	reg |= TWL4030_VIF_DIN_EN | TWL4030_VIF_DOUT_EN | TWL4030_VIF_EN;
	codec->driver->write(codec, TWL4030_REG_VOICE_IF, reg);

	return 0;
}

/* Digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link zoom2_dai[] = {
	{
		.name = "TWL4030 I2S",
		.stream_name = "TWL4030 Audio",
		.cpu_dai_name = "omap-mcbsp.2",
		.codec_dai_name = "twl4030-hifi",
		.platform_name = "omap-pcm-audio",
		.codec_name = "twl4030-codec",
		.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
			   SND_SOC_DAIFMT_CBM_CFM,
		.init = zoom2_twl4030_init,
		.ops = &zoom2_ops,
	},
	{
		.name = "TWL4030 PCM",
		.stream_name = "TWL4030 Voice",
		.cpu_dai_name = "omap-mcbsp.3",
		.codec_dai_name = "twl4030-voice",
		.platform_name = "omap-pcm-audio",
		.codec_name = "twl4030-codec",
		.dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_IB_NF |
			   SND_SOC_DAIFMT_CBM_CFM,
		.init = zoom2_twl4030_voice_init,
		.ops = &zoom2_ops,
	},
};

/* Audio machine driver */
static struct snd_soc_card snd_soc_zoom2 = {
	.name = "Zoom2",
	.owner = THIS_MODULE,
	.dai_link = zoom2_dai,
	.num_links = ARRAY_SIZE(zoom2_dai),

	.dapm_widgets = zoom2_twl4030_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(zoom2_twl4030_dapm_widgets),
	.dapm_routes = audio_map,
	.num_dapm_routes = ARRAY_SIZE(audio_map),
};

static struct platform_device *zoom2_snd_device;

/*
 * Module init: register the "soc-audio" platform device and claim the
 * headset mux / external mute GPIOs.
 *
 * Fix vs. original: the GPIO requests used to be wrapped in
 * BUG_ON(gpio_request(...) < 0), which hides a side effect inside an
 * assertion and panics the kernel on a recoverable failure.  Failures
 * are now handled by unwinding and returning the error instead.
 */
static int __init zoom2_soc_init(void)
{
	int ret;

	if (!machine_is_omap_zoom2())
		return -ENODEV;
	printk(KERN_INFO "Zoom2 SoC init\n");

	zoom2_snd_device = platform_device_alloc("soc-audio", -1);
	if (!zoom2_snd_device) {
		printk(KERN_ERR "Platform device allocation failed\n");
		return -ENOMEM;
	}

	platform_set_drvdata(zoom2_snd_device, &snd_soc_zoom2);
	ret = platform_device_add(zoom2_snd_device);
	if (ret)
		goto err1;

	ret = gpio_request(ZOOM2_HEADSET_MUX_GPIO, "hs_mux");
	if (ret) {
		printk(KERN_ERR "Failed to request hs_mux GPIO\n");
		goto err2;
	}
	gpio_direction_output(ZOOM2_HEADSET_MUX_GPIO, 0);

	ret = gpio_request(ZOOM2_HEADSET_EXTMUTE_GPIO, "ext_mute");
	if (ret) {
		printk(KERN_ERR "Failed to request ext_mute GPIO\n");
		goto err3;
	}
	gpio_direction_output(ZOOM2_HEADSET_EXTMUTE_GPIO, 0);

	return 0;

err3:
	gpio_free(ZOOM2_HEADSET_MUX_GPIO);
err2:
	platform_device_unregister(zoom2_snd_device);
	return ret;
err1:
	printk(KERN_ERR "Unable to add platform device\n");
	platform_device_put(zoom2_snd_device);

	return ret;
}
module_init(zoom2_soc_init);

/* Module exit: release the GPIOs and tear down the sound device. */
static void __exit zoom2_soc_exit(void)
{
	gpio_free(ZOOM2_HEADSET_MUX_GPIO);
	gpio_free(ZOOM2_HEADSET_EXTMUTE_GPIO);

	platform_device_unregister(zoom2_snd_device);
}
module_exit(zoom2_soc_exit);

MODULE_AUTHOR("Misael Lopez Cruz <x0052729@ti.com>");
MODULE_DESCRIPTION("ALSA SoC Zoom2");
MODULE_LICENSE("GPL");
gpl-2.0
SlimSaber/android_kernel_oppo_msm8974
sound/soc/sh/fsi-ak4642.c
4938
2510
/*
 * FSI-AK464x sound support for ms7724se
 *
 * Copyright (C) 2009 Renesas Solutions Corp.
 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/platform_device.h>
#include <linux/module.h>
#include <sound/sh_fsi.h>

/*
 * NOTE(review): this struct appears unused within this file -- the probe
 * path consumes struct fsi_ak4642_info from platform data instead.
 * Confirm against sound/sh_fsi.h before removing.
 */
struct fsi_ak4642_data {
	const char *name;
	const char *card;
	const char *cpu_dai;
	const char *codec;
	const char *platform;
	int id;
};

/*
 * DAI link init: put the AK4642 codec in clock-master, left-justified
 * mode with a fixed 11.2896 MHz sysclk, and the FSI CPU DAI in
 * clock-slave, left-justified mode.  Returns 0 or a negative error.
 */
static int fsi_ak4642_dai_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_dai *codec = rtd->codec_dai;
	struct snd_soc_dai *cpu = rtd->cpu_dai;
	int ret;

	ret = snd_soc_dai_set_fmt(codec, SND_SOC_DAIFMT_LEFT_J |
					 SND_SOC_DAIFMT_CBM_CFM);
	if (ret < 0)
		return ret;

	ret = snd_soc_dai_set_sysclk(codec, 0, 11289600, 0);
	if (ret < 0)
		return ret;

	ret = snd_soc_dai_set_fmt(cpu, SND_SOC_DAIFMT_LEFT_J |
				       SND_SOC_DAIFMT_CBS_CFS);

	return ret;
}

/*
 * Partially-initialized link/card templates; the remaining fields are
 * filled in at probe time from the board's platform data.
 */
static struct snd_soc_dai_link fsi_dai_link = {
	.codec_dai_name	= "ak4642-hifi",
	.init		= fsi_ak4642_dai_init,
};

static struct snd_soc_card fsi_soc_card = {
	.owner		= THIS_MODULE,
	.dai_link	= &fsi_dai_link,
	.num_links	= 1,
};

/* The "soc-audio" child device created in probe, torn down in remove. */
static struct platform_device *fsi_snd_device;

/*
 * Probe: read struct fsi_ak4642_info from platform data, finish filling
 * in the DAI link and card templates, and register a "soc-audio"
 * platform device carrying the card.  Returns 0 on success, -ENOMEM if
 * platform data is missing or allocation fails, or the error from
 * platform_device_add().
 */
static int fsi_ak4642_probe(struct platform_device *pdev)
{
	int ret = -ENOMEM;
	struct fsi_ak4642_info *pinfo = pdev->dev.platform_data;

	if (!pinfo) {
		dev_err(&pdev->dev, "no info for fsi ak4642\n");
		goto out;
	}

	fsi_snd_device = platform_device_alloc("soc-audio", pinfo->id);
	if (!fsi_snd_device)
		goto out;

	/* Complete the file-scope templates from the board description. */
	fsi_dai_link.name		= pinfo->name;
	fsi_dai_link.stream_name	= pinfo->name;
	fsi_dai_link.cpu_dai_name	= pinfo->cpu_dai;
	fsi_dai_link.platform_name	= pinfo->platform;
	fsi_dai_link.codec_name		= pinfo->codec;
	fsi_soc_card.name		= pinfo->card;

	platform_set_drvdata(fsi_snd_device, &fsi_soc_card);
	ret = platform_device_add(fsi_snd_device);
	if (ret)
		platform_device_put(fsi_snd_device);

out:
	return ret;
}

/* Remove: unregister the "soc-audio" device created in probe. */
static int fsi_ak4642_remove(struct platform_device *pdev)
{
	platform_device_unregister(fsi_snd_device);
	return 0;
}

static struct platform_driver fsi_ak4642 = {
	.driver = {
		.name	= "fsi-ak4642-audio",
	},
	.probe	= fsi_ak4642_probe,
	.remove	= fsi_ak4642_remove,
};

module_platform_driver(fsi_ak4642);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic SH4 FSI-AK4642 sound card");
MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
gpl-2.0
FlorentRevest/kernel
drivers/media/i2c/cs53l32a.c
7242
6736
/*
 * cs53l32a (Adaptec AVC-2010 and AVC-2410) i2c ivtv driver.
 * Copyright (C) 2005  Martin Vaughan
 *
 * Audio source switching for Adaptec AVC-2410 added by Trev Jackson
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-ctrls.h>

MODULE_DESCRIPTION("i2c device driver for cs53l32a Audio ADC");
MODULE_AUTHOR("Martin Vaughan");
MODULE_LICENSE("GPL");

static bool debug;

module_param(debug, bool, 0644);

MODULE_PARM_DESC(debug, "Debugging messages, 0=Off (default), 1=On");

/* Per-device state: embedded v4l2 subdev plus its control handler. */
struct cs53l32a_state {
	struct v4l2_subdev sd;
	struct v4l2_ctrl_handler hdl;
};

/* Recover our state struct from the embedded subdev pointer. */
static inline struct cs53l32a_state *to_state(struct v4l2_subdev *sd)
{
	return container_of(sd, struct cs53l32a_state, sd);
}

/* Recover the subdev from a control's handler pointer. */
static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
{
	return &container_of(ctrl->handler, struct cs53l32a_state, hdl)->sd;
}

/* ----------------------------------------------------------------------- */

/* SMBus write of one register byte; returns 0 or a negative errno. */
static int cs53l32a_write(struct v4l2_subdev *sd, u8 reg, u8 value)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	return i2c_smbus_write_byte_data(client, reg, value);
}

/* SMBus read of one register byte; returns the value or a negative errno. */
static int cs53l32a_read(struct v4l2_subdev *sd, u8 reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	return i2c_smbus_read_byte_data(client, reg);
}

static int cs53l32a_s_routing(struct v4l2_subdev *sd,
			      u32 input, u32 output, u32 config)
{
	/* There are 2 physical inputs, but the second input can be
	   placed in two modes, the first mode bypasses the PGA (gain),
	   the second goes through the PGA. Hence there are three
	   possible inputs to choose from. */
	if (input > 2) {
		v4l2_err(sd, "Invalid input %d.\n", input);
		return -EINVAL;
	}
	/* Input select lives in bits 4-5 of register 0x01. */
	cs53l32a_write(sd, 0x01, 0x01 + (input << 4));
	return 0;
}

/* Control handler: mute toggles reg 0x03, volume mirrors to both
 * channel registers 0x04/0x05. */
static int cs53l32a_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct v4l2_subdev *sd = to_sd(ctrl);

	switch (ctrl->id) {
	case V4L2_CID_AUDIO_MUTE:
		cs53l32a_write(sd, 0x03, ctrl->val ? 0xf0 : 0x30);
		return 0;
	case V4L2_CID_AUDIO_VOLUME:
		cs53l32a_write(sd, 0x04, (u8)ctrl->val);
		cs53l32a_write(sd, 0x05, (u8)ctrl->val);
		return 0;
	}
	return -EINVAL;
}

static int cs53l32a_g_chip_ident(struct v4l2_subdev *sd,
				 struct v4l2_dbg_chip_ident *chip)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_CS53l32A, 0);
}

/* Dump current input selection and control values to the kernel log. */
static int cs53l32a_log_status(struct v4l2_subdev *sd)
{
	struct cs53l32a_state *state = to_state(sd);
	u8 v = cs53l32a_read(sd, 0x01);

	v4l2_info(sd, "Input:  %d\n", (v >> 4) & 3);
	v4l2_ctrl_handler_log_status(&state->hdl, sd->name);
	return 0;
}

/* ----------------------------------------------------------------------- */

static const struct v4l2_ctrl_ops cs53l32a_ctrl_ops = {
	.s_ctrl = cs53l32a_s_ctrl,
};

static const struct v4l2_subdev_core_ops cs53l32a_core_ops = {
	.log_status = cs53l32a_log_status,
	.g_chip_ident = cs53l32a_g_chip_ident,
	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
	.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
	.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
	.g_ctrl = v4l2_subdev_g_ctrl,
	.s_ctrl = v4l2_subdev_s_ctrl,
	.queryctrl = v4l2_subdev_queryctrl,
	.querymenu = v4l2_subdev_querymenu,
};

static const struct v4l2_subdev_audio_ops cs53l32a_audio_ops = {
	.s_routing = cs53l32a_s_routing,
};

static const struct v4l2_subdev_ops cs53l32a_ops = {
	.core = &cs53l32a_core_ops,
	.audio = &cs53l32a_audio_ops,
};

/* ----------------------------------------------------------------------- */

/* i2c implementation */

/*
 * Generic i2c probe
 * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
 */

/* Probe: allocate state, register controls, then program the chip for
 * the Adaptec 2010/2410 boards.  Register dumps are gated on `debug`. */
static int cs53l32a_probe(struct i2c_client *client,
			  const struct i2c_device_id *id)
{
	struct cs53l32a_state *state;
	struct v4l2_subdev *sd;
	int i;

	/* Check if the adapter supports the needed features */
	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -EIO;

	if (!id)
		strlcpy(client->name, "cs53l32a", sizeof(client->name));

	v4l_info(client, "chip found @ 0x%x (%s)\n",
			client->addr << 1, client->adapter->name);

	state = kzalloc(sizeof(struct cs53l32a_state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;
	sd = &state->sd;
	v4l2_i2c_subdev_init(sd, client, &cs53l32a_ops);

	for (i = 1; i <= 7; i++) {
		u8 v = cs53l32a_read(sd, i);

		v4l2_dbg(1, debug, sd, "Read Reg %d %02x\n", i, v);
	}

	v4l2_ctrl_handler_init(&state->hdl, 2);
	v4l2_ctrl_new_std(&state->hdl, &cs53l32a_ctrl_ops,
			V4L2_CID_AUDIO_VOLUME, -96, 12, 1, 0);
	v4l2_ctrl_new_std(&state->hdl, &cs53l32a_ctrl_ops,
			V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
	sd->ctrl_handler = &state->hdl;
	if (state->hdl.error) {
		int err = state->hdl.error;

		v4l2_ctrl_handler_free(&state->hdl);
		kfree(state);
		return err;
	}

	/* Set cs53l32a internal register for Adaptec 2010/2410 setup */

	cs53l32a_write(sd, 0x01, 0x21);
	cs53l32a_write(sd, 0x02, 0x29);
	cs53l32a_write(sd, 0x03, 0x30);
	cs53l32a_write(sd, 0x04, 0x00);
	cs53l32a_write(sd, 0x05, 0x00);
	cs53l32a_write(sd, 0x06, 0x00);
	cs53l32a_write(sd, 0x07, 0x00);

	/* Display results, should be 0x21,0x29,0x30,0x00,0x00,0x00,0x00 */

	for (i = 1; i <= 7; i++) {
		u8 v = cs53l32a_read(sd, i);

		v4l2_dbg(1, debug, sd, "Read Reg %d %02x\n", i, v);
	}
	return 0;
}

/* Remove: unregister the subdev and free controls and state. */
static int cs53l32a_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct cs53l32a_state *state = to_state(sd);

	v4l2_device_unregister_subdev(sd);
	v4l2_ctrl_handler_free(&state->hdl);
	kfree(state);
	return 0;
}

static const struct i2c_device_id cs53l32a_id[] = {
	{ "cs53l32a", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, cs53l32a_id);

static struct i2c_driver cs53l32a_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "cs53l32a",
	},
	.probe		= cs53l32a_probe,
	.remove		= cs53l32a_remove,
	.id_table	= cs53l32a_id,
};

module_i2c_driver(cs53l32a_driver);
gpl-2.0
Xperia-P/android_kernel_sony_u8500
arch/mips/pnx833x/common/reset.c
8778
1292
/* * reset.c: reset support for PNX833X. * * Copyright 2008 NXP Semiconductors * Chris Steel <chris.steel@nxp.com> * Daniel Laird <daniel.j.laird@nxp.com> * * Based on software written by: * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/reboot.h> #include <pnx833x.h> void pnx833x_machine_restart(char *command) { PNX833X_RESET_CONTROL_2 = 0; PNX833X_RESET_CONTROL = 0; } void pnx833x_machine_halt(void) { while (1) __asm__ __volatile__ ("wait"); } void pnx833x_machine_power_off(void) { pnx833x_machine_halt(); }
gpl-2.0
MaxiCM/android_kernel_google_msm
drivers/isdn/hisax/isdnl2.c
9034
42825
/* $Id: isdnl2.c,v 2.30.2.4 2004/02/11 13:21:34 keil Exp $ * * Author Karsten Keil * based on the teles driver from Jan den Ouden * Copyright by Karsten Keil <keil@isdn4linux.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * For changes and modifications please read * Documentation/isdn/HiSax.cert * * Thanks to Jan den Ouden * Fritz Elfert * */ #include <linux/init.h> #include <linux/gfp.h> #include "hisax.h" #include "isdnl2.h" const char *l2_revision = "$Revision: 2.30.2.4 $"; static void l2m_debug(struct FsmInst *fi, char *fmt, ...); static struct Fsm l2fsm; enum { ST_L2_1, ST_L2_2, ST_L2_3, ST_L2_4, ST_L2_5, ST_L2_6, ST_L2_7, ST_L2_8, }; #define L2_STATE_COUNT (ST_L2_8 + 1) static char *strL2State[] = { "ST_L2_1", "ST_L2_2", "ST_L2_3", "ST_L2_4", "ST_L2_5", "ST_L2_6", "ST_L2_7", "ST_L2_8", }; enum { EV_L2_UI, EV_L2_SABME, EV_L2_DISC, EV_L2_DM, EV_L2_UA, EV_L2_FRMR, EV_L2_SUPER, EV_L2_I, EV_L2_DL_DATA, EV_L2_ACK_PULL, EV_L2_DL_UNIT_DATA, EV_L2_DL_ESTABLISH_REQ, EV_L2_DL_RELEASE_REQ, EV_L2_MDL_ASSIGN, EV_L2_MDL_REMOVE, EV_L2_MDL_ERROR, EV_L1_DEACTIVATE, EV_L2_T200, EV_L2_T203, EV_L2_SET_OWN_BUSY, EV_L2_CLEAR_OWN_BUSY, EV_L2_FRAME_ERROR, }; #define L2_EVENT_COUNT (EV_L2_FRAME_ERROR + 1) static char *strL2Event[] = { "EV_L2_UI", "EV_L2_SABME", "EV_L2_DISC", "EV_L2_DM", "EV_L2_UA", "EV_L2_FRMR", "EV_L2_SUPER", "EV_L2_I", "EV_L2_DL_DATA", "EV_L2_ACK_PULL", "EV_L2_DL_UNIT_DATA", "EV_L2_DL_ESTABLISH_REQ", "EV_L2_DL_RELEASE_REQ", "EV_L2_MDL_ASSIGN", "EV_L2_MDL_REMOVE", "EV_L2_MDL_ERROR", "EV_L1_DEACTIVATE", "EV_L2_T200", "EV_L2_T203", "EV_L2_SET_OWN_BUSY", "EV_L2_CLEAR_OWN_BUSY", "EV_L2_FRAME_ERROR", }; static int l2addrsize(struct Layer2 *l2); static void set_peer_busy(struct Layer2 *l2) { test_and_set_bit(FLG_PEER_BUSY, &l2->flag); if (!skb_queue_empty(&l2->i_queue) || !skb_queue_empty(&l2->ui_queue)) test_and_set_bit(FLG_L2BLOCK, &l2->flag); } static void 
clear_peer_busy(struct Layer2 *l2) { if (test_and_clear_bit(FLG_PEER_BUSY, &l2->flag)) test_and_clear_bit(FLG_L2BLOCK, &l2->flag); } static void InitWin(struct Layer2 *l2) { int i; for (i = 0; i < MAX_WINDOW; i++) l2->windowar[i] = NULL; } static int freewin1(struct Layer2 *l2) { int i, cnt = 0; for (i = 0; i < MAX_WINDOW; i++) { if (l2->windowar[i]) { cnt++; dev_kfree_skb(l2->windowar[i]); l2->windowar[i] = NULL; } } return cnt; } static inline void freewin(struct PStack *st) { freewin1(&st->l2); } static void ReleaseWin(struct Layer2 *l2) { int cnt; if ((cnt = freewin1(l2))) printk(KERN_WARNING "isdl2 freed %d skbuffs in release\n", cnt); } static inline unsigned int cansend(struct PStack *st) { unsigned int p1; if (test_bit(FLG_MOD128, &st->l2.flag)) p1 = (st->l2.vs - st->l2.va) % 128; else p1 = (st->l2.vs - st->l2.va) % 8; return ((p1 < st->l2.window) && !test_bit(FLG_PEER_BUSY, &st->l2.flag)); } static inline void clear_exception(struct Layer2 *l2) { test_and_clear_bit(FLG_ACK_PEND, &l2->flag); test_and_clear_bit(FLG_REJEXC, &l2->flag); test_and_clear_bit(FLG_OWN_BUSY, &l2->flag); clear_peer_busy(l2); } static inline int l2headersize(struct Layer2 *l2, int ui) { return (((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) + (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1)); } inline int l2addrsize(struct Layer2 *l2) { return (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1); } static int sethdraddr(struct Layer2 *l2, u_char *header, int rsp) { u_char *ptr = header; int crbit = rsp; if (test_bit(FLG_LAPD, &l2->flag)) { *ptr++ = (l2->sap << 2) | (rsp ? 
2 : 0); *ptr++ = (l2->tei << 1) | 1; return (2); } else { if (test_bit(FLG_ORIG, &l2->flag)) crbit = !crbit; if (crbit) *ptr++ = 1; else *ptr++ = 3; return (1); } } static inline void enqueue_super(struct PStack *st, struct sk_buff *skb) { if (test_bit(FLG_LAPB, &st->l2.flag)) st->l1.bcs->tx_cnt += skb->len; st->l2.l2l1(st, PH_DATA | REQUEST, skb); } #define enqueue_ui(a, b) enqueue_super(a, b) static inline int IsUI(u_char *data) { return ((data[0] & 0xef) == UI); } static inline int IsUA(u_char *data) { return ((data[0] & 0xef) == UA); } static inline int IsDM(u_char *data) { return ((data[0] & 0xef) == DM); } static inline int IsDISC(u_char *data) { return ((data[0] & 0xef) == DISC); } static inline int IsSFrame(u_char *data, struct PStack *st) { register u_char d = *data; if (!test_bit(FLG_MOD128, &st->l2.flag)) d &= 0xf; return (((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c)); } static inline int IsSABME(u_char *data, struct PStack *st) { u_char d = data[0] & ~0x10; return (test_bit(FLG_MOD128, &st->l2.flag) ? d == SABME : d == SABM); } static inline int IsREJ(u_char *data, struct PStack *st) { return (test_bit(FLG_MOD128, &st->l2.flag) ? data[0] == REJ : (data[0] & 0xf) == REJ); } static inline int IsFRMR(u_char *data) { return ((data[0] & 0xef) == FRMR); } static inline int IsRNR(u_char *data, struct PStack *st) { return (test_bit(FLG_MOD128, &st->l2.flag) ? data[0] == RNR : (data[0] & 0xf) == RNR); } static int iframe_error(struct PStack *st, struct sk_buff *skb) { int i = l2addrsize(&st->l2) + (test_bit(FLG_MOD128, &st->l2.flag) ? 2 : 1); int rsp = *skb->data & 0x2; if (test_bit(FLG_ORIG, &st->l2.flag)) rsp = !rsp; if (rsp) return 'L'; if (skb->len < i) return 'N'; if ((skb->len - i) > st->l2.maxlen) return 'O'; return 0; } static int super_error(struct PStack *st, struct sk_buff *skb) { if (skb->len != l2addrsize(&st->l2) + (test_bit(FLG_MOD128, &st->l2.flag) ? 
2 : 1)) return 'N'; return 0; } static int unnum_error(struct PStack *st, struct sk_buff *skb, int wantrsp) { int rsp = (*skb->data & 0x2) >> 1; if (test_bit(FLG_ORIG, &st->l2.flag)) rsp = !rsp; if (rsp != wantrsp) return 'L'; if (skb->len != l2addrsize(&st->l2) + 1) return 'N'; return 0; } static int UI_error(struct PStack *st, struct sk_buff *skb) { int rsp = *skb->data & 0x2; if (test_bit(FLG_ORIG, &st->l2.flag)) rsp = !rsp; if (rsp) return 'L'; if (skb->len > st->l2.maxlen + l2addrsize(&st->l2) + 1) return 'O'; return 0; } static int FRMR_error(struct PStack *st, struct sk_buff *skb) { int headers = l2addrsize(&st->l2) + 1; u_char *datap = skb->data + headers; int rsp = *skb->data & 0x2; if (test_bit(FLG_ORIG, &st->l2.flag)) rsp = !rsp; if (!rsp) return 'L'; if (test_bit(FLG_MOD128, &st->l2.flag)) { if (skb->len < headers + 5) return 'N'; else l2m_debug(&st->l2.l2m, "FRMR information %2x %2x %2x %2x %2x", datap[0], datap[1], datap[2], datap[3], datap[4]); } else { if (skb->len < headers + 3) return 'N'; else l2m_debug(&st->l2.l2m, "FRMR information %2x %2x %2x", datap[0], datap[1], datap[2]); } return 0; } static unsigned int legalnr(struct PStack *st, unsigned int nr) { struct Layer2 *l2 = &st->l2; if (test_bit(FLG_MOD128, &l2->flag)) return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128); else return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8); } static void setva(struct PStack *st, unsigned int nr) { struct Layer2 *l2 = &st->l2; int len; u_long flags; spin_lock_irqsave(&l2->lock, flags); while (l2->va != nr) { (l2->va)++; if (test_bit(FLG_MOD128, &l2->flag)) l2->va %= 128; else l2->va %= 8; len = l2->windowar[l2->sow]->len; if (PACKET_NOACK == l2->windowar[l2->sow]->pkt_type) len = -1; dev_kfree_skb(l2->windowar[l2->sow]); l2->windowar[l2->sow] = NULL; l2->sow = (l2->sow + 1) % l2->window; spin_unlock_irqrestore(&l2->lock, flags); if (test_bit(FLG_LLI_L2WAKEUP, &st->lli.flag) && (len >= 0)) lli_writewakeup(st, len); spin_lock_irqsave(&l2->lock, flags); 
} spin_unlock_irqrestore(&l2->lock, flags); } static void send_uframe(struct PStack *st, u_char cmd, u_char cr) { struct sk_buff *skb; u_char tmp[MAX_HEADER_LEN]; int i; i = sethdraddr(&st->l2, tmp, cr); tmp[i++] = cmd; if (!(skb = alloc_skb(i, GFP_ATOMIC))) { printk(KERN_WARNING "isdl2 can't alloc sbbuff for send_uframe\n"); return; } memcpy(skb_put(skb, i), tmp, i); enqueue_super(st, skb); } static inline u_char get_PollFlag(struct PStack *st, struct sk_buff *skb) { return (skb->data[l2addrsize(&(st->l2))] & 0x10); } static inline u_char get_PollFlagFree(struct PStack *st, struct sk_buff *skb) { u_char PF; PF = get_PollFlag(st, skb); dev_kfree_skb(skb); return (PF); } static inline void start_t200(struct PStack *st, int i) { FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, i); test_and_set_bit(FLG_T200_RUN, &st->l2.flag); } static inline void restart_t200(struct PStack *st, int i) { FsmRestartTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, i); test_and_set_bit(FLG_T200_RUN, &st->l2.flag); } static inline void stop_t200(struct PStack *st, int i) { if (test_and_clear_bit(FLG_T200_RUN, &st->l2.flag)) FsmDelTimer(&st->l2.t200, i); } static inline void st5_dl_release_l2l3(struct PStack *st) { int pr; if (test_and_clear_bit(FLG_PEND_REL, &st->l2.flag)) pr = DL_RELEASE | CONFIRM; else pr = DL_RELEASE | INDICATION; st->l2.l2l3(st, pr, NULL); } static inline void lapb_dl_release_l2l3(struct PStack *st, int f) { if (test_bit(FLG_LAPB, &st->l2.flag)) st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL); st->l2.l2l3(st, DL_RELEASE | f, NULL); } static void establishlink(struct FsmInst *fi) { struct PStack *st = fi->userdata; u_char cmd; clear_exception(&st->l2); st->l2.rc = 0; cmd = (test_bit(FLG_MOD128, &st->l2.flag) ? 
SABME : SABM) | 0x10; send_uframe(st, cmd, CMD); FsmDelTimer(&st->l2.t203, 1); restart_t200(st, 1); test_and_clear_bit(FLG_PEND_REL, &st->l2.flag); freewin(st); FsmChangeState(fi, ST_L2_5); } static void l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg) { struct sk_buff *skb = arg; struct PStack *st = fi->userdata; if (get_PollFlagFree(st, skb)) st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'C'); else st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'D'); } static void l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg) { struct sk_buff *skb = arg; struct PStack *st = fi->userdata; if (get_PollFlagFree(st, skb)) st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'B'); else { st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'E'); establishlink(fi); test_and_clear_bit(FLG_L3_INIT, &st->l2.flag); } } static void l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg) { struct sk_buff *skb = arg; struct PStack *st = fi->userdata; if (get_PollFlagFree(st, skb)) st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'B'); else { st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'E'); } establishlink(fi); test_and_clear_bit(FLG_L3_INIT, &st->l2.flag); } static void l2_go_st3(struct FsmInst *fi, int event, void *arg) { FsmChangeState(fi, ST_L2_3); } static void l2_mdl_assign(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; FsmChangeState(fi, ST_L2_3); st->l2.l2tei(st, MDL_ASSIGN | INDICATION, NULL); } static void l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; skb_queue_tail(&st->l2.ui_queue, skb); FsmChangeState(fi, ST_L2_2); st->l2.l2tei(st, MDL_ASSIGN | INDICATION, NULL); } static void l2_queue_ui(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; skb_queue_tail(&st->l2.ui_queue, skb); } static void tx_ui(struct PStack *st) { struct sk_buff *skb; u_char header[MAX_HEADER_LEN]; int i; i = 
sethdraddr(&(st->l2), header, CMD); header[i++] = UI; while ((skb = skb_dequeue(&st->l2.ui_queue))) { memcpy(skb_push(skb, i), header, i); enqueue_ui(st, skb); } } static void l2_send_ui(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; skb_queue_tail(&st->l2.ui_queue, skb); tx_ui(st); } static void l2_got_ui(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; skb_pull(skb, l2headersize(&st->l2, 1)); st->l2.l2l3(st, DL_UNIT_DATA | INDICATION, skb); /* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * in states 1-3 for broadcast */ } static void l2_establish(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; establishlink(fi); test_and_set_bit(FLG_L3_INIT, &st->l2.flag); } static void l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; skb_queue_purge(&st->l2.i_queue); test_and_set_bit(FLG_L3_INIT, &st->l2.flag); test_and_clear_bit(FLG_PEND_REL, &st->l2.flag); } static void l2_l3_reestablish(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; skb_queue_purge(&st->l2.i_queue); establishlink(fi); test_and_set_bit(FLG_L3_INIT, &st->l2.flag); } static void l2_release(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; st->l2.l2l3(st, DL_RELEASE | CONFIRM, NULL); } static void l2_pend_rel(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; test_and_set_bit(FLG_PEND_REL, &st->l2.flag); } static void l2_disconnect(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; skb_queue_purge(&st->l2.i_queue); freewin(st); FsmChangeState(fi, ST_L2_6); st->l2.rc = 0; send_uframe(st, DISC | 0x10, CMD); FsmDelTimer(&st->l2.t203, 1); restart_t200(st, 2); } static void l2_start_multi(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; send_uframe(st, UA | 
get_PollFlagFree(st, skb), RSP); clear_exception(&st->l2); st->l2.vs = 0; st->l2.va = 0; st->l2.vr = 0; st->l2.sow = 0; FsmChangeState(fi, ST_L2_7); FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 3); st->l2.l2l3(st, DL_ESTABLISH | INDICATION, NULL); } static void l2_send_UA(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; send_uframe(st, UA | get_PollFlagFree(st, skb), RSP); } static void l2_send_DM(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; send_uframe(st, DM | get_PollFlagFree(st, skb), RSP); } static void l2_restart_multi(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; int est = 0, state; state = fi->state; send_uframe(st, UA | get_PollFlagFree(st, skb), RSP); st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'F'); if (st->l2.vs != st->l2.va) { skb_queue_purge(&st->l2.i_queue); est = 1; } clear_exception(&st->l2); st->l2.vs = 0; st->l2.va = 0; st->l2.vr = 0; st->l2.sow = 0; FsmChangeState(fi, ST_L2_7); stop_t200(st, 3); FsmRestartTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 3); if (est) st->l2.l2l3(st, DL_ESTABLISH | INDICATION, NULL); if ((ST_L2_7 == state) || (ST_L2_8 == state)) if (!skb_queue_empty(&st->l2.i_queue) && cansend(st)) st->l2.l2l1(st, PH_PULL | REQUEST, NULL); } static void l2_stop_multi(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; FsmChangeState(fi, ST_L2_4); FsmDelTimer(&st->l2.t203, 3); stop_t200(st, 4); send_uframe(st, UA | get_PollFlagFree(st, skb), RSP); skb_queue_purge(&st->l2.i_queue); freewin(st); lapb_dl_release_l2l3(st, INDICATION); } static void l2_connected(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; int pr = -1; if (!get_PollFlag(st, skb)) { l2_mdl_error_ua(fi, event, arg); return; } dev_kfree_skb(skb); if 
(test_and_clear_bit(FLG_PEND_REL, &st->l2.flag)) l2_disconnect(fi, event, arg); if (test_and_clear_bit(FLG_L3_INIT, &st->l2.flag)) { pr = DL_ESTABLISH | CONFIRM; } else if (st->l2.vs != st->l2.va) { skb_queue_purge(&st->l2.i_queue); pr = DL_ESTABLISH | INDICATION; } stop_t200(st, 5); st->l2.vr = 0; st->l2.vs = 0; st->l2.va = 0; st->l2.sow = 0; FsmChangeState(fi, ST_L2_7); FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 4); if (pr != -1) st->l2.l2l3(st, pr, NULL); if (!skb_queue_empty(&st->l2.i_queue) && cansend(st)) st->l2.l2l1(st, PH_PULL | REQUEST, NULL); } static void l2_released(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; if (!get_PollFlag(st, skb)) { l2_mdl_error_ua(fi, event, arg); return; } dev_kfree_skb(skb); stop_t200(st, 6); lapb_dl_release_l2l3(st, CONFIRM); FsmChangeState(fi, ST_L2_4); } static void l2_reestablish(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; if (!get_PollFlagFree(st, skb)) { establishlink(fi); test_and_set_bit(FLG_L3_INIT, &st->l2.flag); } } static void l2_st5_dm_release(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; if (get_PollFlagFree(st, skb)) { stop_t200(st, 7); if (!test_bit(FLG_L3_INIT, &st->l2.flag)) skb_queue_purge(&st->l2.i_queue); if (test_bit(FLG_LAPB, &st->l2.flag)) st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL); st5_dl_release_l2l3(st); FsmChangeState(fi, ST_L2_4); } } static void l2_st6_dm_release(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; if (get_PollFlagFree(st, skb)) { stop_t200(st, 8); lapb_dl_release_l2l3(st, CONFIRM); FsmChangeState(fi, ST_L2_4); } } static inline void enquiry_cr(struct PStack *st, u_char typ, u_char cr, u_char pf) { struct sk_buff *skb; struct Layer2 *l2; u_char tmp[MAX_HEADER_LEN]; int i; l2 = &st->l2; i = sethdraddr(l2, tmp, cr); if 
(test_bit(FLG_MOD128, &l2->flag)) { tmp[i++] = typ; tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0); } else tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0); if (!(skb = alloc_skb(i, GFP_ATOMIC))) { printk(KERN_WARNING "isdl2 can't alloc sbbuff for enquiry_cr\n"); return; } memcpy(skb_put(skb, i), tmp, i); enqueue_super(st, skb); } static inline void enquiry_response(struct PStack *st) { if (test_bit(FLG_OWN_BUSY, &st->l2.flag)) enquiry_cr(st, RNR, RSP, 1); else enquiry_cr(st, RR, RSP, 1); test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag); } static inline void transmit_enquiry(struct PStack *st) { if (test_bit(FLG_OWN_BUSY, &st->l2.flag)) enquiry_cr(st, RNR, CMD, 1); else enquiry_cr(st, RR, CMD, 1); test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag); start_t200(st, 9); } static void nrerrorrecovery(struct FsmInst *fi) { struct PStack *st = fi->userdata; st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'J'); establishlink(fi); test_and_clear_bit(FLG_L3_INIT, &st->l2.flag); } static void invoke_retransmission(struct PStack *st, unsigned int nr) { struct Layer2 *l2 = &st->l2; u_int p1; u_long flags; spin_lock_irqsave(&l2->lock, flags); if (l2->vs != nr) { while (l2->vs != nr) { (l2->vs)--; if (test_bit(FLG_MOD128, &l2->flag)) { l2->vs %= 128; p1 = (l2->vs - l2->va) % 128; } else { l2->vs %= 8; p1 = (l2->vs - l2->va) % 8; } p1 = (p1 + l2->sow) % l2->window; if (test_bit(FLG_LAPB, &l2->flag)) st->l1.bcs->tx_cnt += l2->windowar[p1]->len + l2headersize(l2, 0); skb_queue_head(&l2->i_queue, l2->windowar[p1]); l2->windowar[p1] = NULL; } spin_unlock_irqrestore(&l2->lock, flags); st->l2.l2l1(st, PH_PULL | REQUEST, NULL); return; } spin_unlock_irqrestore(&l2->lock, flags); } static void l2_st7_got_super(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; int PollFlag, rsp, typ = RR; unsigned int nr; struct Layer2 *l2 = &st->l2; rsp = *skb->data & 0x2; if (test_bit(FLG_ORIG, &l2->flag)) rsp = !rsp; skb_pull(skb, l2addrsize(l2)); if 
(IsRNR(skb->data, st)) { set_peer_busy(l2); typ = RNR; } else clear_peer_busy(l2); if (IsREJ(skb->data, st)) typ = REJ; if (test_bit(FLG_MOD128, &l2->flag)) { PollFlag = (skb->data[1] & 0x1) == 0x1; nr = skb->data[1] >> 1; } else { PollFlag = (skb->data[0] & 0x10); nr = (skb->data[0] >> 5) & 0x7; } dev_kfree_skb(skb); if (PollFlag) { if (rsp) st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'A'); else enquiry_response(st); } if (legalnr(st, nr)) { if (typ == REJ) { setva(st, nr); invoke_retransmission(st, nr); stop_t200(st, 10); if (FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 6)) l2m_debug(&st->l2.l2m, "Restart T203 ST7 REJ"); } else if ((nr == l2->vs) && (typ == RR)) { setva(st, nr); stop_t200(st, 11); FsmRestartTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 7); } else if ((l2->va != nr) || (typ == RNR)) { setva(st, nr); if (typ != RR) FsmDelTimer(&st->l2.t203, 9); restart_t200(st, 12); } if (!skb_queue_empty(&st->l2.i_queue) && (typ == RR)) st->l2.l2l1(st, PH_PULL | REQUEST, NULL); } else nrerrorrecovery(fi); } static void l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; if (test_bit(FLG_LAPB, &st->l2.flag)) st->l1.bcs->tx_cnt += skb->len + l2headersize(&st->l2, 0); if (!test_bit(FLG_L3_INIT, &st->l2.flag)) skb_queue_tail(&st->l2.i_queue, skb); else dev_kfree_skb(skb); } static void l2_feed_i_pull(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; if (test_bit(FLG_LAPB, &st->l2.flag)) st->l1.bcs->tx_cnt += skb->len + l2headersize(&st->l2, 0); skb_queue_tail(&st->l2.i_queue, skb); st->l2.l2l1(st, PH_PULL | REQUEST, NULL); } static void l2_feed_iqueue(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; if (test_bit(FLG_LAPB, &st->l2.flag)) st->l1.bcs->tx_cnt += skb->len + l2headersize(&st->l2, 0); skb_queue_tail(&st->l2.i_queue, skb); } static void l2_got_iframe(struct 
FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; struct Layer2 *l2 = &(st->l2); int PollFlag, ns, i; unsigned int nr; i = l2addrsize(l2); if (test_bit(FLG_MOD128, &l2->flag)) { PollFlag = ((skb->data[i + 1] & 0x1) == 0x1); ns = skb->data[i] >> 1; nr = (skb->data[i + 1] >> 1) & 0x7f; } else { PollFlag = (skb->data[i] & 0x10); ns = (skb->data[i] >> 1) & 0x7; nr = (skb->data[i] >> 5) & 0x7; } if (test_bit(FLG_OWN_BUSY, &l2->flag)) { dev_kfree_skb(skb); if (PollFlag) enquiry_response(st); } else if (l2->vr == ns) { (l2->vr)++; if (test_bit(FLG_MOD128, &l2->flag)) l2->vr %= 128; else l2->vr %= 8; test_and_clear_bit(FLG_REJEXC, &l2->flag); if (PollFlag) enquiry_response(st); else test_and_set_bit(FLG_ACK_PEND, &l2->flag); skb_pull(skb, l2headersize(l2, 0)); st->l2.l2l3(st, DL_DATA | INDICATION, skb); } else { /* n(s)!=v(r) */ dev_kfree_skb(skb); if (test_and_set_bit(FLG_REJEXC, &l2->flag)) { if (PollFlag) enquiry_response(st); } else { enquiry_cr(st, REJ, RSP, PollFlag); test_and_clear_bit(FLG_ACK_PEND, &l2->flag); } } if (legalnr(st, nr)) { if (!test_bit(FLG_PEER_BUSY, &st->l2.flag) && (fi->state == ST_L2_7)) { if (nr == st->l2.vs) { stop_t200(st, 13); FsmRestartTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 7); } else if (nr != st->l2.va) restart_t200(st, 14); } setva(st, nr); } else { nrerrorrecovery(fi); return; } if (!skb_queue_empty(&st->l2.i_queue) && (fi->state == ST_L2_7)) st->l2.l2l1(st, PH_PULL | REQUEST, NULL); if (test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag)) enquiry_cr(st, RR, RSP, 0); } static void l2_got_tei(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; st->l2.tei = (long) arg; if (fi->state == ST_L2_3) { establishlink(fi); test_and_set_bit(FLG_L3_INIT, &st->l2.flag); } else FsmChangeState(fi, ST_L2_4); if (!skb_queue_empty(&st->l2.ui_queue)) tx_ui(st); } static void l2_st5_tout_200(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; if 
(test_bit(FLG_LAPD, &st->l2.flag) && test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) { FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9); } else if (st->l2.rc == st->l2.N200) { FsmChangeState(fi, ST_L2_4); test_and_clear_bit(FLG_T200_RUN, &st->l2.flag); skb_queue_purge(&st->l2.i_queue); st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'G'); if (test_bit(FLG_LAPB, &st->l2.flag)) st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL); st5_dl_release_l2l3(st); } else { st->l2.rc++; FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9); send_uframe(st, (test_bit(FLG_MOD128, &st->l2.flag) ? SABME : SABM) | 0x10, CMD); } } static void l2_st6_tout_200(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; if (test_bit(FLG_LAPD, &st->l2.flag) && test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) { FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9); } else if (st->l2.rc == st->l2.N200) { FsmChangeState(fi, ST_L2_4); test_and_clear_bit(FLG_T200_RUN, &st->l2.flag); st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'H'); lapb_dl_release_l2l3(st, CONFIRM); } else { st->l2.rc++; FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9); send_uframe(st, DISC | 0x10, CMD); } } static void l2_st7_tout_200(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; if (test_bit(FLG_LAPD, &st->l2.flag) && test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) { FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9); return; } test_and_clear_bit(FLG_T200_RUN, &st->l2.flag); st->l2.rc = 0; FsmChangeState(fi, ST_L2_8); transmit_enquiry(st); st->l2.rc++; } static void l2_st8_tout_200(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; if (test_bit(FLG_LAPD, &st->l2.flag) && test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) { FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9); return; } test_and_clear_bit(FLG_T200_RUN, &st->l2.flag); if (st->l2.rc == st->l2.N200) { st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'I'); 
establishlink(fi); test_and_clear_bit(FLG_L3_INIT, &st->l2.flag); } else { transmit_enquiry(st); st->l2.rc++; } } static void l2_st7_tout_203(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; if (test_bit(FLG_LAPD, &st->l2.flag) && test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) { FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 9); return; } FsmChangeState(fi, ST_L2_8); transmit_enquiry(st); st->l2.rc = 0; } static void l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb; struct Layer2 *l2 = &st->l2; u_char header[MAX_HEADER_LEN]; int i, hdr_space_needed; int unsigned p1; u_long flags; if (!cansend(st)) return; skb = skb_dequeue(&l2->i_queue); if (!skb) return; hdr_space_needed = l2headersize(l2, 0); if (hdr_space_needed > skb_headroom(skb)) { struct sk_buff *orig_skb = skb; skb = skb_realloc_headroom(skb, hdr_space_needed); if (!skb) { dev_kfree_skb(orig_skb); return; } } spin_lock_irqsave(&l2->lock, flags); if (test_bit(FLG_MOD128, &l2->flag)) p1 = (l2->vs - l2->va) % 128; else p1 = (l2->vs - l2->va) % 8; p1 = (p1 + l2->sow) % l2->window; if (l2->windowar[p1]) { printk(KERN_WARNING "isdnl2 try overwrite ack queue entry %d\n", p1); dev_kfree_skb(l2->windowar[p1]); } l2->windowar[p1] = skb_clone(skb, GFP_ATOMIC); i = sethdraddr(&st->l2, header, CMD); if (test_bit(FLG_MOD128, &l2->flag)) { header[i++] = l2->vs << 1; header[i++] = l2->vr << 1; l2->vs = (l2->vs + 1) % 128; } else { header[i++] = (l2->vr << 5) | (l2->vs << 1); l2->vs = (l2->vs + 1) % 8; } spin_unlock_irqrestore(&l2->lock, flags); memcpy(skb_push(skb, i), header, i); st->l2.l2l1(st, PH_PULL | INDICATION, skb); test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag); if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) { FsmDelTimer(&st->l2.t203, 13); FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 11); } if (!skb_queue_empty(&l2->i_queue) && cansend(st)) st->l2.l2l1(st, PH_PULL | REQUEST, NULL); } static void 
l2_st8_got_super(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; int PollFlag, rsp, rnr = 0; unsigned int nr; struct Layer2 *l2 = &st->l2; rsp = *skb->data & 0x2; if (test_bit(FLG_ORIG, &l2->flag)) rsp = !rsp; skb_pull(skb, l2addrsize(l2)); if (IsRNR(skb->data, st)) { set_peer_busy(l2); rnr = 1; } else clear_peer_busy(l2); if (test_bit(FLG_MOD128, &l2->flag)) { PollFlag = (skb->data[1] & 0x1) == 0x1; nr = skb->data[1] >> 1; } else { PollFlag = (skb->data[0] & 0x10); nr = (skb->data[0] >> 5) & 0x7; } dev_kfree_skb(skb); if (rsp && PollFlag) { if (legalnr(st, nr)) { if (rnr) { restart_t200(st, 15); } else { stop_t200(st, 16); FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 5); setva(st, nr); } invoke_retransmission(st, nr); FsmChangeState(fi, ST_L2_7); if (!skb_queue_empty(&l2->i_queue) && cansend(st)) st->l2.l2l1(st, PH_PULL | REQUEST, NULL); } else nrerrorrecovery(fi); } else { if (!rsp && PollFlag) enquiry_response(st); if (legalnr(st, nr)) { setva(st, nr); } else nrerrorrecovery(fi); } } static void l2_got_FRMR(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; skb_pull(skb, l2addrsize(&st->l2) + 1); if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) || /* I or S */ (IsUA(skb->data) && (fi->state == ST_L2_7))) { st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'K'); establishlink(fi); test_and_clear_bit(FLG_L3_INIT, &st->l2.flag); } dev_kfree_skb(skb); } static void l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; skb_queue_purge(&st->l2.ui_queue); st->l2.tei = -1; FsmChangeState(fi, ST_L2_1); } static void l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; skb_queue_purge(&st->l2.ui_queue); st->l2.tei = -1; st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL); FsmChangeState(fi, ST_L2_1); } static void l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg) 
{ struct PStack *st = fi->userdata; skb_queue_purge(&st->l2.i_queue); skb_queue_purge(&st->l2.ui_queue); freewin(st); st->l2.tei = -1; stop_t200(st, 17); st5_dl_release_l2l3(st); FsmChangeState(fi, ST_L2_1); } static void l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; skb_queue_purge(&st->l2.ui_queue); st->l2.tei = -1; stop_t200(st, 18); st->l2.l2l3(st, DL_RELEASE | CONFIRM, NULL); FsmChangeState(fi, ST_L2_1); } static void l2_tei_remove(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; skb_queue_purge(&st->l2.i_queue); skb_queue_purge(&st->l2.ui_queue); freewin(st); st->l2.tei = -1; stop_t200(st, 17); FsmDelTimer(&st->l2.t203, 19); st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL); FsmChangeState(fi, ST_L2_1); } static void l2_st14_persistent_da(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; skb_queue_purge(&st->l2.i_queue); skb_queue_purge(&st->l2.ui_queue); if (test_and_clear_bit(FLG_ESTAB_PEND, &st->l2.flag)) st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL); } static void l2_st5_persistent_da(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; skb_queue_purge(&st->l2.i_queue); skb_queue_purge(&st->l2.ui_queue); freewin(st); stop_t200(st, 19); st5_dl_release_l2l3(st); FsmChangeState(fi, ST_L2_4); } static void l2_st6_persistent_da(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; skb_queue_purge(&st->l2.ui_queue); stop_t200(st, 20); st->l2.l2l3(st, DL_RELEASE | CONFIRM, NULL); FsmChangeState(fi, ST_L2_4); } static void l2_persistent_da(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; skb_queue_purge(&st->l2.i_queue); skb_queue_purge(&st->l2.ui_queue); freewin(st); stop_t200(st, 19); FsmDelTimer(&st->l2.t203, 19); st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL); FsmChangeState(fi, ST_L2_4); } static void l2_set_own_busy(struct FsmInst *fi, int event, void *arg) { struct PStack *st 
= fi->userdata; if (!test_and_set_bit(FLG_OWN_BUSY, &st->l2.flag)) { enquiry_cr(st, RNR, RSP, 0); test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag); } } static void l2_clear_own_busy(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; if (!test_and_clear_bit(FLG_OWN_BUSY, &st->l2.flag)) { enquiry_cr(st, RR, RSP, 0); test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag); } } static void l2_frame_error(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; st->ma.layer(st, MDL_ERROR | INDICATION, arg); } static void l2_frame_error_reest(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; st->ma.layer(st, MDL_ERROR | INDICATION, arg); establishlink(fi); test_and_clear_bit(FLG_L3_INIT, &st->l2.flag); } static struct FsmNode L2FnList[] __initdata = { {ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign}, {ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3}, {ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish}, {ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3}, {ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish}, {ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish}, {ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release}, {ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel}, {ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect}, {ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect}, {ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest}, {ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull}, {ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue}, {ST_L2_1, EV_L2_DL_UNIT_DATA, l2_queue_ui_assign}, {ST_L2_2, EV_L2_DL_UNIT_DATA, l2_queue_ui}, {ST_L2_3, EV_L2_DL_UNIT_DATA, l2_queue_ui}, {ST_L2_4, EV_L2_DL_UNIT_DATA, l2_send_ui}, {ST_L2_5, EV_L2_DL_UNIT_DATA, l2_send_ui}, {ST_L2_6, EV_L2_DL_UNIT_DATA, l2_send_ui}, {ST_L2_7, EV_L2_DL_UNIT_DATA, l2_send_ui}, {ST_L2_8, EV_L2_DL_UNIT_DATA, l2_send_ui}, {ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei}, {ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei}, {ST_L2_3, EV_L2_MDL_ASSIGN, l2_got_tei}, {ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove}, {ST_L2_3, EV_L2_MDL_ERROR, 
l2_st3_tei_remove}, {ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove}, {ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove}, {ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove}, {ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove}, {ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove}, {ST_L2_4, EV_L2_SABME, l2_start_multi}, {ST_L2_5, EV_L2_SABME, l2_send_UA}, {ST_L2_6, EV_L2_SABME, l2_send_DM}, {ST_L2_7, EV_L2_SABME, l2_restart_multi}, {ST_L2_8, EV_L2_SABME, l2_restart_multi}, {ST_L2_4, EV_L2_DISC, l2_send_DM}, {ST_L2_5, EV_L2_DISC, l2_send_DM}, {ST_L2_6, EV_L2_DISC, l2_send_UA}, {ST_L2_7, EV_L2_DISC, l2_stop_multi}, {ST_L2_8, EV_L2_DISC, l2_stop_multi}, {ST_L2_4, EV_L2_UA, l2_mdl_error_ua}, {ST_L2_5, EV_L2_UA, l2_connected}, {ST_L2_6, EV_L2_UA, l2_released}, {ST_L2_7, EV_L2_UA, l2_mdl_error_ua}, {ST_L2_8, EV_L2_UA, l2_mdl_error_ua}, {ST_L2_4, EV_L2_DM, l2_reestablish}, {ST_L2_5, EV_L2_DM, l2_st5_dm_release}, {ST_L2_6, EV_L2_DM, l2_st6_dm_release}, {ST_L2_7, EV_L2_DM, l2_mdl_error_dm}, {ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm}, {ST_L2_1, EV_L2_UI, l2_got_ui}, {ST_L2_2, EV_L2_UI, l2_got_ui}, {ST_L2_3, EV_L2_UI, l2_got_ui}, {ST_L2_4, EV_L2_UI, l2_got_ui}, {ST_L2_5, EV_L2_UI, l2_got_ui}, {ST_L2_6, EV_L2_UI, l2_got_ui}, {ST_L2_7, EV_L2_UI, l2_got_ui}, {ST_L2_8, EV_L2_UI, l2_got_ui}, {ST_L2_7, EV_L2_FRMR, l2_got_FRMR}, {ST_L2_8, EV_L2_FRMR, l2_got_FRMR}, {ST_L2_7, EV_L2_SUPER, l2_st7_got_super}, {ST_L2_8, EV_L2_SUPER, l2_st8_got_super}, {ST_L2_7, EV_L2_I, l2_got_iframe}, {ST_L2_8, EV_L2_I, l2_got_iframe}, {ST_L2_5, EV_L2_T200, l2_st5_tout_200}, {ST_L2_6, EV_L2_T200, l2_st6_tout_200}, {ST_L2_7, EV_L2_T200, l2_st7_tout_200}, {ST_L2_8, EV_L2_T200, l2_st8_tout_200}, {ST_L2_7, EV_L2_T203, l2_st7_tout_203}, {ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue}, {ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy}, {ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy}, {ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy}, {ST_L2_8, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy}, {ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error}, {ST_L2_5, 
EV_L2_FRAME_ERROR, l2_frame_error}, {ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error}, {ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest}, {ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest}, {ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistent_da}, {ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove}, {ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove}, {ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistent_da}, {ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistent_da}, {ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistent_da}, {ST_L2_7, EV_L1_DEACTIVATE, l2_persistent_da}, {ST_L2_8, EV_L1_DEACTIVATE, l2_persistent_da}, }; static void isdnl2_l1l2(struct PStack *st, int pr, void *arg) { struct sk_buff *skb = arg; u_char *datap; int ret = 1, len; int c = 0; switch (pr) { case (PH_DATA | INDICATION): datap = skb->data; len = l2addrsize(&st->l2); if (skb->len > len) datap += len; else { FsmEvent(&st->l2.l2m, EV_L2_FRAME_ERROR, (void *) 'N'); dev_kfree_skb(skb); return; } if (!(*datap & 1)) { /* I-Frame */ if (!(c = iframe_error(st, skb))) ret = FsmEvent(&st->l2.l2m, EV_L2_I, skb); } else if (IsSFrame(datap, st)) { /* S-Frame */ if (!(c = super_error(st, skb))) ret = FsmEvent(&st->l2.l2m, EV_L2_SUPER, skb); } else if (IsUI(datap)) { if (!(c = UI_error(st, skb))) ret = FsmEvent(&st->l2.l2m, EV_L2_UI, skb); } else if (IsSABME(datap, st)) { if (!(c = unnum_error(st, skb, CMD))) ret = FsmEvent(&st->l2.l2m, EV_L2_SABME, skb); } else if (IsUA(datap)) { if (!(c = unnum_error(st, skb, RSP))) ret = FsmEvent(&st->l2.l2m, EV_L2_UA, skb); } else if (IsDISC(datap)) { if (!(c = unnum_error(st, skb, CMD))) ret = FsmEvent(&st->l2.l2m, EV_L2_DISC, skb); } else if (IsDM(datap)) { if (!(c = unnum_error(st, skb, RSP))) ret = FsmEvent(&st->l2.l2m, EV_L2_DM, skb); } else if (IsFRMR(datap)) { if (!(c = FRMR_error(st, skb))) ret = FsmEvent(&st->l2.l2m, EV_L2_FRMR, skb); } else { FsmEvent(&st->l2.l2m, EV_L2_FRAME_ERROR, (void *) 'L'); dev_kfree_skb(skb); ret = 0; } if (c) { dev_kfree_skb(skb); FsmEvent(&st->l2.l2m, EV_L2_FRAME_ERROR, 
(void *)(long)c); ret = 0; } if (ret) dev_kfree_skb(skb); break; case (PH_PULL | CONFIRM): FsmEvent(&st->l2.l2m, EV_L2_ACK_PULL, arg); break; case (PH_PAUSE | INDICATION): test_and_set_bit(FLG_DCHAN_BUSY, &st->l2.flag); break; case (PH_PAUSE | CONFIRM): test_and_clear_bit(FLG_DCHAN_BUSY, &st->l2.flag); break; case (PH_ACTIVATE | CONFIRM): case (PH_ACTIVATE | INDICATION): test_and_set_bit(FLG_L1_ACTIV, &st->l2.flag); if (test_and_clear_bit(FLG_ESTAB_PEND, &st->l2.flag)) FsmEvent(&st->l2.l2m, EV_L2_DL_ESTABLISH_REQ, arg); break; case (PH_DEACTIVATE | INDICATION): case (PH_DEACTIVATE | CONFIRM): test_and_clear_bit(FLG_L1_ACTIV, &st->l2.flag); FsmEvent(&st->l2.l2m, EV_L1_DEACTIVATE, arg); break; default: l2m_debug(&st->l2.l2m, "l2 unknown pr %04x", pr); break; } } static void isdnl2_l3l2(struct PStack *st, int pr, void *arg) { switch (pr) { case (DL_DATA | REQUEST): if (FsmEvent(&st->l2.l2m, EV_L2_DL_DATA, arg)) { dev_kfree_skb((struct sk_buff *) arg); } break; case (DL_UNIT_DATA | REQUEST): if (FsmEvent(&st->l2.l2m, EV_L2_DL_UNIT_DATA, arg)) { dev_kfree_skb((struct sk_buff *) arg); } break; case (DL_ESTABLISH | REQUEST): if (test_bit(FLG_L1_ACTIV, &st->l2.flag)) { if (test_bit(FLG_LAPD, &st->l2.flag) || test_bit(FLG_ORIG, &st->l2.flag)) { FsmEvent(&st->l2.l2m, EV_L2_DL_ESTABLISH_REQ, arg); } } else { if (test_bit(FLG_LAPD, &st->l2.flag) || test_bit(FLG_ORIG, &st->l2.flag)) { test_and_set_bit(FLG_ESTAB_PEND, &st->l2.flag); } st->l2.l2l1(st, PH_ACTIVATE, NULL); } break; case (DL_RELEASE | REQUEST): if (test_bit(FLG_LAPB, &st->l2.flag)) { st->l2.l2l1(st, PH_DEACTIVATE, NULL); } FsmEvent(&st->l2.l2m, EV_L2_DL_RELEASE_REQ, arg); break; case (MDL_ASSIGN | REQUEST): FsmEvent(&st->l2.l2m, EV_L2_MDL_ASSIGN, arg); break; case (MDL_REMOVE | REQUEST): FsmEvent(&st->l2.l2m, EV_L2_MDL_REMOVE, arg); break; case (MDL_ERROR | RESPONSE): FsmEvent(&st->l2.l2m, EV_L2_MDL_ERROR, arg); break; } } void releasestack_isdnl2(struct PStack *st) { FsmDelTimer(&st->l2.t200, 21); 
/*
 * Module setup/teardown block: the leading tokens finish releasestack_isdnl2()
 * (delete T203, purge both queues, release the transmit window);
 * l2m_debug() is the FSM's printdebug callback routing vararg messages to
 * VHiSax_putstatus(); setstack_isdnl2() wires the layer-1/layer-3 entry
 * points, initialises queues, window, timers and the FSM (initial state
 * ST_L2_4 for LAPB, ST_L2_1 for LAPD); transl2_l3l2()/setstack_transl2()
 * provide a transparent pass-through layer 2 that maps DL_* requests straight
 * to PH_* primitives; Isdnl2New()/Isdnl2Free() register/unregister the global
 * l2fsm with the generic FSM framework.
 */
FsmDelTimer(&st->l2.t203, 16); skb_queue_purge(&st->l2.i_queue); skb_queue_purge(&st->l2.ui_queue); ReleaseWin(&st->l2); } static void l2m_debug(struct FsmInst *fi, char *fmt, ...) { va_list args; struct PStack *st = fi->userdata; va_start(args, fmt); VHiSax_putstatus(st->l1.hardware, st->l2.debug_id, fmt, args); va_end(args); } void setstack_isdnl2(struct PStack *st, char *debug_id) { spin_lock_init(&st->l2.lock); st->l1.l1l2 = isdnl2_l1l2; st->l3.l3l2 = isdnl2_l3l2; skb_queue_head_init(&st->l2.i_queue); skb_queue_head_init(&st->l2.ui_queue); InitWin(&st->l2); st->l2.debug = 0; st->l2.l2m.fsm = &l2fsm; if (test_bit(FLG_LAPB, &st->l2.flag)) st->l2.l2m.state = ST_L2_4; else st->l2.l2m.state = ST_L2_1; st->l2.l2m.debug = 0; st->l2.l2m.userdata = st; st->l2.l2m.userint = 0; st->l2.l2m.printdebug = l2m_debug; strcpy(st->l2.debug_id, debug_id); FsmInitTimer(&st->l2.l2m, &st->l2.t200); FsmInitTimer(&st->l2.l2m, &st->l2.t203); } static void transl2_l3l2(struct PStack *st, int pr, void *arg) { switch (pr) { case (DL_DATA | REQUEST): case (DL_UNIT_DATA | REQUEST): st->l2.l2l1(st, PH_DATA | REQUEST, arg); break; case (DL_ESTABLISH | REQUEST): st->l2.l2l1(st, PH_ACTIVATE | REQUEST, NULL); break; case (DL_RELEASE | REQUEST): st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL); break; } } void setstack_transl2(struct PStack *st) { st->l3.l3l2 = transl2_l3l2; } void releasestack_transl2(struct PStack *st) { } int __init Isdnl2New(void) { l2fsm.state_count = L2_STATE_COUNT; l2fsm.event_count = L2_EVENT_COUNT; l2fsm.strEvent = strL2Event; l2fsm.strState = strL2State; return FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList)); } void Isdnl2Free(void) { FsmFree(&l2fsm); }
gpl-2.0
Renesas-EMEV2/Kernel
drivers/net/atl1e/atl1e_param.c
9290
7333
/* * Copyright(c) 2007 Atheros Corporation. All rights reserved. * * Derived from Intel e1000 driver * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/netdevice.h> #include "atl1e.h" /* This is the only thing that needs to be changed to adjust the * maximum number of ports that the driver can manage. */ #define ATL1E_MAX_NIC 32 #define OPTION_UNSET -1 #define OPTION_DISABLED 0 #define OPTION_ENABLED 1 /* All parameters are treated the same, as an integer array of values. * This macro just reduces the need to repeat the same declaration code * over and over (plus this helps to avoid typo bugs). */ #define ATL1E_PARAM_INIT { [0 ... 
ATL1E_MAX_NIC] = OPTION_UNSET } #define ATL1E_PARAM(x, desc) \ static int __devinitdata x[ATL1E_MAX_NIC + 1] = ATL1E_PARAM_INIT; \ static unsigned int num_##x; \ module_param_array_named(x, x, int, &num_##x, 0); \ MODULE_PARM_DESC(x, desc); /* Transmit Memory count * * Valid Range: 64-2048 * * Default Value: 128 */ #define ATL1E_MIN_TX_DESC_CNT 32 #define ATL1E_MAX_TX_DESC_CNT 1020 #define ATL1E_DEFAULT_TX_DESC_CNT 128 ATL1E_PARAM(tx_desc_cnt, "Transmit description count"); /* Receive Memory Block Count * * Valid Range: 16-512 * * Default Value: 128 */ #define ATL1E_MIN_RX_MEM_SIZE 8 /* 8KB */ #define ATL1E_MAX_RX_MEM_SIZE 1024 /* 1MB */ #define ATL1E_DEFAULT_RX_MEM_SIZE 256 /* 128KB */ ATL1E_PARAM(rx_mem_size, "memory size of rx buffer(KB)"); /* User Specified MediaType Override * * Valid Range: 0-5 * - 0 - auto-negotiate at all supported speeds * - 1 - only link at 100Mbps Full Duplex * - 2 - only link at 100Mbps Half Duplex * - 3 - only link at 10Mbps Full Duplex * - 4 - only link at 10Mbps Half Duplex * Default Value: 0 */ ATL1E_PARAM(media_type, "MediaType Select"); /* Interrupt Moderate Timer in units of 2 us * * Valid Range: 10-65535 * * Default Value: 45000(90ms) */ #define INT_MOD_DEFAULT_CNT 100 /* 200us */ #define INT_MOD_MAX_CNT 65000 #define INT_MOD_MIN_CNT 50 ATL1E_PARAM(int_mod_timer, "Interrupt Moderator Timer"); #define AUTONEG_ADV_DEFAULT 0x2F #define AUTONEG_ADV_MASK 0x2F #define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL #define FLASH_VENDOR_DEFAULT 0 #define FLASH_VENDOR_MIN 0 #define FLASH_VENDOR_MAX 2 struct atl1e_option { enum { enable_option, range_option, list_option } type; char *name; char *err; int def; union { struct { /* range_option info */ int min; int max; } r; struct { /* list_option info */ int nr; struct atl1e_opt_list { int i; char *str; } *p; } l; } arg; }; static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt, struct atl1e_adapter *adapter) { if (*value == OPTION_UNSET) { *value = opt->def; return 0; } 
switch (opt->type) { case enable_option: switch (*value) { case OPTION_ENABLED: netdev_info(adapter->netdev, "%s Enabled\n", opt->name); return 0; case OPTION_DISABLED: netdev_info(adapter->netdev, "%s Disabled\n", opt->name); return 0; } break; case range_option: if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { netdev_info(adapter->netdev, "%s set to %i\n", opt->name, *value); return 0; } break; case list_option:{ int i; struct atl1e_opt_list *ent; for (i = 0; i < opt->arg.l.nr; i++) { ent = &opt->arg.l.p[i]; if (*value == ent->i) { if (ent->str[0] != '\0') netdev_info(adapter->netdev, "%s\n", ent->str); return 0; } } break; } default: BUG(); } netdev_info(adapter->netdev, "Invalid %s specified (%i) %s\n", opt->name, *value, opt->err); *value = opt->def; return -1; } /* * atl1e_check_options - Range Checking for Command Line Parameters * @adapter: board private structure * * This routine checks all command line parameters for valid user * input. If an invalid value is given, or if no user specified * value exists, a default value is used. The final value is stored * in a variable in the adapter structure. 
*/ void __devinit atl1e_check_options(struct atl1e_adapter *adapter) { int bd = adapter->bd_number; if (bd >= ATL1E_MAX_NIC) { netdev_notice(adapter->netdev, "no configuration for board #%i\n", bd); netdev_notice(adapter->netdev, "Using defaults for all values\n"); } { /* Transmit Ring Size */ struct atl1e_option opt = { .type = range_option, .name = "Transmit Ddescription Count", .err = "using default of " __MODULE_STRING(ATL1E_DEFAULT_TX_DESC_CNT), .def = ATL1E_DEFAULT_TX_DESC_CNT, .arg = { .r = { .min = ATL1E_MIN_TX_DESC_CNT, .max = ATL1E_MAX_TX_DESC_CNT} } }; int val; if (num_tx_desc_cnt > bd) { val = tx_desc_cnt[bd]; atl1e_validate_option(&val, &opt, adapter); adapter->tx_ring.count = (u16) val & 0xFFFC; } else adapter->tx_ring.count = (u16)opt.def; } { /* Receive Memory Block Count */ struct atl1e_option opt = { .type = range_option, .name = "Memory size of rx buffer(KB)", .err = "using default of " __MODULE_STRING(ATL1E_DEFAULT_RX_MEM_SIZE), .def = ATL1E_DEFAULT_RX_MEM_SIZE, .arg = { .r = { .min = ATL1E_MIN_RX_MEM_SIZE, .max = ATL1E_MAX_RX_MEM_SIZE} } }; int val; if (num_rx_mem_size > bd) { val = rx_mem_size[bd]; atl1e_validate_option(&val, &opt, adapter); adapter->rx_ring.page_size = (u32)val * 1024; } else { adapter->rx_ring.page_size = (u32)opt.def * 1024; } } { /* Interrupt Moderate Timer */ struct atl1e_option opt = { .type = range_option, .name = "Interrupt Moderate Timer", .err = "using default of " __MODULE_STRING(INT_MOD_DEFAULT_CNT), .def = INT_MOD_DEFAULT_CNT, .arg = { .r = { .min = INT_MOD_MIN_CNT, .max = INT_MOD_MAX_CNT} } } ; int val; if (num_int_mod_timer > bd) { val = int_mod_timer[bd]; atl1e_validate_option(&val, &opt, adapter); adapter->hw.imt = (u16) val; } else adapter->hw.imt = (u16)(opt.def); } { /* MediaType */ struct atl1e_option opt = { .type = range_option, .name = "Speed/Duplex Selection", .err = "using default of " __MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR), .def = MEDIA_TYPE_AUTO_SENSOR, .arg = { .r = { .min = 
MEDIA_TYPE_AUTO_SENSOR, .max = MEDIA_TYPE_10M_HALF} } } ; int val; if (num_media_type > bd) { val = media_type[bd]; atl1e_validate_option(&val, &opt, adapter); adapter->hw.media_type = (u16) val; } else adapter->hw.media_type = (u16)(opt.def); } }
gpl-2.0
aidfarh/android_kernel_lge_palman
arch/mips/emma/markeins/platform.c
9290
5027
/*
 *  Copyright(C) MontaVista Software Inc, 2006
 *
 *  Author: dmitry pervushin <dpervushin@ru.mvista.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/serial_8250.h>
#include <linux/mtd/physmap.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/addrspace.h>
#include <asm/time.h>
#include <asm/bcache.h>
#include <asm/irq.h>
#include <asm/reboot.h>
#include <asm/traps.h>

#include <asm/emma/emma2rh.h>

#define I2C_EMMA2RH "emma2rh-iic"	/* must be in sync with IIC driver */

/* Each PIIC controller exports one IRQ resource and one register window.
 * NOTE(review): struct resource .end is inclusive, so BASE + 0x1000 spans
 * 0x1001 bytes, and the memory resources carry .flags = 0 rather than
 * IORESOURCE_MEM -- presumably the IIC driver looks resources up by index;
 * confirm against the driver before changing either.
 */
static struct resource i2c_emma_resources_0[] = {
	{
		.name	= NULL,
		.start	= EMMA2RH_IRQ_PIIC0,
		.end	= EMMA2RH_IRQ_PIIC0,
		.flags	= IORESOURCE_IRQ
	}, {
		.name	= NULL,
		.start	= EMMA2RH_PIIC0_BASE,
		.end	= EMMA2RH_PIIC0_BASE + 0x1000,
		.flags	= 0
	},
};

/* NOTE(review): unlike i2c_emma_resources_0 these two arrays (and
 * i2c_emma_devices below) are not static; kept as-is in case they are
 * referenced from another translation unit. */
struct resource i2c_emma_resources_1[] = {
	{
		.name	= NULL,
		.start	= EMMA2RH_IRQ_PIIC1,
		.end	= EMMA2RH_IRQ_PIIC1,
		.flags	= IORESOURCE_IRQ
	}, {
		.name	= NULL,
		.start	= EMMA2RH_PIIC1_BASE,
		.end	= EMMA2RH_PIIC1_BASE + 0x1000,
		.flags	= 0
	},
};

struct resource i2c_emma_resources_2[] = {
	{
		.name	= NULL,
		.start	= EMMA2RH_IRQ_PIIC2,
		.end	= EMMA2RH_IRQ_PIIC2,
		.flags	= IORESOURCE_IRQ
	}, {
		.name	= NULL,
		.start	= EMMA2RH_PIIC2_BASE,
		.end	= EMMA2RH_PIIC2_BASE + 0x1000,
		.flags	= 0
	},
};

/* Three instances of the EMMA2RH on-chip I2C controller. */
struct platform_device i2c_emma_devices[] = {
	[0] = {
		.name = I2C_EMMA2RH,
		.id = 0,
		.resource = i2c_emma_resources_0,
		.num_resources = ARRAY_SIZE(i2c_emma_resources_0),
	},
	[1] = {
		.name = I2C_EMMA2RH,
		.id = 1,
		.resource = i2c_emma_resources_1,
		.num_resources = ARRAY_SIZE(i2c_emma_resources_1),
	},
	[2] = {
		.name = I2C_EMMA2RH,
		.id = 2,
		.resource = i2c_emma_resources_2,
		.num_resources = ARRAY_SIZE(i2c_emma_resources_2),
	},
};

#define EMMA2RH_SERIAL_CLOCK 18544000
/* parenthesized so the macro stays a single expression when combined
 * with other operators at the use site */
#define EMMA2RH_SERIAL_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST)

/* PFUR UARTs: registers sit on byte lane 3 of each 32-bit word, hence
 * the "+ 3" base offset and .regshift = 4. Terminated by an empty entry
 * as required by the serial8250 platform driver. */
static struct plat_serial8250_port platform_serial_ports[] = {
	[0] = {
		.membase = (void __iomem *)
			KSEG1ADDR(EMMA2RH_PFUR0_BASE + 3),
		.mapbase = EMMA2RH_PFUR0_BASE + 3,
		.irq = EMMA2RH_IRQ_PFUR0,
		.uartclk = EMMA2RH_SERIAL_CLOCK,
		.regshift = 4,
		.iotype = UPIO_MEM,
		.flags = EMMA2RH_SERIAL_FLAGS,
	},
	[1] = {
		.membase = (void __iomem *)
			KSEG1ADDR(EMMA2RH_PFUR1_BASE + 3),
		.mapbase = EMMA2RH_PFUR1_BASE + 3,
		.irq = EMMA2RH_IRQ_PFUR1,
		.uartclk = EMMA2RH_SERIAL_CLOCK,
		.regshift = 4,
		.iotype = UPIO_MEM,
		.flags = EMMA2RH_SERIAL_FLAGS,
	},
	[2] = {
		.membase = (void __iomem *)
			KSEG1ADDR(EMMA2RH_PFUR2_BASE + 3),
		.mapbase = EMMA2RH_PFUR2_BASE + 3,
		.irq = EMMA2RH_IRQ_PFUR2,
		.uartclk = EMMA2RH_SERIAL_CLOCK,
		.regshift = 4,
		.iotype = UPIO_MEM,
		.flags = EMMA2RH_SERIAL_FLAGS,
	},
	[3] = {
		.flags = 0,	/* terminator */
	},
};

static struct platform_device serial_emma = {
	.name = "serial8250",
	.dev = {
		.platform_data = &platform_serial_ports,
	},
};

/* NOR flash layout; all partitions after the first are laid out
 * back-to-back via MTDPART_OFS_APPEND. */
static struct mtd_partition markeins_parts[] = {
	[0] = {
		.name = "RootFS",
		.offset = 0x00000000,
		.size = 0x00c00000,
	},
	[1] = {
		.name = "boot code area",
		.offset = MTDPART_OFS_APPEND,
		.size = 0x00100000,
	},
	[2] = {
		.name = "kernel image",
		.offset = MTDPART_OFS_APPEND,
		.size = 0x00300000,
	},
	[3] = {
		.name = "RootFS2",
		.offset = MTDPART_OFS_APPEND,
		.size = 0x00c00000,
	},
	[4] = {
		.name = "boot code area2",
		.offset = MTDPART_OFS_APPEND,
		.size = 0x00100000,
	},
	[5] = {
		.name = "kernel image2",
		.offset = MTDPART_OFS_APPEND,
		.size = MTDPART_SIZ_FULL,
	},
};

static struct physmap_flash_data markeins_flash_data = {
	.width		= 2,
	.nr_parts	= ARRAY_SIZE(markeins_parts),
	.parts		= markeins_parts
};

/* Fix: the original had .end = 0x02000000, which is below .start.
 * struct resource .end is the inclusive last address, and 0x02000000
 * (32MB) is evidently the intended window size. */
static struct resource markeins_flash_resource = {
	.start		= 0x1e000000,
	.end		= 0x1e000000 + 0x02000000 - 1,
	.flags		= IORESOURCE_MEM
};

static struct platform_device markeins_flash_device = {
	.name		= "physmap-flash",
	.id		= 0,
	.dev		= {
		.platform_data	= &markeins_flash_data,
	},
	.num_resources	= 1,
	.resource	= &markeins_flash_resource,
};

/* Everything this board registers at arch_initcall time. */
static struct platform_device *devices[] = {
	i2c_emma_devices,
	i2c_emma_devices + 1,
	i2c_emma_devices + 2,
	&serial_emma,
	&markeins_flash_device,
};

static int __init platform_devices_setup(void)
{
	return platform_add_devices(devices, ARRAY_SIZE(devices));
}

arch_initcall(platform_devices_setup);
gpl-2.0
bboyfeiyu/linux
net/irda/irlmp_frame.c
11594
14337
/********************************************************************* * * Filename: irlmp_frame.c * Version: 0.9 * Description: IrLMP frame implementation * Status: Experimental. * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Tue Aug 19 02:09:59 1997 * Modified at: Mon Dec 13 13:41:12 1999 * Modified by: Dag Brattli <dagb@cs.uit.no> * * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no> * All Rights Reserved. * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * ********************************************************************/ #include <linux/skbuff.h> #include <linux/kernel.h> #include <net/irda/irda.h> #include <net/irda/irlap.h> #include <net/irda/timer.h> #include <net/irda/irlmp.h> #include <net/irda/irlmp_frame.h> #include <net/irda/discovery.h> static struct lsap_cb *irlmp_find_lsap(struct lap_cb *self, __u8 dlsap, __u8 slsap, int status, hashbin_t *); inline void irlmp_send_data_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap, int expedited, struct sk_buff *skb) { skb->data[0] = dlsap; skb->data[1] = slsap; if (expedited) { IRDA_DEBUG(4, "%s(), sending expedited data\n", __func__); irlap_data_request(self->irlap, skb, TRUE); } else irlap_data_request(self->irlap, skb, FALSE); } /* * Function irlmp_send_lcf_pdu (dlsap, slsap, opcode,skb) * * Send Link Control Frame to IrLAP */ void irlmp_send_lcf_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap, __u8 opcode, struct sk_buff *skb) { __u8 *frame; IRDA_DEBUG(2, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); IRDA_ASSERT(skb 
!= NULL, return;); frame = skb->data; frame[0] = dlsap | CONTROL_BIT; frame[1] = slsap; frame[2] = opcode; if (opcode == DISCONNECT) frame[3] = 0x01; /* Service user request */ else frame[3] = 0x00; /* rsvd */ irlap_data_request(self->irlap, skb, FALSE); } /* * Function irlmp_input (skb) * * Used by IrLAP to pass received data frames to IrLMP layer * */ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb, int unreliable) { struct lsap_cb *lsap; __u8 slsap_sel; /* Source (this) LSAP address */ __u8 dlsap_sel; /* Destination LSAP address */ __u8 *fp; IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); IRDA_ASSERT(skb->len > 2, return;); fp = skb->data; /* * The next statements may be confusing, but we do this so that * destination LSAP of received frame is source LSAP in our view */ slsap_sel = fp[0] & LSAP_MASK; dlsap_sel = fp[1]; /* * Check if this is an incoming connection, since we must deal with * it in a different way than other established connections. 
*/ if ((fp[0] & CONTROL_BIT) && (fp[2] == CONNECT_CMD)) { IRDA_DEBUG(3, "%s(), incoming connection, " "source LSAP=%d, dest LSAP=%d\n", __func__, slsap_sel, dlsap_sel); /* Try to find LSAP among the unconnected LSAPs */ lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, CONNECT_CMD, irlmp->unconnected_lsaps); /* Maybe LSAP was already connected, so try one more time */ if (!lsap) { IRDA_DEBUG(1, "%s(), incoming connection for LSAP already connected\n", __func__); lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, 0, self->lsaps); } } else lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, 0, self->lsaps); if (lsap == NULL) { IRDA_DEBUG(2, "IrLMP, Sorry, no LSAP for received frame!\n"); IRDA_DEBUG(2, "%s(), slsap_sel = %02x, dlsap_sel = %02x\n", __func__, slsap_sel, dlsap_sel); if (fp[0] & CONTROL_BIT) { IRDA_DEBUG(2, "%s(), received control frame %02x\n", __func__, fp[2]); } else { IRDA_DEBUG(2, "%s(), received data frame\n", __func__); } return; } /* * Check if we received a control frame? 
*/ if (fp[0] & CONTROL_BIT) { switch (fp[2]) { case CONNECT_CMD: lsap->lap = self; irlmp_do_lsap_event(lsap, LM_CONNECT_INDICATION, skb); break; case CONNECT_CNF: irlmp_do_lsap_event(lsap, LM_CONNECT_CONFIRM, skb); break; case DISCONNECT: IRDA_DEBUG(4, "%s(), Disconnect indication!\n", __func__); irlmp_do_lsap_event(lsap, LM_DISCONNECT_INDICATION, skb); break; case ACCESSMODE_CMD: IRDA_DEBUG(0, "Access mode cmd not implemented!\n"); break; case ACCESSMODE_CNF: IRDA_DEBUG(0, "Access mode cnf not implemented!\n"); break; default: IRDA_DEBUG(0, "%s(), Unknown control frame %02x\n", __func__, fp[2]); break; } } else if (unreliable) { /* Optimize and bypass the state machine if possible */ if (lsap->lsap_state == LSAP_DATA_TRANSFER_READY) irlmp_udata_indication(lsap, skb); else irlmp_do_lsap_event(lsap, LM_UDATA_INDICATION, skb); } else { /* Optimize and bypass the state machine if possible */ if (lsap->lsap_state == LSAP_DATA_TRANSFER_READY) irlmp_data_indication(lsap, skb); else irlmp_do_lsap_event(lsap, LM_DATA_INDICATION, skb); } } /* * Function irlmp_link_unitdata_indication (self, skb) * * * */ #ifdef CONFIG_IRDA_ULTRA void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb) { struct lsap_cb *lsap; __u8 slsap_sel; /* Source (this) LSAP address */ __u8 dlsap_sel; /* Destination LSAP address */ __u8 pid; /* Protocol identifier */ __u8 *fp; unsigned long flags; IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); IRDA_ASSERT(skb->len > 2, return;); fp = skb->data; /* * The next statements may be confusing, but we do this so that * destination LSAP of received frame is source LSAP in our view */ slsap_sel = fp[0] & LSAP_MASK; dlsap_sel = fp[1]; pid = fp[2]; if (pid & 0x80) { IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__); return; } /* Check if frame is addressed to the connectionless LSAP */ if ((slsap_sel != LSAP_CONNLESS) || (dlsap_sel != LSAP_CONNLESS)) { 
IRDA_DEBUG(0, "%s(), dropping frame!\n", __func__); return; } /* Search the connectionless LSAP */ spin_lock_irqsave(&irlmp->unconnected_lsaps->hb_spinlock, flags); lsap = (struct lsap_cb *) hashbin_get_first(irlmp->unconnected_lsaps); while (lsap != NULL) { /* * Check if source LSAP and dest LSAP selectors and PID match. */ if ((lsap->slsap_sel == slsap_sel) && (lsap->dlsap_sel == dlsap_sel) && (lsap->pid == pid)) { break; } lsap = (struct lsap_cb *) hashbin_get_next(irlmp->unconnected_lsaps); } spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock, flags); if (lsap) irlmp_connless_data_indication(lsap, skb); else { IRDA_DEBUG(0, "%s(), found no matching LSAP!\n", __func__); } } #endif /* CONFIG_IRDA_ULTRA */ /* * Function irlmp_link_disconnect_indication (reason, userdata) * * IrLAP has disconnected * */ void irlmp_link_disconnect_indication(struct lap_cb *lap, struct irlap_cb *irlap, LAP_REASON reason, struct sk_buff *skb) { IRDA_DEBUG(2, "%s()\n", __func__); IRDA_ASSERT(lap != NULL, return;); IRDA_ASSERT(lap->magic == LMP_LAP_MAGIC, return;); lap->reason = reason; lap->daddr = DEV_ADDR_ANY; /* FIXME: must do something with the skb if any */ /* * Inform station state machine */ irlmp_do_lap_event(lap, LM_LAP_DISCONNECT_INDICATION, NULL); } /* * Function irlmp_link_connect_indication (qos) * * Incoming LAP connection! * */ void irlmp_link_connect_indication(struct lap_cb *self, __u32 saddr, __u32 daddr, struct qos_info *qos, struct sk_buff *skb) { IRDA_DEBUG(4, "%s()\n", __func__); /* Copy QoS settings for this session */ self->qos = qos; /* Update destination device address */ self->daddr = daddr; IRDA_ASSERT(self->saddr == saddr, return;); irlmp_do_lap_event(self, LM_LAP_CONNECT_INDICATION, skb); } /* * Function irlmp_link_connect_confirm (qos) * * LAP connection confirmed! 
* */ void irlmp_link_connect_confirm(struct lap_cb *self, struct qos_info *qos, struct sk_buff *skb) { IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); IRDA_ASSERT(qos != NULL, return;); /* Don't need use the skb for now */ /* Copy QoS settings for this session */ self->qos = qos; irlmp_do_lap_event(self, LM_LAP_CONNECT_CONFIRM, NULL); } /* * Function irlmp_link_discovery_indication (self, log) * * Device is discovering us * * It's not an answer to our own discoveries, just another device trying * to perform discovery, but we don't want to miss the opportunity * to exploit this information, because : * o We may not actively perform discovery (just passive discovery) * o This type of discovery is much more reliable. In some cases, it * seem that less than 50% of our discoveries get an answer, while * we always get ~100% of these. * o Make faster discovery, statistically divide time of discovery * events by 2 (important for the latency aspect and user feel) * o Even is we do active discovery, the other node might not * answer our discoveries (ex: Palm). The Palm will just perform * one active discovery and connect directly to us. * * However, when both devices discover each other, they might attempt to * connect to each other following the discovery event, and it would create * collisions on the medium (SNRM battle). * The "fix" for that is to disable all connection requests in IrLAP * for 100ms after a discovery indication by setting the media_busy flag. * Previously, we used to postpone the event which was quite ugly. Now * that IrLAP takes care of this problem, just pass the event up... 
 *
 * Jean II
 */
/* Passive discovery: a peer discovered us; log it and fan the event out
 * to interested LSAPs without going through the LM_LAP state machine. */
void irlmp_link_discovery_indication(struct lap_cb *self,
				     discovery_t *discovery)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);

	/* Add to main log, cleanup */
	irlmp_add_discovery(irlmp->cachelog, discovery);

	/* Just handle it the same way as a discovery confirm,
	 * bypass the LM_LAP state machine (see below) */
	irlmp_discovery_confirm(irlmp->cachelog, DISCOVERY_PASSIVE);
}

/*
 * Function irlmp_link_discovery_confirm (self, log)
 *
 *    Called by IrLAP with a list of discoveries after the discovery
 *    request has been carried out. A NULL log is received if IrLAP
 *    was unable to carry out the discovery request
 *
 */
void irlmp_link_discovery_confirm(struct lap_cb *self, hashbin_t *log)
{
	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);

	/* Add to main log, cleanup */
	irlmp_add_discovery_log(irlmp->cachelog, log);

	/* Propagate event to various LSAPs registered for it.
	 * We bypass the LM_LAP state machine because
	 *	1) We do it regardless of the LM_LAP state
	 *	2) It doesn't affect the LM_LAP state
	 *	3) Faster, slimer, simpler, ...
	 * Jean II */
	irlmp_discovery_confirm(irlmp->cachelog, DISCOVERY_ACTIVE);
}

#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
/* Remember the LSAP that matched the last received frame so the next
 * lookup in irlmp_find_lsap() can skip the linear hashbin walk.
 * The valid flag is dropped first so a concurrent reader never sees a
 * half-updated entry. */
static inline void irlmp_update_cache(struct lap_cb *lap,
				      struct lsap_cb *lsap)
{
	/* Prevent concurrent read to get garbage */
	lap->cache.valid = FALSE;
	/* Update cache entry */
	lap->cache.dlsap_sel = lsap->dlsap_sel;
	lap->cache.slsap_sel = lsap->slsap_sel;
	lap->cache.lsap = lsap;
	lap->cache.valid = TRUE;
}
#endif

/*
 * Function irlmp_find_handle (self, dlsap_sel, slsap_sel, status, queue)
 *
 *    Find handle associated with destination and source LSAP
 *
 * Any IrDA connection (LSAP/TSAP) is uniquely identified by
 * 3 parameters, the local lsap, the remote lsap and the remote address.
 * We may initiate multiple connections to the same remote service
 * (they will have different local lsap), a remote device may initiate
 * multiple connections to the same local service (they will have
 * different remote lsap), or multiple devices may connect to the same
 * service and may use the same remote lsap (and they will have
 * different remote address).
 * So, where is the remote address ? Each LAP connection is made with
 * a single remote device, so imply a specific remote address.
 * Jean II
 */
static struct lsap_cb *irlmp_find_lsap(struct lap_cb *self, __u8 dlsap_sel,
				       __u8 slsap_sel, int status,
				       hashbin_t *queue)
{
	struct lsap_cb *lsap;
	unsigned long flags;

	/*
	 * Optimize for the common case. We assume that the last frame
	 * received is in the same connection as the last one, so check in
	 * cache first to avoid the linear search
	 */
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
	if ((self->cache.valid) &&
	    (self->cache.slsap_sel == slsap_sel) &&
	    (self->cache.dlsap_sel == dlsap_sel)) {
		return self->cache.lsap;
	}
#endif

	/* Walk the queue under its lock; both match tests below must stay
	 * inside the locked region since they may mutate the entry. */
	spin_lock_irqsave(&queue->hb_spinlock, flags);

	lsap = (struct lsap_cb *) hashbin_get_first(queue);
	while (lsap != NULL) {
		/*
		 * If this is an incoming connection, then the destination
		 * LSAP selector may have been specified as LM_ANY so that
		 * any client can connect. In that case we only need to check
		 * if the source LSAP (in our view!) match!
		 */
		if ((status == CONNECT_CMD) &&
		    (lsap->slsap_sel == slsap_sel) &&
		    (lsap->dlsap_sel == LSAP_ANY)) {
			/* This is where the dest lsap sel is set on incoming
			 * lsaps */
			lsap->dlsap_sel = dlsap_sel;
			break;
		}
		/*
		 * Check if source LSAP and dest LSAP selectors match.
		 */
		if ((lsap->slsap_sel == slsap_sel) &&
		    (lsap->dlsap_sel == dlsap_sel))
			break;

		lsap = (struct lsap_cb *) hashbin_get_next(queue);
	}
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
	if (lsap)
		irlmp_update_cache(self, lsap);
#endif
	spin_unlock_irqrestore(&queue->hb_spinlock, flags);

	/* Return what we've found or NULL */
	return lsap;
}
gpl-2.0
s2k7/chromeos-tegra
arch/mips/kernel/traps.c
75
43461
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle * Copyright (C) 1995, 1996 Paul M. Antoine * Copyright (C) 1998 Ulf Carlsson * Copyright (C) 1999 Silicon Graphics, Inc. * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com * Copyright (C) 2000, 01 MIPS Technologies, Inc. * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki */ #include <linux/bug.h> #include <linux/compiler.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/spinlock.h> #include <linux/kallsyms.h> #include <linux/bootmem.h> #include <linux/interrupt.h> #include <linux/ptrace.h> #include <linux/kgdb.h> #include <linux/kdebug.h> #include <linux/kprobes.h> #include <linux/notifier.h> #include <linux/kdb.h> #include <asm/bootinfo.h> #include <asm/branch.h> #include <asm/break.h> #include <asm/cop2.h> #include <asm/cpu.h> #include <asm/dsp.h> #include <asm/fpu.h> #include <asm/fpu_emulator.h> #include <asm/mipsregs.h> #include <asm/mipsmtregs.h> #include <asm/module.h> #include <asm/pgtable.h> #include <asm/ptrace.h> #include <asm/sections.h> #include <asm/system.h> #include <asm/tlbdebug.h> #include <asm/traps.h> #include <asm/uaccess.h> #include <asm/watch.h> #include <asm/mmu_context.h> #include <asm/types.h> #include <asm/stacktrace.h> #include <asm/irq.h> #include <asm/uasm.h> extern void check_wait(void); extern asmlinkage void r4k_wait(void); extern asmlinkage void rollback_handle_int(void); extern asmlinkage void handle_int(void); extern asmlinkage void handle_tlbm(void); extern asmlinkage void handle_tlbl(void); extern asmlinkage void handle_tlbs(void); extern asmlinkage void handle_adel(void); extern asmlinkage void handle_ades(void); extern asmlinkage void handle_ibe(void); extern asmlinkage void 
handle_dbe(void); extern asmlinkage void handle_sys(void); extern asmlinkage void handle_bp(void); extern asmlinkage void handle_ri(void); extern asmlinkage void handle_ri_rdhwr_vivt(void); extern asmlinkage void handle_ri_rdhwr(void); extern asmlinkage void handle_cpu(void); extern asmlinkage void handle_ov(void); extern asmlinkage void handle_tr(void); extern asmlinkage void handle_fpe(void); extern asmlinkage void handle_mdmx(void); extern asmlinkage void handle_watch(void); extern asmlinkage void handle_mt(void); extern asmlinkage void handle_dsp(void); extern asmlinkage void handle_mcheck(void); extern asmlinkage void handle_reserved(void); extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx, int has_fpu); void (*board_be_init)(void); int (*board_be_handler)(struct pt_regs *regs, int is_fixup); void (*board_nmi_handler_setup)(void); void (*board_ejtag_handler_setup)(void); void (*board_bind_eic_interrupt)(int irq, int regset); static void show_raw_backtrace(unsigned long reg29) { unsigned long *sp = (unsigned long *)(reg29 & ~3); unsigned long addr; printk("Call Trace:"); #ifdef CONFIG_KALLSYMS printk("\n"); #endif while (!kstack_end(sp)) { unsigned long __user *p = (unsigned long __user *)(unsigned long)sp++; if (__get_user(addr, p)) { printk(" (Bad stack address)"); break; } if (__kernel_text_address(addr)) print_ip_sym(addr); } printk("\n"); } #ifdef CONFIG_KALLSYMS int raw_show_trace; static int __init set_raw_show_trace(char *str) { raw_show_trace = 1; return 1; } __setup("raw_show_trace", set_raw_show_trace); #endif static void show_backtrace(struct task_struct *task, const struct pt_regs *regs) { unsigned long sp = regs->regs[29]; unsigned long ra = regs->regs[31]; unsigned long pc = regs->cp0_epc; if (raw_show_trace || !__kernel_text_address(pc)) { show_raw_backtrace(sp); return; } printk("Call Trace:\n"); do { print_ip_sym(pc); pc = unwind_stack(task, &sp, pc, &ra); } while (pc); printk("\n"); } /* * This routine 
abuses get_user()/put_user() to reference pointers * with at least a bit of error checking ... */ static void show_stacktrace(struct task_struct *task, const struct pt_regs *regs) { const int field = 2 * sizeof(unsigned long); long stackdata; int i; unsigned long __user *sp = (unsigned long __user *)regs->regs[29]; printk("Stack :"); i = 0; while ((unsigned long) sp & (PAGE_SIZE - 1)) { if (i && ((i % (64 / field)) == 0)) printk("\n "); if (i > 39) { printk(" ..."); break; } if (__get_user(stackdata, sp++)) { printk(" (Bad stack address)"); break; } printk(" %0*lx", field, stackdata); i++; } printk("\n"); show_backtrace(task, regs); } void show_stack(struct task_struct *task, unsigned long *sp) { struct pt_regs regs; if (sp) { regs.regs[29] = (unsigned long)sp; regs.regs[31] = 0; regs.cp0_epc = 0; } else { if (task && task != current) { regs.regs[29] = task->thread.reg29; regs.regs[31] = 0; regs.cp0_epc = task->thread.reg31; #ifdef CONFIG_KGDB_KDB } else if (atomic_read(&kgdb_active) != -1 && kdb_current_regs) { memcpy(&regs, kdb_current_regs, sizeof(regs)); #endif /* CONFIG_KGDB_KDB */ } else { prepare_frametrace(&regs); } } show_stacktrace(task, &regs); } /* * The architecture-independent dump_stack generator */ void dump_stack(void) { struct pt_regs regs; prepare_frametrace(&regs); show_backtrace(current, &regs); } EXPORT_SYMBOL(dump_stack); static void show_code(unsigned int __user *pc) { long i; unsigned short __user *pc16 = NULL; printk("\nCode:"); if ((unsigned long)pc & 1) pc16 = (unsigned short __user *)((unsigned long)pc & ~1); for(i = -3 ; i < 6 ; i++) { unsigned int insn; if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) { printk(" (Bad address in epc)\n"); break; } printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' 
':'>')); } } static void __show_regs(const struct pt_regs *regs) { const int field = 2 * sizeof(unsigned long); unsigned int cause = regs->cp0_cause; int i; printk("Cpu %d\n", smp_processor_id()); /* * Saved main processor registers */ for (i = 0; i < 32; ) { if ((i % 4) == 0) printk("$%2d :", i); if (i == 0) printk(" %0*lx", field, 0UL); else if (i == 26 || i == 27) printk(" %*s", field, ""); else printk(" %0*lx", field, regs->regs[i]); i++; if ((i % 4) == 0) printk("\n"); } #ifdef CONFIG_CPU_HAS_SMARTMIPS printk("Acx : %0*lx\n", field, regs->acx); #endif printk("Hi : %0*lx\n", field, regs->hi); printk("Lo : %0*lx\n", field, regs->lo); /* * Saved cp0 registers */ printk("epc : %0*lx %pS\n", field, regs->cp0_epc, (void *) regs->cp0_epc); printk(" %s\n", print_tainted()); printk("ra : %0*lx %pS\n", field, regs->regs[31], (void *) regs->regs[31]); printk("Status: %08x ", (uint32_t) regs->cp0_status); if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) { if (regs->cp0_status & ST0_KUO) printk("KUo "); if (regs->cp0_status & ST0_IEO) printk("IEo "); if (regs->cp0_status & ST0_KUP) printk("KUp "); if (regs->cp0_status & ST0_IEP) printk("IEp "); if (regs->cp0_status & ST0_KUC) printk("KUc "); if (regs->cp0_status & ST0_IEC) printk("IEc "); } else { if (regs->cp0_status & ST0_KX) printk("KX "); if (regs->cp0_status & ST0_SX) printk("SX "); if (regs->cp0_status & ST0_UX) printk("UX "); switch (regs->cp0_status & ST0_KSU) { case KSU_USER: printk("USER "); break; case KSU_SUPERVISOR: printk("SUPERVISOR "); break; case KSU_KERNEL: printk("KERNEL "); break; default: printk("BAD_MODE "); break; } if (regs->cp0_status & ST0_ERL) printk("ERL "); if (regs->cp0_status & ST0_EXL) printk("EXL "); if (regs->cp0_status & ST0_IE) printk("IE "); } printk("\n"); printk("Cause : %08x\n", cause); cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE; if (1 <= cause && cause <= 5) printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr); printk("PrId : %08x (%s)\n", read_c0_prid(), 
cpu_name_string()); } /* * FIXME: really the generic show_regs should take a const pointer argument. */ void show_regs(struct pt_regs *regs) { __show_regs((struct pt_regs *)regs); } void show_registers(struct pt_regs *regs) { const int field = 2 * sizeof(unsigned long); __show_regs(regs); print_modules(); printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n", current->comm, current->pid, current_thread_info(), current, field, current_thread_info()->tp_value); if (cpu_has_userlocal) { unsigned long tls; tls = read_c0_userlocal(); if (tls != current_thread_info()->tp_value) printk("*HwTLS: %0*lx\n", field, tls); } show_stacktrace(current, regs); show_code((unsigned int __user *) regs->cp0_epc); printk("\n"); } static int regs_to_trapnr(struct pt_regs *regs) { return (regs->cp0_cause >> 2) & 0x1f; } static DEFINE_SPINLOCK(die_lock); void __noreturn die(const char *str, struct pt_regs *regs) { static int die_counter; int sig = SIGSEGV; #ifdef CONFIG_MIPS_MT_SMTC unsigned long dvpret = dvpe(); #endif /* CONFIG_MIPS_MT_SMTC */ notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV); console_verbose(); spin_lock_irq(&die_lock); bust_spinlocks(1); #ifdef CONFIG_MIPS_MT_SMTC mips_mt_regdump(dvpret); #endif /* CONFIG_MIPS_MT_SMTC */ if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP) sig = 0; printk("%s[#%d]:\n", str, ++die_counter); show_registers(regs); add_taint(TAINT_DIE); spin_unlock_irq(&die_lock); if (in_interrupt()) panic("Fatal exception in interrupt"); if (panic_on_oops) { printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n"); ssleep(5); panic("Fatal exception"); } do_exit(sig); } extern struct exception_table_entry __start___dbe_table[]; extern struct exception_table_entry __stop___dbe_table[]; __asm__( " .section __dbe_table, \"a\"\n" " .previous \n"); /* Given an address, look for it in the exception tables. 
*/ static const struct exception_table_entry *search_dbe_tables(unsigned long addr) { const struct exception_table_entry *e; e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr); if (!e) e = search_module_dbetables(addr); return e; } asmlinkage void do_be(struct pt_regs *regs) { const int field = 2 * sizeof(unsigned long); const struct exception_table_entry *fixup = NULL; int data = regs->cp0_cause & 4; int action = MIPS_BE_FATAL; /* XXX For now. Fixme, this searches the wrong table ... */ if (data && !user_mode(regs)) fixup = search_dbe_tables(exception_epc(regs)); if (fixup) action = MIPS_BE_FIXUP; if (board_be_handler) action = board_be_handler(regs, fixup != NULL); switch (action) { case MIPS_BE_DISCARD: return; case MIPS_BE_FIXUP: if (fixup) { regs->cp0_epc = fixup->nextinsn; return; } break; default: break; } /* * Assume it would be too dangerous to continue ... */ printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n", data ? "Data" : "Instruction", field, regs->cp0_epc, field, regs->regs[31]); if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs), SIGBUS) == NOTIFY_STOP) return; die_if_kernel("Oops", regs); force_sig(SIGBUS, current); } /* * ll/sc, rdhwr, sync emulation */ #define OPCODE 0xfc000000 #define BASE 0x03e00000 #define RT 0x001f0000 #define OFFSET 0x0000ffff #define LL 0xc0000000 #define SC 0xe0000000 #define SPEC0 0x00000000 #define SPEC3 0x7c000000 #define RD 0x0000f800 #define FUNC 0x0000003f #define SYNC 0x0000000f #define RDHWR 0x0000003b /* * The ll_bit is cleared by r*_switch.S */ unsigned int ll_bit; struct task_struct *ll_task; static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode) { unsigned long value, __user *vaddr; long offset; /* * analyse the ll instruction that just caused a ri exception * and put the referenced address to addr. 
*/ /* sign extend offset */ offset = opcode & OFFSET; offset <<= 16; offset >>= 16; vaddr = (unsigned long __user *) ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset); if ((unsigned long)vaddr & 3) return SIGBUS; if (get_user(value, vaddr)) return SIGSEGV; preempt_disable(); if (ll_task == NULL || ll_task == current) { ll_bit = 1; } else { ll_bit = 0; } ll_task = current; preempt_enable(); regs->regs[(opcode & RT) >> 16] = value; return 0; } static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode) { unsigned long __user *vaddr; unsigned long reg; long offset; /* * analyse the sc instruction that just caused a ri exception * and put the referenced address to addr. */ /* sign extend offset */ offset = opcode & OFFSET; offset <<= 16; offset >>= 16; vaddr = (unsigned long __user *) ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset); reg = (opcode & RT) >> 16; if ((unsigned long)vaddr & 3) return SIGBUS; preempt_disable(); if (ll_bit == 0 || ll_task != current) { regs->regs[reg] = 0; preempt_enable(); return 0; } preempt_enable(); if (put_user(regs->regs[reg], vaddr)) return SIGSEGV; regs->regs[reg] = 1; return 0; } /* * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is both * opcodes are supposed to result in coprocessor unusable exceptions if * executed on ll/sc-less processors. That's the theory. In practice a * few processors such as NEC's VR4100 throw reserved instruction exceptions * instead, so we're doing the emulation thing in both exception handlers. */ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode) { if ((opcode & OPCODE) == LL) return simulate_ll(regs, opcode); if ((opcode & OPCODE) == SC) return simulate_sc(regs, opcode); return -1; /* Must be something else ... */ } /* * Simulate trapping 'rdhwr' instructions to provide user accessible * registers not implemented in hardware. 
*/ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode) { struct thread_info *ti = task_thread_info(current); if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) { int rd = (opcode & RD) >> 11; int rt = (opcode & RT) >> 16; switch (rd) { case 0: /* CPU number */ regs->regs[rt] = smp_processor_id(); return 0; case 1: /* SYNCI length */ regs->regs[rt] = min(current_cpu_data.dcache.linesz, current_cpu_data.icache.linesz); return 0; case 2: /* Read count register */ regs->regs[rt] = read_c0_count(); return 0; case 3: /* Count register resolution */ switch (current_cpu_data.cputype) { case CPU_20KC: case CPU_25KF: regs->regs[rt] = 1; break; default: regs->regs[rt] = 2; } return 0; case 29: regs->regs[rt] = ti->tp_value; return 0; default: return -1; } } /* Not ours. */ return -1; } static int simulate_sync(struct pt_regs *regs, unsigned int opcode) { if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) return 0; return -1; /* Must be something else ... */ } asmlinkage void do_ov(struct pt_regs *regs) { siginfo_t info; die_if_kernel("Integer overflow", regs); info.si_code = FPE_INTOVF; info.si_signo = SIGFPE; info.si_errno = 0; info.si_addr = (void __user *) regs->cp0_epc; force_sig_info(SIGFPE, &info, current); } /* * XXX Delayed fp exceptions when doing a lazy ctx switch XXX */ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) { siginfo_t info; if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE) == NOTIFY_STOP) return; die_if_kernel("FP exception in kernel code", regs); if (fcr31 & FPU_CSR_UNI_X) { int sig; /* * Unimplemented operation exception. If we've got the full * software emulator on-board, let's use it... * * Force FPU to dump state into task/thread context. 
We're * moving a lot of data here for what is probably a single * instruction, but the alternative is to pre-decode the FP * register operands before invoking the emulator, which seems * a bit extreme for what should be an infrequent event. */ /* Ensure 'resume' not overwrite saved fp context again. */ lose_fpu(1); /* Run the emulator */ sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1); /* * We can't allow the emulated instruction to leave any of * the cause bit set in $fcr31. */ current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; /* Restore the hardware register state */ own_fpu(1); /* Using the FPU again. */ /* If something went wrong, signal */ if (sig) force_sig(sig, current); return; } else if (fcr31 & FPU_CSR_INV_X) info.si_code = FPE_FLTINV; else if (fcr31 & FPU_CSR_DIV_X) info.si_code = FPE_FLTDIV; else if (fcr31 & FPU_CSR_OVF_X) info.si_code = FPE_FLTOVF; else if (fcr31 & FPU_CSR_UDF_X) info.si_code = FPE_FLTUND; else if (fcr31 & FPU_CSR_INE_X) info.si_code = FPE_FLTRES; else info.si_code = __SI_FAULT; info.si_signo = SIGFPE; info.si_errno = 0; info.si_addr = (void __user *) regs->cp0_epc; force_sig_info(SIGFPE, &info, current); } static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, const char *str) { siginfo_t info; char b[40]; #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) return; #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) return; /* * A short test says that IRIX 5.3 sends SIGTRAP for all trap * insns, even for trap and break codes that indicate arithmetic * failures. Weird ... * But should we continue the brokenness??? 
--macro */ switch (code) { case BRK_OVERFLOW: case BRK_DIVZERO: scnprintf(b, sizeof(b), "%s instruction in kernel code", str); die_if_kernel(b, regs); if (code == BRK_DIVZERO) info.si_code = FPE_INTDIV; else info.si_code = FPE_INTOVF; info.si_signo = SIGFPE; info.si_errno = 0; info.si_addr = (void __user *) regs->cp0_epc; force_sig_info(SIGFPE, &info, current); break; case BRK_BUG: die_if_kernel("Kernel bug detected", regs); force_sig(SIGTRAP, current); break; case BRK_MEMU: /* * Address errors may be deliberately induced by the FPU * emulator to retake control of the CPU after executing the * instruction in the delay slot of an emulated branch. * * Terminate if exception was recognized as a delay slot return * otherwise handle as normal. */ if (do_dsemulret(regs)) return; die_if_kernel("Math emu break/trap", regs); force_sig(SIGTRAP, current); break; default: scnprintf(b, sizeof(b), "%s instruction in kernel code", str); die_if_kernel(b, regs); force_sig(SIGTRAP, current); } } asmlinkage void do_bp(struct pt_regs *regs) { unsigned int opcode, bcode; if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) goto out_sigsegv; /* * There is the ancient bug in the MIPS assemblers that the break * code starts left to bit 16 instead to bit 6 in the opcode. * Gas is bug-compatible, but not always, grrr... * We handle both cases with a simple heuristics. --macro */ bcode = ((opcode >> 6) & ((1 << 20) - 1)); if (bcode >= (1 << 10)) bcode >>= 10; /* * notify the kprobe handlers, if instruction is likely to * pertain to them. 
*/ switch (bcode) { case BRK_KPROBE_BP: if (notify_die(DIE_BREAK, "debug", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) return; else break; case BRK_KPROBE_SSTEPBP: if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) return; else break; default: break; } do_trap_or_bp(regs, bcode, "Break"); return; out_sigsegv: force_sig(SIGSEGV, current); } asmlinkage void do_tr(struct pt_regs *regs) { unsigned int opcode, tcode = 0; if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) goto out_sigsegv; /* Immediate versions don't provide a code. */ if (!(opcode & OPCODE)) tcode = ((opcode >> 6) & ((1 << 10) - 1)); do_trap_or_bp(regs, tcode, "Trap"); return; out_sigsegv: force_sig(SIGSEGV, current); } asmlinkage void do_ri(struct pt_regs *regs) { unsigned int __user *epc = (unsigned int __user *)exception_epc(regs); unsigned long old_epc = regs->cp0_epc; unsigned int opcode = 0; int status = -1; if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL) == NOTIFY_STOP) return; die_if_kernel("Reserved instruction in kernel code", regs); if (unlikely(compute_return_epc(regs) < 0)) return; if (unlikely(get_user(opcode, epc) < 0)) status = SIGSEGV; if (!cpu_has_llsc && status < 0) status = simulate_llsc(regs, opcode); if (status < 0) status = simulate_rdhwr(regs, opcode); if (status < 0) status = simulate_sync(regs, opcode); if (status < 0) status = SIGILL; if (unlikely(status > 0)) { regs->cp0_epc = old_epc; /* Undo skip-over. */ force_sig(status, current); } } /* * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've * emulated more than some threshold number of instructions, force migration to * a "CPU" that has FP support. 
*/ static void mt_ase_fp_affinity(void) { #ifdef CONFIG_MIPS_MT_FPAFF if (mt_fpemul_threshold > 0 && ((current->thread.emulated_fp++ > mt_fpemul_threshold))) { /* * If there's no FPU present, or if the application has already * restricted the allowed set to exclude any CPUs with FPUs, * we'll skip the procedure. */ if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) { cpumask_t tmask; current->thread.user_cpus_allowed = current->cpus_allowed; cpus_and(tmask, current->cpus_allowed, mt_fpu_cpumask); set_cpus_allowed_ptr(current, &tmask); set_thread_flag(TIF_FPUBOUND); } } #endif /* CONFIG_MIPS_MT_FPAFF */ } /* * No lock; only written during early bootup by CPU 0. */ static RAW_NOTIFIER_HEAD(cu2_chain); int __ref register_cu2_notifier(struct notifier_block *nb) { return raw_notifier_chain_register(&cu2_chain, nb); } int cu2_notifier_call_chain(unsigned long val, void *v) { return raw_notifier_call_chain(&cu2_chain, val, v); } static int default_cu2_call(struct notifier_block *nfb, unsigned long action, void *data) { struct pt_regs *regs = data; switch (action) { default: die_if_kernel("Unhandled kernel unaligned access or invalid " "instruction", regs); /* Fall through */ case CU2_EXCEPTION: force_sig(SIGILL, current); } return NOTIFY_OK; } asmlinkage void do_cpu(struct pt_regs *regs) { unsigned int __user *epc; unsigned long old_epc; unsigned int opcode; unsigned int cpid; int status; unsigned long __maybe_unused flags; die_if_kernel("do_cpu invoked from kernel context!", regs); cpid = (regs->cp0_cause >> CAUSEB_CE) & 3; switch (cpid) { case 0: epc = (unsigned int __user *)exception_epc(regs); old_epc = regs->cp0_epc; opcode = 0; status = -1; if (unlikely(compute_return_epc(regs) < 0)) return; if (unlikely(get_user(opcode, epc) < 0)) status = SIGSEGV; if (!cpu_has_llsc && status < 0) status = simulate_llsc(regs, opcode); if (status < 0) status = simulate_rdhwr(regs, opcode); if (status < 0) status = SIGILL; if (unlikely(status > 0)) { regs->cp0_epc = old_epc; 
/* Undo skip-over. */ force_sig(status, current); } return; case 1: if (used_math()) /* Using the FPU again. */ own_fpu(1); else { /* First time FPU user. */ init_fpu(); set_used_math(); } if (!raw_cpu_has_fpu) { int sig; sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0); if (sig) force_sig(sig, current); else mt_ase_fp_affinity(); } return; case 2: raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs); return; case 3: break; } force_sig(SIGILL, current); } asmlinkage void do_mdmx(struct pt_regs *regs) { force_sig(SIGILL, current); } /* * Called with interrupts disabled. */ asmlinkage void do_watch(struct pt_regs *regs) { u32 cause; /* * Clear WP (bit 22) bit of cause register so we don't loop * forever. */ cause = read_c0_cause(); cause &= ~(1 << 22); write_c0_cause(cause); /* * If the current thread has the watch registers loaded, save * their values and send SIGTRAP. Otherwise another thread * left the registers set, clear them and continue. */ if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) { mips_read_watch_registers(); local_irq_enable(); force_sig(SIGTRAP, current); } else { mips_clear_watch_registers(); local_irq_enable(); } } asmlinkage void do_mcheck(struct pt_regs *regs) { const int field = 2 * sizeof(unsigned long); int multi_match = regs->cp0_status & ST0_TS; show_regs(regs); if (multi_match) { printk("Index : %0x\n", read_c0_index()); printk("Pagemask: %0x\n", read_c0_pagemask()); printk("EntryHi : %0*lx\n", field, read_c0_entryhi()); printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0()); printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1()); printk("\n"); dump_tlb_all(); } show_code((unsigned int __user *) regs->cp0_epc); /* * Some chips may have other causes of machine check (e.g. SB1 * graduation timer) */ panic("Caught Machine Check exception - %scaused by multiple " "matching entries in the TLB.", (multi_match) ? 
"" : "not "); } asmlinkage void do_mt(struct pt_regs *regs) { int subcode; subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT) >> VPECONTROL_EXCPT_SHIFT; switch (subcode) { case 0: printk(KERN_DEBUG "Thread Underflow\n"); break; case 1: printk(KERN_DEBUG "Thread Overflow\n"); break; case 2: printk(KERN_DEBUG "Invalid YIELD Qualifier\n"); break; case 3: printk(KERN_DEBUG "Gating Storage Exception\n"); break; case 4: printk(KERN_DEBUG "YIELD Scheduler Exception\n"); break; case 5: printk(KERN_DEBUG "Gating Storage Schedulier Exception\n"); break; default: printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n", subcode); break; } die_if_kernel("MIPS MT Thread exception in kernel", regs); force_sig(SIGILL, current); } asmlinkage void do_dsp(struct pt_regs *regs) { if (cpu_has_dsp) panic("Unexpected DSP exception\n"); force_sig(SIGILL, current); } asmlinkage void do_reserved(struct pt_regs *regs) { /* * Game over - no way to handle this if it ever occurs. Most probably * caused by a new unknown cpu type or after another deadly * hard/software error. */ show_regs(regs); panic("Caught reserved exception %ld - should not happen.", (regs->cp0_cause & 0x7f) >> 2); } static int __initdata l1parity = 1; static int __init nol1parity(char *s) { l1parity = 0; return 1; } __setup("nol1par", nol1parity); static int __initdata l2parity = 1; static int __init nol2parity(char *s) { l2parity = 0; return 1; } __setup("nol2par", nol2parity); /* * Some MIPS CPUs can enable/disable for cache parity detection, but do * it different ways. 
*/ static inline void parity_protection_init(void) { switch (current_cpu_type()) { case CPU_24K: case CPU_34K: case CPU_74K: case CPU_1004K: { #define ERRCTL_PE 0x80000000 #define ERRCTL_L2P 0x00800000 unsigned long errctl; unsigned int l1parity_present, l2parity_present; errctl = read_c0_ecc(); errctl &= ~(ERRCTL_PE|ERRCTL_L2P); /* probe L1 parity support */ write_c0_ecc(errctl | ERRCTL_PE); back_to_back_c0_hazard(); l1parity_present = (read_c0_ecc() & ERRCTL_PE); /* probe L2 parity support */ write_c0_ecc(errctl|ERRCTL_L2P); back_to_back_c0_hazard(); l2parity_present = (read_c0_ecc() & ERRCTL_L2P); if (l1parity_present && l2parity_present) { if (l1parity) errctl |= ERRCTL_PE; if (l1parity ^ l2parity) errctl |= ERRCTL_L2P; } else if (l1parity_present) { if (l1parity) errctl |= ERRCTL_PE; } else if (l2parity_present) { if (l2parity) errctl |= ERRCTL_L2P; } else { /* No parity available */ } printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl); write_c0_ecc(errctl); back_to_back_c0_hazard(); errctl = read_c0_ecc(); printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl); if (l1parity_present) printk(KERN_INFO "Cache parity protection %sabled\n", (errctl & ERRCTL_PE) ? "en" : "dis"); if (l2parity_present) { if (l1parity_present && l1parity) errctl ^= ERRCTL_L2P; printk(KERN_INFO "L2 cache parity protection %sabled\n", (errctl & ERRCTL_L2P) ? "en" : "dis"); } } break; case CPU_5KC: write_c0_ecc(0x80000000); back_to_back_c0_hazard(); /* Set the PE bit (bit 31) in the c0_errctl register. */ printk(KERN_INFO "Cache parity protection %sabled\n", (read_c0_ecc() & 0x80000000) ? "en" : "dis"); break; case CPU_20KC: case CPU_25KF: /* Clear the DE bit (bit 16) in the c0_status register. 
*/ printk(KERN_INFO "Enable cache parity protection for " "MIPS 20KC/25KF CPUs.\n"); clear_c0_status(ST0_DE); break; default: break; } } asmlinkage void cache_parity_error(void) { const int field = 2 * sizeof(unsigned long); unsigned int reg_val; /* For the moment, report the problem and hang. */ printk("Cache error exception:\n"); printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc()); reg_val = read_c0_cacheerr(); printk("c0_cacheerr == %08x\n", reg_val); printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n", reg_val & (1<<30) ? "secondary" : "primary", reg_val & (1<<31) ? "data" : "insn"); printk("Error bits: %s%s%s%s%s%s%s\n", reg_val & (1<<29) ? "ED " : "", reg_val & (1<<28) ? "ET " : "", reg_val & (1<<26) ? "EE " : "", reg_val & (1<<25) ? "EB " : "", reg_val & (1<<24) ? "EI " : "", reg_val & (1<<23) ? "E1 " : "", reg_val & (1<<22) ? "E0 " : ""); printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1)); #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) if (reg_val & (1<<22)) printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0()); if (reg_val & (1<<23)) printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1()); #endif panic("Can't handle the cache error!"); } /* * SDBBP EJTAG debug exception handler. * We skip the instruction and return to the next instruction. */ void ejtag_exception_handler(struct pt_regs *regs) { const int field = 2 * sizeof(unsigned long); unsigned long depc, old_epc; unsigned int debug; printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n"); depc = read_c0_depc(); debug = read_c0_debug(); printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug); if (debug & 0x80000000) { /* * In branch delay slot. * We cheat a little bit here and use EPC to calculate the * debug return address (DEPC). EPC is restored after the * calculation. 
*/ old_epc = regs->cp0_epc; regs->cp0_epc = depc; __compute_return_epc(regs); depc = regs->cp0_epc; regs->cp0_epc = old_epc; } else depc += 4; write_c0_depc(depc); #if 0 printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n"); write_c0_debug(debug | 0x100); #endif } /* * NMI exception handler. */ NORET_TYPE void ATTRIB_NORET nmi_exception_handler(struct pt_regs *regs) { bust_spinlocks(1); printk("NMI taken!!!!\n"); die("NMI", regs); } #define VECTORSPACING 0x100 /* for EI/VI mode */ unsigned long ebase; unsigned long exception_handlers[32]; unsigned long vi_handlers[64]; void __init *set_except_vector(int n, void *addr) { unsigned long handler = (unsigned long) addr; unsigned long old_handler = exception_handlers[n]; exception_handlers[n] = handler; if (n == 0 && cpu_has_divec) { unsigned long jump_mask = ~((1 << 28) - 1); u32 *buf = (u32 *)(ebase + 0x200); unsigned int k0 = 26; if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) { uasm_i_j(&buf, handler & ~jump_mask); uasm_i_nop(&buf); } else { UASM_i_LA(&buf, k0, handler); uasm_i_jr(&buf, k0); uasm_i_nop(&buf); } local_flush_icache_range(ebase + 0x200, (unsigned long)buf); } return (void *)old_handler; } static asmlinkage void do_default_vi(void) { show_regs(get_irq_regs()); panic("Caught unexpected vectored interrupt."); } static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) { unsigned long handler; unsigned long old_handler = vi_handlers[n]; int srssets = current_cpu_data.srsets; u32 *w; unsigned char *b; BUG_ON(!cpu_has_veic && !cpu_has_vint); if (addr == NULL) { handler = (unsigned long) do_default_vi; srs = 0; } else handler = (unsigned long) addr; vi_handlers[n] = (unsigned long) addr; b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING); if (srs >= srssets) panic("Shadow register set %d not supported", srs); if (cpu_has_veic) { if (board_bind_eic_interrupt) board_bind_eic_interrupt(n, srs); } else if (cpu_has_vint) { /* SRSMap is only defined if shadow sets are 
implemented */ if (srssets > 1) change_c0_srsmap(0xf << n*4, srs << n*4); } if (srs == 0) { /* * If no shadow set is selected then use the default handler * that does normal register saving and a standard interrupt exit */ extern char except_vec_vi, except_vec_vi_lui; extern char except_vec_vi_ori, except_vec_vi_end; extern char rollback_except_vec_vi; char *vec_start = (cpu_wait == r4k_wait) ? &rollback_except_vec_vi : &except_vec_vi; #ifdef CONFIG_MIPS_MT_SMTC /* * We need to provide the SMTC vectored interrupt handler * not only with the address of the handler, but with the * Status.IM bit to be masked before going there. */ extern char except_vec_vi_mori; const int mori_offset = &except_vec_vi_mori - vec_start; #endif /* CONFIG_MIPS_MT_SMTC */ const int handler_len = &except_vec_vi_end - vec_start; const int lui_offset = &except_vec_vi_lui - vec_start; const int ori_offset = &except_vec_vi_ori - vec_start; if (handler_len > VECTORSPACING) { /* * Sigh... panicing won't help as the console * is probably not configured :( */ panic("VECTORSPACING too small"); } memcpy(b, vec_start, handler_len); #ifdef CONFIG_MIPS_MT_SMTC BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. 
*/ w = (u32 *)(b + mori_offset); *w = (*w & 0xffff0000) | (0x100 << n); #endif /* CONFIG_MIPS_MT_SMTC */ w = (u32 *)(b + lui_offset); *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); w = (u32 *)(b + ori_offset); *w = (*w & 0xffff0000) | ((u32)handler & 0xffff); local_flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len)); } else { /* * In other cases jump directly to the interrupt handler * * It is the handlers responsibility to save registers if required * (eg hi/lo) and return from the exception using "eret" */ w = (u32 *)b; *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */ *w = 0; local_flush_icache_range((unsigned long)b, (unsigned long)(b+8)); } return (void *)old_handler; } void *set_vi_handler(int n, vi_handler_t addr) { return set_vi_srs_handler(n, addr, 0); } extern void cpu_cache_init(void); extern void tlb_init(void); extern void flush_tlb_handlers(void); /* * Timer interrupt */ int cp0_compare_irq; int cp0_compare_irq_shift; /* * Performance counter IRQ or -1 if shared with timer */ int cp0_perfcount_irq; EXPORT_SYMBOL_GPL(cp0_perfcount_irq); static int __cpuinitdata noulri; static int __init ulri_disable(char *s) { pr_info("Disabling ulri\n"); noulri = 1; return 1; } __setup("noulri", ulri_disable); void __cpuinit per_cpu_trap_init(void) { unsigned int cpu = smp_processor_id(); unsigned int status_set = ST0_CU0; #ifdef CONFIG_MIPS_MT_SMTC int secondaryTC = 0; int bootTC = (cpu == 0); /* * Only do per_cpu_trap_init() for first TC of Each VPE. * Note that this hack assumes that the SMTC init code * assigns TCs consecutively and in ascending order. */ if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id)) secondaryTC = 1; #endif /* CONFIG_MIPS_MT_SMTC */ /* * Disable coprocessors and select 32-bit or 64-bit addressing * and the 16/32 or 32/32 FPR register model. Reset the BEV * flag that some firmware may have left set and the TS bit (for * IP27). 
Set XX for ISA IV code to work. */ #ifdef CONFIG_64BIT status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; #endif if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV) status_set |= ST0_XX; if (cpu_has_dsp) status_set |= ST0_MX; change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, status_set); if (cpu_has_mips_r2) { unsigned int enable = 0x0000000f | cpu_hwrena_impl_bits; if (!noulri && cpu_has_userlocal) enable |= (1 << 29); write_c0_hwrena(enable); } #ifdef CONFIG_MIPS_MT_SMTC if (!secondaryTC) { #endif /* CONFIG_MIPS_MT_SMTC */ if (cpu_has_veic || cpu_has_vint) { unsigned long sr = set_c0_status(ST0_BEV); write_c0_ebase(ebase); write_c0_status(sr); /* Setting vector spacing enables EI/VI mode */ change_c0_intctl(0x3e0, VECTORSPACING); } if (cpu_has_divec) { if (cpu_has_mipsmt) { unsigned int vpflags = dvpe(); set_c0_cause(CAUSEF_IV); evpe(vpflags); } else set_c0_cause(CAUSEF_IV); } /* * Before R2 both interrupt numbers were fixed to 7, so on R2 only: * * o read IntCtl.IPTI to determine the timer interrupt * o read IntCtl.IPPCI to determine the performance counter interrupt */ if (cpu_has_mips_r2) { cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; if (cp0_perfcount_irq == cp0_compare_irq) cp0_perfcount_irq = -1; } else { cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ; cp0_compare_irq_shift = cp0_compare_irq; cp0_perfcount_irq = -1; } #ifdef CONFIG_MIPS_MT_SMTC } #endif /* CONFIG_MIPS_MT_SMTC */ cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; TLBMISS_HANDLER_SETUP(); atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; BUG_ON(current->mm); enter_lazy_tlb(&init_mm, current); #ifdef CONFIG_MIPS_MT_SMTC if (bootTC) { #endif /* CONFIG_MIPS_MT_SMTC */ cpu_cache_init(); tlb_init(); #ifdef CONFIG_MIPS_MT_SMTC } else if (!secondaryTC) { /* * First TC in non-boot VPE must do subset of tlb_init() * for MMU countrol registers. 
*/ write_c0_pagemask(PM_DEFAULT_MASK); write_c0_wired(0); } #endif /* CONFIG_MIPS_MT_SMTC */ } /* Install CPU exception handler */ void __init set_handler(unsigned long offset, void *addr, unsigned long size) { memcpy((void *)(ebase + offset), addr, size); local_flush_icache_range(ebase + offset, ebase + offset + size); } static char panic_null_cerr[] __cpuinitdata = "Trying to set NULL cache error exception handler"; /* * Install uncached CPU exception handler. * This is suitable only for the cache error exception which is the only * exception handler that is being run uncached. */ void __cpuinit set_uncached_handler(unsigned long offset, void *addr, unsigned long size) { unsigned long uncached_ebase = CKSEG1ADDR(ebase); if (!addr) panic(panic_null_cerr); memcpy((void *)(uncached_ebase + offset), addr, size); } static int __initdata rdhwr_noopt; static int __init set_rdhwr_noopt(char *str) { rdhwr_noopt = 1; return 1; } __setup("rdhwr_noopt", set_rdhwr_noopt); void __init trap_init(void) { extern char except_vec3_generic, except_vec3_r4000; extern char except_vec4; unsigned long i; int rollback; check_wait(); rollback = (cpu_wait == r4k_wait); #if defined(CONFIG_KGDB) if (kgdb_early_setup) return; /* Already done */ #endif if (cpu_has_veic || cpu_has_vint) { unsigned long size = 0x200 + VECTORSPACING*64; ebase = (unsigned long) __alloc_bootmem(size, 1 << fls(size), 0); } else { ebase = CKSEG0; if (cpu_has_mips_r2) ebase += (read_c0_ebase() & 0x3ffff000); } per_cpu_trap_init(); /* * Copy the generic exception handlers to their final destination. * This will be overriden later as suitable for a particular * configuration. */ set_handler(0x180, &except_vec3_generic, 0x80); /* * Setup default vectors */ for (i = 0; i <= 31; i++) set_except_vector(i, handle_reserved); /* * Copy the EJTAG debug exception vector handler code to it's final * destination. 
*/ if (cpu_has_ejtag && board_ejtag_handler_setup) board_ejtag_handler_setup(); /* * Only some CPUs have the watch exceptions. */ if (cpu_has_watch) set_except_vector(23, handle_watch); /* * Initialise interrupt handlers */ if (cpu_has_veic || cpu_has_vint) { int nvec = cpu_has_veic ? 64 : 8; for (i = 0; i < nvec; i++) set_vi_handler(i, NULL); } else if (cpu_has_divec) set_handler(0x200, &except_vec4, 0x8); /* * Some CPUs can enable/disable for cache parity detection, but does * it different ways. */ parity_protection_init(); /* * The Data Bus Errors / Instruction Bus Errors are signaled * by external hardware. Therefore these two exceptions * may have board specific handlers. */ if (board_be_init) board_be_init(); set_except_vector(0, rollback ? rollback_handle_int : handle_int); set_except_vector(1, handle_tlbm); set_except_vector(2, handle_tlbl); set_except_vector(3, handle_tlbs); set_except_vector(4, handle_adel); set_except_vector(5, handle_ades); set_except_vector(6, handle_ibe); set_except_vector(7, handle_dbe); set_except_vector(8, handle_sys); set_except_vector(9, handle_bp); set_except_vector(10, rdhwr_noopt ? handle_ri : (cpu_has_vtag_icache ? handle_ri_rdhwr_vivt : handle_ri_rdhwr)); set_except_vector(11, handle_cpu); set_except_vector(12, handle_ov); set_except_vector(13, handle_tr); if (current_cpu_type() == CPU_R6000 || current_cpu_type() == CPU_R6000A) { /* * The R6000 is the only R-series CPU that features a machine * check exception (similar to the R4000 cache error) and * unaligned ldc1/sdc1 exception. The handlers have not been * written yet. Well, anyway there is no R6000 machine on the * current list of targets for Linux/MIPS. 
* (Duh, crap, there is someone with a triple R6k machine) */ //set_except_vector(14, handle_mc); //set_except_vector(15, handle_ndc); } if (board_nmi_handler_setup) board_nmi_handler_setup(); if (cpu_has_fpu && !cpu_has_nofpuex) set_except_vector(15, handle_fpe); set_except_vector(22, handle_mdmx); if (cpu_has_mcheck) set_except_vector(24, handle_mcheck); if (cpu_has_mipsmt) set_except_vector(25, handle_mt); set_except_vector(26, handle_dsp); if (cpu_has_vce) /* Special exception: R4[04]00 uses also the divec space. */ memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100); else if (cpu_has_4kex) memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80); else memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80); local_flush_icache_range(ebase, ebase + 0x400); flush_tlb_handlers(); sort_extable(__start___dbe_table, __stop___dbe_table); cu2_notifier(default_cu2_call, 0x80000000); /* Run last */ }
gpl-2.0
drod2169/android_kernel_lge_bullhead
drivers/staging/qcacld-2.0/CORE/BAP/src/bapRsn8021xPrf.c
75
8095
/* * Copyright (c) 2014 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * This file was originally distributed by Qualcomm Atheros, Inc. * under proprietary terms before Copyright ownership was assigned * to the Linux Foundation. */ /* * $File: //depot/software/projects/feature_branches/gen5_phase1/os/linux/classic/ap/apps/ssm/auth8021x/ani8021xPrf.c $ * */ /* * Contains definitions for routines to calculate the 802.11i PRF * functions. * * Author: Mayank D. 
Upadhyay * Date: 19-March-2003 * History:- * Date Modified by Modification Information * ------------------------------------------------------ */ /*#include <assert.h> #include <stdlib.h> #include <aniSsmSha1.h> */ #include "vos_utils.h" #include "vos_memory.h" #include "bapRsn8021xPrf.h" #include "bapRsnErrors.h" //#include "ani8021xUtils.h" #define AAG_PTK_PRF_ADD_PARAM 159 #define AAG_PTK_PRF_DIV_PARAM 160 #define AAG_PTK_PRF_CONST "Pairwise key expansion" #define AAG_PTK_PRF_CONST_LEN 22 #define AAG_PTK_PRF_LM_POS 0 #define AAG_PTK_PRF_HM_POS 6 #define AAG_PTK_PRF_LN_POS 12 #define AAG_PTK_PRF_HN_POS (AAG_PTK_PRF_LN_POS + ANI_EAPOL_KEY_RSN_NONCE_SIZE) #define AAG_PTK_PRF_TEXT_LEN (AAG_PTK_PRF_HN_POS + ANI_EAPOL_KEY_RSN_NONCE_SIZE) #define AAG_GTK_PRF_CONST "Group key expansion" #define AAG_GTK_PRF_CONST_LEN 19 #define AAG_GTK_PRF_MAC_POS 0 #define AAG_GTK_PRF_NONCE_POS 6 #define AAG_GTK_PRF_TEXT_LEN (AAG_GTK_PRF_NONCE_POS + ANI_EAPOL_KEY_RSN_NONCE_SIZE) /** * aagGetKeyMaterialLen * * Returns the number of bytes of the PTK that have to be provided to * the MAC layer for a given cipher type. * * @param cipherType the cipher-type * * @return the number of bytes of key material for this cipher type, * or 0 for invalid cipher types. */ int aagGetKeyMaterialLen(eCsrEncryptionType cipherType) { switch (cipherType) { case eCSR_ENCRYPT_TYPE_AES: return AAG_RSN_KEY_MATERIAL_LEN_CCMP; break; default: return 0; break; }; } /** * aagPtkPrf * * The PRF used for calculating the pairwise temporal key under IEEE * 802.11i. * * @param result a fixed size array where the outputis stored. Should * have enough place for the SHA-1 overflow. 
* @param prfLen the number of BITS desired from the PRF result * @param pmk the pairwise master-key * @param authAddr the MAC address of the authenticator * @param suppAddr the MAC address of the supplicant * @param aNonce the nonce generated by the authenticator * @param sNonce the nonce generated by the supplicant * * @return ANI_OK if the operation succeeds */ int aagPtkPrf(v_U32_t cryptHandle, v_U8_t result[AAG_PRF_MAX_OUTPUT_SIZE], v_U32_t prfLen, tAniPacket *pmk, tAniMacAddr authAddr, tAniMacAddr suppAddr, v_U8_t aNonce[ANI_EAPOL_KEY_RSN_NONCE_SIZE], v_U8_t sNonce[ANI_EAPOL_KEY_RSN_NONCE_SIZE]) { v_U8_t *lowMac; v_U8_t *highMac; v_U8_t *lowNonce; v_U8_t *highNonce; v_U8_t *keyBytes; int keyLen; v_U8_t text[AAG_PTK_PRF_TEXT_LEN]; //Cannot use voss function here because vos_mem_compare doesn't tell whihc is larger if (vos_mem_compare2(authAddr, suppAddr, sizeof(tAniMacAddr)) < 0) { lowMac = authAddr; highMac = suppAddr; } else { lowMac = suppAddr; highMac = authAddr; } if (vos_mem_compare2(aNonce, sNonce, ANI_EAPOL_KEY_RSN_NONCE_SIZE) < 0) { lowNonce = aNonce; highNonce = sNonce; } else { lowNonce = sNonce; highNonce = aNonce; } vos_mem_copy(text + AAG_PTK_PRF_LM_POS, lowMac, sizeof(tAniMacAddr)); vos_mem_copy(text + AAG_PTK_PRF_HM_POS, highMac, sizeof(tAniMacAddr)); vos_mem_copy(text + AAG_PTK_PRF_LN_POS, lowNonce, ANI_EAPOL_KEY_RSN_NONCE_SIZE); vos_mem_copy(text + AAG_PTK_PRF_HN_POS, highNonce, ANI_EAPOL_KEY_RSN_NONCE_SIZE); keyLen = aniAsfPacketGetBytes(pmk, &keyBytes); if( !ANI_IS_STATUS_SUCCESS( keyLen ) ) { return keyLen; } return aagPrf(cryptHandle, result, keyBytes, keyLen, (v_U8_t *)AAG_PTK_PRF_CONST, AAG_PTK_PRF_CONST_LEN, text, sizeof(text), prfLen); } /** * aagGtkPrf * * The PRF used for calculating the group temporal key under IEEE * 802.11i. * * @param result a fixed size array where the outputis stored. Should * have enough place for the SHA-1 overflow. 
* @param prfLen the number of BITS desired from the PRF result * @param gmk the group master-key * @param authAddr the MAC address of the authenticator * @param gNonce the nonce generated by the authenticator for this purpose * * @return ANI_OK if the operation succeeds */ int aagGtkPrf(v_U32_t cryptHandle, v_U8_t result[AAG_PRF_MAX_OUTPUT_SIZE], v_U32_t prfLen, v_U8_t gmk[AAG_RSN_GMK_SIZE], tAniMacAddr authAddr, v_U8_t gNonce[ANI_EAPOL_KEY_RSN_NONCE_SIZE]) { v_U8_t text[AAG_GTK_PRF_TEXT_LEN]; vos_mem_copy(text + AAG_GTK_PRF_MAC_POS, authAddr, sizeof(tAniMacAddr)); vos_mem_copy(text + AAG_GTK_PRF_NONCE_POS, gNonce, ANI_EAPOL_KEY_RSN_NONCE_SIZE); return aagPrf(cryptHandle, result, gmk, AAG_RSN_GMK_SIZE, (v_U8_t *)AAG_GTK_PRF_CONST, AAG_GTK_PRF_CONST_LEN, text, sizeof(text), prfLen); } /** * aagPrf * * The raw PRF function that is used in IEEE 802.11i. * * @param result a fixed size array where the outputis stored. Should * have enough place for the SHA-1 overflow. * @param key the key to use in the PRF * @param keyLen the length of the key * @param a the parameter A which is usually a unique label * @param aLen the length of the parameter A * @ param b the parameter B * @param bLen the length of parameter B * @param prfLen the number to BITS desired from the PRF result * * @return ANI_OK if the operation succeeds */ int aagPrf(v_U32_t cryptHandle, v_U8_t result[AAG_PRF_MAX_OUTPUT_SIZE], v_U8_t *key, v_U8_t keyLen, v_U8_t *a, v_U8_t aLen, v_U8_t *b, v_U8_t bLen, v_U32_t prfLen) { static v_U8_t y; v_U8_t *hmacText = NULL; v_U8_t *resultOffset = result; int numLoops; int loopCtrPos; int i, retVal=0; hmacText = vos_mem_malloc( aLen + bLen + 2 ); if( NULL == hmacText ) { return ANI_E_NULL_VALUE; } vos_mem_copy(hmacText + 0, a, aLen); hmacText[aLen] = y; vos_mem_copy(hmacText + aLen + 1, b, bLen); loopCtrPos = aLen + 1 + bLen; numLoops = prfLen + AAG_PTK_PRF_ADD_PARAM; numLoops /= AAG_PTK_PRF_DIV_PARAM; for (i = 0; i < numLoops; i++) { if ((resultOffset - result + 
VOS_DIGEST_SHA1_SIZE) > AAG_PRF_MAX_OUTPUT_SIZE) { VOS_ASSERT(0); return ANI_ERROR; } hmacText[loopCtrPos] = i; if( VOS_IS_STATUS_SUCCESS( vos_sha1_hmac_str(cryptHandle, hmacText, loopCtrPos + 1, key, keyLen, resultOffset) ) ) { resultOffset += VOS_DIGEST_SHA1_SIZE; retVal = ANI_OK; } else { retVal = ANI_ERROR; } } vos_mem_free(hmacText); return retVal; }
gpl-2.0
xobs/u-boot-novena-spl
nand_spl/board/amcc/canyonlands/ddr2_fixed.c
75
3964
/* * (C) Copyright 2008-2009 * Stefan Roese, DENX Software Engineering, sr@denx.de. * * See file CREDITS for list of people who contributed to this * project. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA */ #include <common.h> #include <ppc4xx.h> #include <asm/io.h> #include <asm/processor.h> /* * This code can configure those two Crucial SODIMM's: * * Crucial CT6464AC667.4FE - 512MB SO-DIMM (single rank) * Crucial CT6464AC667.8FB - 512MB SO-DIMM (dual rank) * */ #define TEST_ADDR 0x10000000 #define TEST_MAGIC 0x11223344 static void wait_init_complete(void) { u32 val; do { mfsdram(SDRAM_MCSTAT, val); } while (!(val & 0x80000000)); } static void ddr_start(void) { mtsdram(SDRAM_MCOPT2, 0x28000000); wait_init_complete(); } static void ddr_init_common(void) { /* * Reset the DDR-SDRAM controller. */ mtsdr(SDR0_SRST, (0x80000000 >> 10)); mtsdr(SDR0_SRST, 0x00000000); /* * These values are cloned from a running NOR booting * Canyonlands with SPD-DDR2 detection and calibration * enabled. 
This will only work for the same memory * configuration as used here: * */ mtsdram(SDRAM_MCOPT2, 0x00000000); mtsdram(SDRAM_MODT0, 0x01000000); mtsdram(SDRAM_WRDTR, 0x82000823); mtsdram(SDRAM_CLKTR, 0x40000000); mtsdram(SDRAM_MB0CF, 0x00000201); mtsdram(SDRAM_RTR, 0x06180000); mtsdram(SDRAM_SDTR1, 0x80201000); mtsdram(SDRAM_SDTR2, 0x42103243); mtsdram(SDRAM_SDTR3, 0x0A0D0D16); mtsdram(SDRAM_MMODE, 0x00000632); mtsdram(SDRAM_MEMODE, 0x00000040); mtsdram(SDRAM_INITPLR0, 0xB5380000); mtsdram(SDRAM_INITPLR1, 0x82100400); mtsdram(SDRAM_INITPLR2, 0x80820000); mtsdram(SDRAM_INITPLR3, 0x80830000); mtsdram(SDRAM_INITPLR4, 0x80810040); mtsdram(SDRAM_INITPLR5, 0x80800532); mtsdram(SDRAM_INITPLR6, 0x82100400); mtsdram(SDRAM_INITPLR7, 0x8A080000); mtsdram(SDRAM_INITPLR8, 0x8A080000); mtsdram(SDRAM_INITPLR9, 0x8A080000); mtsdram(SDRAM_INITPLR10, 0x8A080000); mtsdram(SDRAM_INITPLR11, 0x80000432); mtsdram(SDRAM_INITPLR12, 0x808103C0); mtsdram(SDRAM_INITPLR13, 0x80810040); mtsdram(SDRAM_INITPLR14, 0x00000000); mtsdram(SDRAM_INITPLR15, 0x00000000); mtsdram(SDRAM_RDCC, 0x40000000); mtsdram(SDRAM_RQDC, 0x80000038); mtsdram(SDRAM_RFDC, 0x00000257); mtdcr(SDRAM_R0BAS, 0x0000F800); /* MQ0_B0BAS */ mtdcr(SDRAM_R1BAS, 0x0400F800); /* MQ0_B1BAS */ } phys_size_t initdram(int board_type) { /* * First try init for this module: * * Crucial CT6464AC667.8FB - 512MB SO-DIMM (dual rank) */ ddr_init_common(); /* * Crucial CT6464AC667.8FB - 512MB SO-DIMM */ mtdcr(SDRAM_R0BAS, 0x0000F800); mtdcr(SDRAM_R1BAS, 0x0400F800); mtsdram(SDRAM_MCOPT1, 0x05122000); mtsdram(SDRAM_CODT, 0x02800021); mtsdram(SDRAM_MB1CF, 0x00000201); ddr_start(); /* * Now test if the dual-ranked module is really installed * by checking an address in the upper 256MByte region */ out_be32((void *)TEST_ADDR, TEST_MAGIC); if (in_be32((void *)TEST_ADDR) != TEST_MAGIC) { /* * The test failed, so we assume that the single * ranked module is installed: * * Crucial CT6464AC667.4FE - 512MB SO-DIMM (single rank) */ ddr_init_common(); 
mtdcr(SDRAM_R0BAS, 0x0000F000); mtsdram(SDRAM_MCOPT1, 0x05322000); mtsdram(SDRAM_CODT, 0x00800021); ddr_start(); } return CONFIG_SYS_MBYTES_SDRAM << 20; }
gpl-2.0
Driim/crypt_ubifs
drivers/staging/vt6656/card.c
75
25560
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * File: card.c * Purpose: Provide functions to setup NIC operation mode * Functions: * s_vSafeResetTx - Rest Tx * CARDvSetRSPINF - Set RSPINF * vUpdateIFS - Update slotTime,SIFS,DIFS, and EIFS * CARDvUpdateBasicTopRate - Update BasicTopRate * CARDbAddBasicRate - Add to BasicRateSet * CARDbSetBasicRate - Set Basic Tx Rate * CARDbIsOFDMinBasicRate - Check if any OFDM rate is in BasicRateSet * CARDvSetLoopbackMode - Set Loopback mode * CARDbSoftwareReset - Sortware reset NIC * CARDqGetTSFOffset - Calculate TSFOffset * CARDbGetCurrentTSF - Read Current NIC TSF counter * CARDqGetNextTBTT - Calculate Next Beacon TSF counter * CARDvSetFirstNextTBTT - Set NIC Beacon time * CARDvUpdateNextTBTT - Sync. NIC Beacon time * CARDbRadioPowerOff - Turn Off NIC Radio Power * CARDbRadioPowerOn - Turn On NIC Radio Power * CARDbSetWEPMode - Set NIC Wep mode * CARDbSetTxPower - Set NIC tx power * * Revision History: * 06-10-2003 Bryan YC Fan: Re-write codes to support VT3253 spec. * 08-26-2003 Kyle Hsu: Modify the definition type of dwIoBase. * 09-01-2003 Bryan YC Fan: Add vUpdateIFS(). 
* */ #include "device.h" #include "tmacro.h" #include "card.h" #include "baseband.h" #include "mac.h" #include "desc.h" #include "rf.h" #include "power.h" #include "key.h" #include "rc4.h" #include "country.h" #include "datarate.h" #include "rndis.h" #include "control.h" //static int msglevel =MSG_LEVEL_DEBUG; static int msglevel =MSG_LEVEL_INFO; //const u16 cwRXBCNTSFOff[MAX_RATE] = //{17, 34, 96, 192, 34, 23, 17, 11, 8, 5, 4, 3}; const u16 cwRXBCNTSFOff[MAX_RATE] = {192, 96, 34, 17, 34, 23, 17, 11, 8, 5, 4, 3}; /* * Description: Set NIC media channel * * Parameters: * In: * pDevice - The adapter to be set * uConnectionChannel - Channel to be set * Out: * none */ void CARDbSetMediaChannel(struct vnt_private *pDevice, u32 uConnectionChannel) { if (pDevice->byBBType == BB_TYPE_11A) { // 15 ~ 38 if ((uConnectionChannel < (CB_MAX_CHANNEL_24G+1)) || (uConnectionChannel > CB_MAX_CHANNEL)) uConnectionChannel = (CB_MAX_CHANNEL_24G+1); } else { if ((uConnectionChannel > CB_MAX_CHANNEL_24G) || (uConnectionChannel == 0)) // 1 ~ 14 uConnectionChannel = 1; } // clear NAV MACvRegBitsOn(pDevice, MAC_REG_MACCR, MACCR_CLRNAV); // Set Channel[7] = 0 to tell H/W channel is changing now. 
MACvRegBitsOff(pDevice, MAC_REG_CHANNEL, 0x80); //if (pMgmt->uCurrChannel == uConnectionChannel) // return bResult; CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_SELECT_CHANNLE, (u16) uConnectionChannel, 0, 0, NULL ); //{{ RobertYu: 20041202 //// TX_PE will reserve 3 us for MAX2829 A mode only, it is for better TX throughput if (pDevice->byBBType == BB_TYPE_11A) { pDevice->byCurPwr = 0xFF; RFbRawSetPower(pDevice, pDevice->abyOFDMAPwrTbl[uConnectionChannel-15], RATE_54M); } else if (pDevice->byBBType == BB_TYPE_11G) { pDevice->byCurPwr = 0xFF; RFbRawSetPower(pDevice, pDevice->abyOFDMPwrTbl[uConnectionChannel-1], RATE_54M); } else { pDevice->byCurPwr = 0xFF; RFbRawSetPower(pDevice, pDevice->abyCCKPwrTbl[uConnectionChannel-1], RATE_1M); } ControlvWriteByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_CHANNEL,(u8)(uConnectionChannel|0x80)); } /* * Description: Get CCK mode basic rate * * Parameters: * In: * pDevice - The adapter to be set * wRateIdx - Receiving data rate * Out: * none * * Return Value: response Control frame rate * */ static u16 swGetCCKControlRate(struct vnt_private *pDevice, u16 wRateIdx) { u16 ui = wRateIdx; while (ui > RATE_1M) { if (pDevice->wBasicRate & (1 << ui)) return ui; ui--; } return RATE_1M; } /* * Description: Get OFDM mode basic rate * * Parameters: * In: * pDevice - The adapter to be set * wRateIdx - Receiving data rate * Out: * none * * Return Value: response Control frame rate * */ static u16 swGetOFDMControlRate(struct vnt_private *pDevice, u16 wRateIdx) { u16 ui = wRateIdx; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BASIC RATE: %X\n", pDevice->wBasicRate); if (!CARDbIsOFDMinBasicRate(pDevice)) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "swGetOFDMControlRate:(NO OFDM) %d\n", wRateIdx); if (wRateIdx > RATE_24M) wRateIdx = RATE_24M; return wRateIdx; } while (ui > RATE_11M) { if (pDevice->wBasicRate & (1 << ui)) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "swGetOFDMControlRate: %d\n", ui); return ui; } ui--; } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"swGetOFDMControlRate: 
6M\n"); return RATE_24M; } /* * Description: Calculate TxRate and RsvTime fields for RSPINF in OFDM mode. * * Parameters: * In: * wRate - Tx Rate * byPktType - Tx Packet type * Out: * pbyTxRate - pointer to RSPINF TxRate field * pbyRsvTime - pointer to RSPINF RsvTime field * * Return Value: none * */ void CARDvCalculateOFDMRParameter ( u16 wRate, u8 byBBType, u8 * pbyTxRate, u8 * pbyRsvTime ) { switch (wRate) { case RATE_6M : if (byBBType == BB_TYPE_11A) {//5GHZ *pbyTxRate = 0x9B; *pbyRsvTime = 24; } else { *pbyTxRate = 0x8B; *pbyRsvTime = 30; } break; case RATE_9M : if (byBBType == BB_TYPE_11A) {//5GHZ *pbyTxRate = 0x9F; *pbyRsvTime = 16; } else { *pbyTxRate = 0x8F; *pbyRsvTime = 22; } break; case RATE_12M : if (byBBType == BB_TYPE_11A) {//5GHZ *pbyTxRate = 0x9A; *pbyRsvTime = 12; } else { *pbyTxRate = 0x8A; *pbyRsvTime = 18; } break; case RATE_18M : if (byBBType == BB_TYPE_11A) {//5GHZ *pbyTxRate = 0x9E; *pbyRsvTime = 8; } else { *pbyTxRate = 0x8E; *pbyRsvTime = 14; } break; case RATE_36M : if (byBBType == BB_TYPE_11A) {//5GHZ *pbyTxRate = 0x9D; *pbyRsvTime = 4; } else { *pbyTxRate = 0x8D; *pbyRsvTime = 10; } break; case RATE_48M : if (byBBType == BB_TYPE_11A) {//5GHZ *pbyTxRate = 0x98; *pbyRsvTime = 4; } else { *pbyTxRate = 0x88; *pbyRsvTime = 10; } break; case RATE_54M : if (byBBType == BB_TYPE_11A) {//5GHZ *pbyTxRate = 0x9C; *pbyRsvTime = 4; } else { *pbyTxRate = 0x8C; *pbyRsvTime = 10; } break; case RATE_24M : default : if (byBBType == BB_TYPE_11A) {//5GHZ *pbyTxRate = 0x99; *pbyRsvTime = 8; } else { *pbyTxRate = 0x89; *pbyRsvTime = 14; } break; } } /* * Description: Set RSPINF * * Parameters: * In: * pDevice - The adapter to be set * Out: * none * * Return Value: None. 
* */ void CARDvSetRSPINF(struct vnt_private *pDevice, u8 byBBType) { struct vnt_phy_field phy[4]; u8 abyTxRate[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; /* For OFDM */ u8 abyRsvTime[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; u8 abyData[34]; int i; //RSPINF_b_1 BBvCalculateParameter(pDevice, 14, swGetCCKControlRate(pDevice, RATE_1M), PK_TYPE_11B, &phy[0]); ///RSPINF_b_2 BBvCalculateParameter(pDevice, 14, swGetCCKControlRate(pDevice, RATE_2M), PK_TYPE_11B, &phy[1]); //RSPINF_b_5 BBvCalculateParameter(pDevice, 14, swGetCCKControlRate(pDevice, RATE_5M), PK_TYPE_11B, &phy[2]); //RSPINF_b_11 BBvCalculateParameter(pDevice, 14, swGetCCKControlRate(pDevice, RATE_11M), PK_TYPE_11B, &phy[3]); //RSPINF_a_6 CARDvCalculateOFDMRParameter (RATE_6M, byBBType, &abyTxRate[0], &abyRsvTime[0]); //RSPINF_a_9 CARDvCalculateOFDMRParameter (RATE_9M, byBBType, &abyTxRate[1], &abyRsvTime[1]); //RSPINF_a_12 CARDvCalculateOFDMRParameter (RATE_12M, byBBType, &abyTxRate[2], &abyRsvTime[2]); //RSPINF_a_18 CARDvCalculateOFDMRParameter (RATE_18M, byBBType, &abyTxRate[3], &abyRsvTime[3]); //RSPINF_a_24 CARDvCalculateOFDMRParameter (RATE_24M, byBBType, &abyTxRate[4], &abyRsvTime[4]); //RSPINF_a_36 CARDvCalculateOFDMRParameter (swGetOFDMControlRate(pDevice, RATE_36M), byBBType, &abyTxRate[5], &abyRsvTime[5]); //RSPINF_a_48 CARDvCalculateOFDMRParameter (swGetOFDMControlRate(pDevice, RATE_48M), byBBType, &abyTxRate[6], &abyRsvTime[6]); //RSPINF_a_54 CARDvCalculateOFDMRParameter (swGetOFDMControlRate(pDevice, RATE_54M), byBBType, &abyTxRate[7], &abyRsvTime[7]); //RSPINF_a_72 CARDvCalculateOFDMRParameter (swGetOFDMControlRate(pDevice, RATE_54M), byBBType, &abyTxRate[8], &abyRsvTime[8]); put_unaligned(phy[0].len, (u16 *)&abyData[0]); abyData[2] = phy[0].signal; abyData[3] = phy[0].service; put_unaligned(phy[1].len, (u16 *)&abyData[4]); abyData[6] = phy[1].signal; abyData[7] = phy[1].service; put_unaligned(phy[2].len, (u16 *)&abyData[8]); abyData[10] = phy[2].signal; abyData[11] = phy[2].service; put_unaligned(phy[3].len, 
(u16 *)&abyData[12]); abyData[14] = phy[3].signal; abyData[15] = phy[3].service; for (i = 0; i < 9; i++) { abyData[16+i*2] = abyTxRate[i]; abyData[16+i*2+1] = abyRsvTime[i]; } CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_WRITE, MAC_REG_RSPINF_B_1, MESSAGE_REQUEST_MACREG, 34, &abyData[0]); } /* * Description: Update IFS * * Parameters: * In: * pDevice - The adapter to be set * Out: * none * * Return Value: None. * */ void vUpdateIFS(struct vnt_private *pDevice) { u8 byMaxMin = 0; u8 byData[4]; if (pDevice->byPacketType==PK_TYPE_11A) {//0000 0000 0000 0000,11a pDevice->uSlot = C_SLOT_SHORT; pDevice->uSIFS = C_SIFS_A; pDevice->uDIFS = C_SIFS_A + 2*C_SLOT_SHORT; pDevice->uCwMin = C_CWMIN_A; byMaxMin = 4; } else if (pDevice->byPacketType==PK_TYPE_11B) {//0000 0001 0000 0000,11b pDevice->uSlot = C_SLOT_LONG; pDevice->uSIFS = C_SIFS_BG; pDevice->uDIFS = C_SIFS_BG + 2*C_SLOT_LONG; pDevice->uCwMin = C_CWMIN_B; byMaxMin = 5; } else {// PK_TYPE_11GA & PK_TYPE_11GB u8 byRate = 0; bool bOFDMRate = false; unsigned int ii = 0; PWLAN_IE_SUPP_RATES pItemRates = NULL; pDevice->uSIFS = C_SIFS_BG; if (pDevice->bShortSlotTime) { pDevice->uSlot = C_SLOT_SHORT; } else { pDevice->uSlot = C_SLOT_LONG; } pDevice->uDIFS = C_SIFS_BG + 2*pDevice->uSlot; pItemRates = (PWLAN_IE_SUPP_RATES)pDevice->vnt_mgmt.abyCurrSuppRates; for (ii = 0; ii < pItemRates->len; ii++) { byRate = (u8)(pItemRates->abyRates[ii]&0x7F); if (RATEwGetRateIdx(byRate) > RATE_11M) { bOFDMRate = true; break; } } if (bOFDMRate == false) { pItemRates = (PWLAN_IE_SUPP_RATES)pDevice->vnt_mgmt .abyCurrExtSuppRates; for (ii = 0; ii < pItemRates->len; ii++) { byRate = (u8)(pItemRates->abyRates[ii]&0x7F); if (RATEwGetRateIdx(byRate) > RATE_11M) { bOFDMRate = true; break; } } } if (bOFDMRate == true) { pDevice->uCwMin = C_CWMIN_A; byMaxMin = 4; } else { pDevice->uCwMin = C_CWMIN_B; byMaxMin = 5; } } pDevice->uCwMax = C_CWMAX; pDevice->uEIFS = C_EIFS; byData[0] = (u8)pDevice->uSIFS; byData[1] = (u8)pDevice->uDIFS; byData[2] = 
(u8)pDevice->uEIFS; byData[3] = (u8)pDevice->uSlot; CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_WRITE, MAC_REG_SIFS, MESSAGE_REQUEST_MACREG, 4, &byData[0]); byMaxMin |= 0xA0;//1010 1111,C_CWMAX = 1023 CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_WRITE, MAC_REG_CWMAXMIN0, MESSAGE_REQUEST_MACREG, 1, &byMaxMin); } void CARDvUpdateBasicTopRate(struct vnt_private *pDevice) { u8 byTopOFDM = RATE_24M, byTopCCK = RATE_1M; u8 ii; //Determines the highest basic rate. for (ii = RATE_54M; ii >= RATE_6M; ii --) { if ( (pDevice->wBasicRate) & ((u16)(1<<ii)) ) { byTopOFDM = ii; break; } } pDevice->byTopOFDMBasicRate = byTopOFDM; for (ii = RATE_11M;; ii --) { if ( (pDevice->wBasicRate) & ((u16)(1<<ii)) ) { byTopCCK = ii; break; } if (ii == RATE_1M) break; } pDevice->byTopCCKBasicRate = byTopCCK; } /* * Description: Set NIC Tx Basic Rate * * Parameters: * In: * pDevice - The adapter to be set * wBasicRate - Basic Rate to be set * Out: * none * * Return Value: true if succeeded; false if failed. * */ void CARDbAddBasicRate(struct vnt_private *pDevice, u16 wRateIdx) { u16 wRate = (1 << wRateIdx); pDevice->wBasicRate |= wRate; //Determines the highest basic rate. CARDvUpdateBasicTopRate(pDevice); } int CARDbIsOFDMinBasicRate(struct vnt_private *pDevice) { int ii; for (ii = RATE_54M; ii >= RATE_6M; ii --) { if ((pDevice->wBasicRate) & ((u16)(1<<ii))) return true; } return false; } u8 CARDbyGetPktType(struct vnt_private *pDevice) { if (pDevice->byBBType == BB_TYPE_11A || pDevice->byBBType == BB_TYPE_11B) { return (u8)pDevice->byBBType; } else if (CARDbIsOFDMinBasicRate(pDevice)) { return PK_TYPE_11GA; } else { return PK_TYPE_11GB; } } /* * Description: Calculate TSF offset of two TSF input * Get TSF Offset from RxBCN's TSF and local TSF * * Parameters: * In: * pDevice - The adapter to be sync. 
* qwTSF1 - Rx BCN's TSF * qwTSF2 - Local TSF * Out: * none * * Return Value: TSF Offset value * */ u64 CARDqGetTSFOffset(u8 byRxRate, u64 qwTSF1, u64 qwTSF2) { u64 qwTSFOffset = 0; u16 wRxBcnTSFOffst = 0; wRxBcnTSFOffst = cwRXBCNTSFOff[byRxRate % MAX_RATE]; qwTSF2 += (u64)wRxBcnTSFOffst; qwTSFOffset = qwTSF1 - qwTSF2; return qwTSFOffset; } /* * Description: Sync. TSF counter to BSS * Get TSF offset and write to HW * * Parameters: * In: * pDevice - The adapter to be sync. * qwBSSTimestamp - Rx BCN's TSF * qwLocalTSF - Local TSF * Out: * none * * Return Value: none * */ void CARDvAdjustTSF(struct vnt_private *pDevice, u8 byRxRate, u64 qwBSSTimestamp, u64 qwLocalTSF) { u64 qwTSFOffset = 0; u8 pbyData[8]; qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp, qwLocalTSF); // adjust TSF // HW's TSF add TSF Offset reg pbyData[0] = (u8)qwTSFOffset; pbyData[1] = (u8)(qwTSFOffset >> 8); pbyData[2] = (u8)(qwTSFOffset >> 16); pbyData[3] = (u8)(qwTSFOffset >> 24); pbyData[4] = (u8)(qwTSFOffset >> 32); pbyData[5] = (u8)(qwTSFOffset >> 40); pbyData[6] = (u8)(qwTSFOffset >> 48); pbyData[7] = (u8)(qwTSFOffset >> 56); CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_SET_TSFTBTT, MESSAGE_REQUEST_TSF, 0, 8, pbyData ); } /* * Description: Read NIC TSF counter * Get local TSF counter * * Parameters: * In: * pDevice - The adapter to be read * Out: * qwCurrTSF - Current TSF counter * * Return Value: true if success; otherwise false * */ bool CARDbGetCurrentTSF(struct vnt_private *pDevice, u64 *pqwCurrTSF) { *pqwCurrTSF = pDevice->qwCurrTSF; return true; } /* * Description: Clear NIC TSF counter * Clear local TSF counter * * Parameters: * In: * pDevice - The adapter to be read * * Return Value: true if success; otherwise false * */ bool CARDbClearCurrentTSF(struct vnt_private *pDevice) { MACvRegBitsOn(pDevice, MAC_REG_TFTCTL, TFTCTL_TSFCNTRST); pDevice->qwCurrTSF = 0; return true; } /* * Description: Read NIC TSF counter * Get NEXTTBTT from adjusted TSF and Beacon Interval * * Parameters: * 
In: * qwTSF - Current TSF counter * wbeaconInterval - Beacon Interval * Out: * qwCurrTSF - Current TSF counter * * Return Value: TSF value of next Beacon * */ u64 CARDqGetNextTBTT(u64 qwTSF, u16 wBeaconInterval) { unsigned int uLowNextTBTT; unsigned int uHighRemain, uLowRemain; unsigned int uBeaconInterval; uBeaconInterval = wBeaconInterval * 1024; // Next TBTT = ((local_current_TSF / beacon_interval) + 1 ) * beacon_interval uLowNextTBTT = ((qwTSF & 0xffffffffULL) >> 10) << 10; uLowRemain = (uLowNextTBTT) % uBeaconInterval; uHighRemain = ((0x80000000 % uBeaconInterval) * 2 * (u32)(qwTSF >> 32)) % uBeaconInterval; uLowRemain = (uHighRemain + uLowRemain) % uBeaconInterval; uLowRemain = uBeaconInterval - uLowRemain; // check if carry when add one beacon interval if ((~uLowNextTBTT) < uLowRemain) qwTSF = ((qwTSF >> 32) + 1) << 32; qwTSF = (qwTSF & 0xffffffff00000000ULL) | (u64)(uLowNextTBTT + uLowRemain); return (qwTSF); } /* * Description: Set NIC TSF counter for first Beacon time * Get NEXTTBTT from adjusted TSF and Beacon Interval * * Parameters: * In: * dwIoBase - IO Base * wBeaconInterval - Beacon Interval * Out: * none * * Return Value: none * */ void CARDvSetFirstNextTBTT(struct vnt_private *pDevice, u16 wBeaconInterval) { u64 qwNextTBTT = 0; u8 pbyData[8]; CARDbClearCurrentTSF(pDevice); //CARDbGetCurrentTSF(pDevice, &qwNextTBTT); //Get Local TSF counter qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval); // Set NextTBTT pbyData[0] = (u8)qwNextTBTT; pbyData[1] = (u8)(qwNextTBTT >> 8); pbyData[2] = (u8)(qwNextTBTT >> 16); pbyData[3] = (u8)(qwNextTBTT >> 24); pbyData[4] = (u8)(qwNextTBTT >> 32); pbyData[5] = (u8)(qwNextTBTT >> 40); pbyData[6] = (u8)(qwNextTBTT >> 48); pbyData[7] = (u8)(qwNextTBTT >> 56); CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_SET_TSFTBTT, MESSAGE_REQUEST_TBTT, 0, 8, pbyData ); return; } /* * Description: Sync NIC TSF counter for Beacon time * Get NEXTTBTT and write to HW * * Parameters: * In: * pDevice - The adapter to be set * qwTSF - 
Current TSF counter * wBeaconInterval - Beacon Interval * Out: * none * * Return Value: none * */ void CARDvUpdateNextTBTT(struct vnt_private *pDevice, u64 qwTSF, u16 wBeaconInterval) { u8 pbyData[8]; qwTSF = CARDqGetNextTBTT(qwTSF, wBeaconInterval); // Set NextTBTT pbyData[0] = (u8)qwTSF; pbyData[1] = (u8)(qwTSF >> 8); pbyData[2] = (u8)(qwTSF >> 16); pbyData[3] = (u8)(qwTSF >> 24); pbyData[4] = (u8)(qwTSF >> 32); pbyData[5] = (u8)(qwTSF >> 40); pbyData[6] = (u8)(qwTSF >> 48); pbyData[7] = (u8)(qwTSF >> 56); CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_SET_TSFTBTT, MESSAGE_REQUEST_TBTT, 0, 8, pbyData ); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Card:Update Next TBTT[%8lx]\n", (unsigned long)qwTSF); return; } /* * Description: Turn off Radio power * * Parameters: * In: * pDevice - The adapter to be turned off * Out: * none * * Return Value: true if success; otherwise false * */ int CARDbRadioPowerOff(struct vnt_private *pDevice) { int bResult = true; //if (pDevice->bRadioOff == true) // return true; pDevice->bRadioOff = true; switch (pDevice->byRFType) { case RF_AL2230: case RF_AL2230S: case RF_AIROHA7230: case RF_VT3226: //RobertYu:20051111 case RF_VT3226D0: case RF_VT3342A0: //RobertYu:20060609 MACvRegBitsOff(pDevice, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE2 | SOFTPWRCTL_SWPE3)); break; } MACvRegBitsOff(pDevice, MAC_REG_HOSTCR, HOSTCR_RXON); BBvSetDeepSleep(pDevice); return bResult; } /* * Description: Turn on Radio power * * Parameters: * In: * pDevice - The adapter to be turned on * Out: * none * * Return Value: true if success; otherwise false * */ int CARDbRadioPowerOn(struct vnt_private *pDevice) { int bResult = true; if ((pDevice->bHWRadioOff == true) || (pDevice->bRadioControlOff == true)) { return false; } //if (pDevice->bRadioOff == false) // return true; pDevice->bRadioOff = false; BBvExitDeepSleep(pDevice); MACvRegBitsOn(pDevice, MAC_REG_HOSTCR, HOSTCR_RXON); switch (pDevice->byRFType) { case RF_AL2230: case RF_AL2230S: case RF_AIROHA7230: case RF_VT3226: 
//RobertYu:20051111 case RF_VT3226D0: case RF_VT3342A0: //RobertYu:20060609 MACvRegBitsOn(pDevice, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE2 | SOFTPWRCTL_SWPE3)); break; } return bResult; } void CARDvSetBSSMode(struct vnt_private *pDevice) { // Set BB and packet type at the same time.//{{RobertYu:20050222, AL7230 have two TX PA output, only connet to b/g now // so in 11a mode need to set the MAC Reg0x4C to 11b/g mode to turn on PA if( (pDevice->byRFType == RF_AIROHA7230 ) && (pDevice->byBBType == BB_TYPE_11A) ) { MACvSetBBType(pDevice, BB_TYPE_11G); } else { MACvSetBBType(pDevice, pDevice->byBBType); } pDevice->byPacketType = CARDbyGetPktType(pDevice); if (pDevice->byBBType == BB_TYPE_11A) { ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x88, 0x03); } else if (pDevice->byBBType == BB_TYPE_11B) { ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x88, 0x02); } else if (pDevice->byBBType == BB_TYPE_11G) { ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x88, 0x08); } vUpdateIFS(pDevice); CARDvSetRSPINF(pDevice, (u8)pDevice->byBBType); if ( pDevice->byBBType == BB_TYPE_11A ) { //request by Jack 2005-04-26 if (pDevice->byRFType == RF_AIROHA7230) { pDevice->abyBBVGA[0] = 0x20; ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xE7, pDevice->abyBBVGA[0]); } pDevice->abyBBVGA[2] = 0x10; pDevice->abyBBVGA[3] = 0x10; } else { //request by Jack 2005-04-26 if (pDevice->byRFType == RF_AIROHA7230) { pDevice->abyBBVGA[0] = 0x1C; ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xE7, pDevice->abyBBVGA[0]); } pDevice->abyBBVGA[2] = 0x0; pDevice->abyBBVGA[3] = 0x0; } }
gpl-2.0
infectedmushi/kernel-sony-copyleft
drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
331
8695
/* Digital Input IRQ Function Selection */ #define APCI1564_DI_INT_OR (0 << 1) #define APCI1564_DI_INT_AND (1 << 1) /* Digital Input Interrupt Enable Disable. */ #define APCI1564_DI_INT_ENABLE 0x4 #define APCI1564_DI_INT_DISABLE 0xfffffffb /* Digital Output Interrupt Enable Disable. */ #define APCI1564_DO_VCC_INT_ENABLE 0x1 #define APCI1564_DO_VCC_INT_DISABLE 0xfffffffe #define APCI1564_DO_CC_INT_ENABLE 0x2 #define APCI1564_DO_CC_INT_DISABLE 0xfffffffd /* TIMER COUNTER WATCHDOG DEFINES */ #define ADDIDATA_TIMER 0 #define ADDIDATA_COUNTER 1 #define ADDIDATA_WATCHDOG 2 #define APCI1564_COUNTER1 0 #define APCI1564_COUNTER2 1 #define APCI1564_COUNTER3 2 #define APCI1564_COUNTER4 3 /* * devpriv->amcc_iobase Register Map */ #define APCI1564_DI_REG 0x04 #define APCI1564_DI_INT_MODE1_REG 0x08 #define APCI1564_DI_INT_MODE2_REG 0x0c #define APCI1564_DI_INT_STATUS_REG 0x10 #define APCI1564_DI_IRQ_REG 0x14 #define APCI1564_DO_REG 0x18 #define APCI1564_DO_INT_CTRL_REG 0x1c #define APCI1564_DO_INT_STATUS_REG 0x20 #define APCI1564_DO_IRQ_REG 0x24 #define APCI1564_WDOG_REG 0x28 #define APCI1564_WDOG_RELOAD_REG 0x2c #define APCI1564_WDOG_TIMEBASE_REG 0x30 #define APCI1564_WDOG_CTRL_REG 0x34 #define APCI1564_WDOG_STATUS_REG 0x38 #define APCI1564_WDOG_IRQ_REG 0x3c #define APCI1564_WDOG_WARN_TIMEVAL_REG 0x40 #define APCI1564_WDOG_WARN_TIMEBASE_REG 0x44 #define APCI1564_TIMER_REG 0x48 #define APCI1564_TIMER_RELOAD_REG 0x4c #define APCI1564_TIMER_TIMEBASE_REG 0x50 #define APCI1564_TIMER_CTRL_REG 0x54 #define APCI1564_TIMER_STATUS_REG 0x58 #define APCI1564_TIMER_IRQ_REG 0x5c #define APCI1564_TIMER_WARN_TIMEVAL_REG 0x60 #define APCI1564_TIMER_WARN_TIMEBASE_REG 0x64 /* * dev->iobase Register Map */ #define APCI1564_COUNTER_REG(x) (0x00 + ((x) * 0x20)) #define APCI1564_COUNTER_RELOAD_REG(x) (0x04 + ((x) * 0x20)) #define APCI1564_COUNTER_TIMEBASE_REG(x) (0x08 + ((x) * 0x20)) #define APCI1564_COUNTER_CTRL_REG(x) (0x0c + ((x) * 0x20)) #define APCI1564_COUNTER_STATUS_REG(x) (0x10 + ((x) * 
0x20)) #define APCI1564_COUNTER_IRQ_REG(x) (0x14 + ((x) * 0x20)) #define APCI1564_COUNTER_WARN_TIMEVAL_REG(x) (0x18 + ((x) * 0x20)) #define APCI1564_COUNTER_WARN_TIMEBASE_REG(x) (0x1c + ((x) * 0x20)) /* * Configures The Timer or Counter * * data[0] Configure as: 0 = Timer, 1 = Counter * data[1] 1 = Enable Interrupt, 0 = Disable Interrupt * data[2] Time Unit * data[3] Reload Value * data[4] Timer Mode * data[5] Timer Counter Watchdog Number * data[6] Counter Direction */ static int apci1564_timer_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct apci1564_private *devpriv = dev->private; unsigned int ul_Command1 = 0; devpriv->tsk_current = current; if (data[0] == ADDIDATA_TIMER) { /* First Stop The Timer */ ul_Command1 = inl(devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG); ul_Command1 = ul_Command1 & 0xFFFFF9FEUL; /* Stop The Timer */ outl(ul_Command1, devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG); devpriv->timer_select_mode = ADDIDATA_TIMER; if (data[1] == 1) { /* Enable TIMER int & DISABLE ALL THE OTHER int SOURCES */ outl(0x02, devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG); outl(0x0, devpriv->amcc_iobase + APCI1564_DI_IRQ_REG); outl(0x0, devpriv->amcc_iobase + APCI1564_DO_IRQ_REG); outl(0x0, devpriv->amcc_iobase + APCI1564_WDOG_IRQ_REG); outl(0x0, dev->iobase + APCI1564_COUNTER_IRQ_REG(APCI1564_COUNTER1)); outl(0x0, dev->iobase + APCI1564_COUNTER_IRQ_REG(APCI1564_COUNTER2)); outl(0x0, dev->iobase + APCI1564_COUNTER_IRQ_REG(APCI1564_COUNTER3)); outl(0x0, dev->iobase + APCI1564_COUNTER_IRQ_REG(APCI1564_COUNTER4)); } else { /* disable Timer interrupt */ outl(0x0, devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG); } /* Loading Timebase */ outl(data[2], devpriv->amcc_iobase + APCI1564_TIMER_TIMEBASE_REG); /* Loading the Reload value */ outl(data[3], devpriv->amcc_iobase + APCI1564_TIMER_RELOAD_REG); ul_Command1 = inl(devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG); ul_Command1 = (ul_Command1 & 
0xFFF719E2UL) | 2UL << 13UL | 0x10UL; /* mode 2 */ outl(ul_Command1, devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG); } else if (data[0] == ADDIDATA_COUNTER) { devpriv->timer_select_mode = ADDIDATA_COUNTER; devpriv->mode_select_register = data[5]; /* First Stop The Counter */ ul_Command1 = inl(dev->iobase + APCI1564_COUNTER_CTRL_REG(data[5] - 1)); ul_Command1 = ul_Command1 & 0xFFFFF9FEUL; /* Stop The Timer */ outl(ul_Command1, dev->iobase + APCI1564_COUNTER_CTRL_REG(data[5] - 1)); /* Set the reload value */ outl(data[3], dev->iobase + APCI1564_COUNTER_RELOAD_REG(data[5] - 1)); /* Set the mode : */ /* - Disable the hardware */ /* - Disable the counter mode */ /* - Disable the warning */ /* - Disable the reset */ /* - Disable the timer mode */ /* - Enable the counter mode */ ul_Command1 = (ul_Command1 & 0xFFFC19E2UL) | 0x80000UL | (unsigned int) ((unsigned int) data[4] << 16UL); outl(ul_Command1, dev->iobase + APCI1564_COUNTER_CTRL_REG(data[5] - 1)); /* Enable or Disable Interrupt */ ul_Command1 = (ul_Command1 & 0xFFFFF9FD) | (data[1] << 1); outl(ul_Command1, dev->iobase + APCI1564_COUNTER_CTRL_REG(data[5] - 1)); /* Set the Up/Down selection */ ul_Command1 = (ul_Command1 & 0xFFFBF9FFUL) | (data[6] << 18); outl(ul_Command1, dev->iobase + APCI1564_COUNTER_CTRL_REG(data[5] - 1)); } else { dev_err(dev->class_dev, "Invalid subdevice.\n"); } return insn->n; } /* * Start / Stop The Selected Timer or Counter * * data[0] Configure as: 0 = Timer, 1 = Counter * data[1] 0 = Stop, 1 = Start, 2 = Trigger Clear (Only Counter) */ static int apci1564_timer_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct apci1564_private *devpriv = dev->private; unsigned int ul_Command1 = 0; if (devpriv->timer_select_mode == ADDIDATA_TIMER) { if (data[1] == 1) { ul_Command1 = inl(devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG); ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x1UL; /* Enable the Timer */ outl(ul_Command1, 
devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG); } else if (data[1] == 0) { /* Stop The Timer */ ul_Command1 = inl(devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG); ul_Command1 = ul_Command1 & 0xFFFFF9FEUL; outl(ul_Command1, devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG); } } else if (devpriv->timer_select_mode == ADDIDATA_COUNTER) { ul_Command1 = inl(dev->iobase + APCI1564_COUNTER_CTRL_REG(devpriv->mode_select_register - 1)); if (data[1] == 1) { /* Start the Counter subdevice */ ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x1UL; } else if (data[1] == 0) { /* Stops the Counter subdevice */ ul_Command1 = 0; } else if (data[1] == 2) { /* Clears the Counter subdevice */ ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x400; } outl(ul_Command1, dev->iobase + APCI1564_COUNTER_CTRL_REG(devpriv->mode_select_register - 1)); } else { dev_err(dev->class_dev, "Invalid subdevice.\n"); } return insn->n; } /* * Read The Selected Timer or Counter */ static int apci1564_timer_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct apci1564_private *devpriv = dev->private; unsigned int ul_Command1 = 0; if (devpriv->timer_select_mode == ADDIDATA_TIMER) { /* Stores the status of the Timer */ data[0] = inl(devpriv->amcc_iobase + APCI1564_TIMER_STATUS_REG) & 0x1; /* Stores the Actual value of the Timer */ data[1] = inl(devpriv->amcc_iobase + APCI1564_TIMER_REG); } else if (devpriv->timer_select_mode == ADDIDATA_COUNTER) { /* Read the Counter Actual Value. 
*/ data[0] = inl(dev->iobase + APCI1564_COUNTER_REG(devpriv->mode_select_register - 1)); ul_Command1 = inl(dev->iobase + APCI1564_COUNTER_STATUS_REG(devpriv->mode_select_register - 1)); /* Get the software trigger status */ data[1] = (unsigned char) ((ul_Command1 >> 1) & 1); /* Get the hardware trigger status */ data[2] = (unsigned char) ((ul_Command1 >> 2) & 1); /* Get the software clear status */ data[3] = (unsigned char) ((ul_Command1 >> 3) & 1); /* Get the overflow status */ data[4] = (unsigned char) ((ul_Command1 >> 0) & 1); } else { dev_err(dev->class_dev, "Invalid subdevice.\n"); } return insn->n; }
gpl-2.0
bigzz/linux-stable
arch/powerpc/kernel/cputable.c
587
67939
/* * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org) * * Modifications for ppc64: * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/string.h> #include <linux/sched.h> #include <linux/threads.h> #include <linux/init.h> #include <linux/export.h> #include <asm/oprofile_impl.h> #include <asm/cputable.h> #include <asm/prom.h> /* for PTRRELOC on ARCH=ppc */ #include <asm/mmu.h> #include <asm/setup.h> struct cpu_spec* cur_cpu_spec = NULL; EXPORT_SYMBOL(cur_cpu_spec); /* The platform string corresponding to the real PVR */ const char *powerpc_base_platform; /* NOTE: * Unlike ppc32, ppc64 will only call this once for the boot CPU, it's * the responsibility of the appropriate CPU save/restore functions to * eventually copy these settings over. Those save/restore aren't yet * part of the cputable though. 
That has to be fixed for both ppc32 * and ppc64 */ #ifdef CONFIG_PPC32 extern void __setup_cpu_e200(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_e500v1(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_e500v2(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_e500mc(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_440ep(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_440epx(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_440gx(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_440grx(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_440spe(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_440x5(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_460ex(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_460gt(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_460sx(unsigned long offset, struct cpu_spec *spec); extern void __setup_cpu_apm821xx(unsigned long offset, struct cpu_spec *spec); extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_750cx(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_750fx(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_7400(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_7410(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec); #endif /* CONFIG_PPC32 */ #ifdef CONFIG_PPC64 extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_pa6t(unsigned long offset, 
struct cpu_spec* spec); extern void __setup_cpu_a2(unsigned long offset, struct cpu_spec* spec); extern void __restore_cpu_pa6t(void); extern void __restore_cpu_ppc970(void); extern void __setup_cpu_power7(unsigned long offset, struct cpu_spec* spec); extern void __restore_cpu_power7(void); extern void __setup_cpu_power8(unsigned long offset, struct cpu_spec* spec); extern void __restore_cpu_power8(void); extern void __restore_cpu_a2(void); extern void __flush_tlb_power7(unsigned int action); extern void __flush_tlb_power8(unsigned int action); extern long __machine_check_early_realmode_p7(struct pt_regs *regs); extern long __machine_check_early_realmode_p8(struct pt_regs *regs); #endif /* CONFIG_PPC64 */ #if defined(CONFIG_E500) extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_e6500(unsigned long offset, struct cpu_spec* spec); extern void __restore_cpu_e5500(void); extern void __restore_cpu_e6500(void); #endif /* CONFIG_E500 */ /* This table only contains "desktop" CPUs, it need to be filled with embedded * ones as well... 
*/ #define COMMON_USER (PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \ PPC_FEATURE_HAS_MMU) #define COMMON_USER_PPC64 (COMMON_USER | PPC_FEATURE_64) #define COMMON_USER_POWER4 (COMMON_USER_PPC64 | PPC_FEATURE_POWER4) #define COMMON_USER_POWER5 (COMMON_USER_PPC64 | PPC_FEATURE_POWER5 |\ PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP) #define COMMON_USER_POWER5_PLUS (COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS|\ PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP) #define COMMON_USER_POWER6 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_05 |\ PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \ PPC_FEATURE_TRUE_LE | \ PPC_FEATURE_PSERIES_PERFMON_COMPAT) #define COMMON_USER_POWER7 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_06 |\ PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \ PPC_FEATURE_TRUE_LE | \ PPC_FEATURE_PSERIES_PERFMON_COMPAT) #define COMMON_USER2_POWER7 (PPC_FEATURE2_DSCR) #define COMMON_USER_POWER8 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_06 |\ PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \ PPC_FEATURE_TRUE_LE | \ PPC_FEATURE_PSERIES_PERFMON_COMPAT) #define COMMON_USER2_POWER8 (PPC_FEATURE2_ARCH_2_07 | \ PPC_FEATURE2_HTM_COMP | \ PPC_FEATURE2_HTM_NOSC_COMP | \ PPC_FEATURE2_DSCR | \ PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | \ PPC_FEATURE2_VEC_CRYPTO) #define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\ PPC_FEATURE_TRUE_LE | \ PPC_FEATURE_HAS_ALTIVEC_COMP) #ifdef CONFIG_PPC_BOOK3E_64 #define COMMON_USER_BOOKE (COMMON_USER_PPC64 | PPC_FEATURE_BOOKE) #else #define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \ PPC_FEATURE_BOOKE) #endif static struct cpu_spec __initdata cpu_specs[] = { #ifdef CONFIG_PPC_BOOK3S_64 { /* Power4 */ .pvr_mask = 0xffff0000, .pvr_value = 0x00350000, .cpu_name = "POWER4 (gp)", .cpu_features = CPU_FTRS_POWER4, .cpu_user_features = COMMON_USER_POWER4, .mmu_features = MMU_FTRS_POWER4, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/power4", .oprofile_type = PPC_OPROFILE_POWER4, .platform = 
"power4", }, { /* Power4+ */ .pvr_mask = 0xffff0000, .pvr_value = 0x00380000, .cpu_name = "POWER4+ (gq)", .cpu_features = CPU_FTRS_POWER4, .cpu_user_features = COMMON_USER_POWER4, .mmu_features = MMU_FTRS_POWER4, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/power4", .oprofile_type = PPC_OPROFILE_POWER4, .platform = "power4", }, { /* PPC970 */ .pvr_mask = 0xffff0000, .pvr_value = 0x00390000, .cpu_name = "PPC970", .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, .mmu_features = MMU_FTRS_PPC970, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_ppc970, .cpu_restore = __restore_cpu_ppc970, .oprofile_cpu_type = "ppc64/970", .oprofile_type = PPC_OPROFILE_POWER4, .platform = "ppc970", }, { /* PPC970FX */ .pvr_mask = 0xffff0000, .pvr_value = 0x003c0000, .cpu_name = "PPC970FX", .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, .mmu_features = MMU_FTRS_PPC970, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_ppc970, .cpu_restore = __restore_cpu_ppc970, .oprofile_cpu_type = "ppc64/970", .oprofile_type = PPC_OPROFILE_POWER4, .platform = "ppc970", }, { /* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */ .pvr_mask = 0xffffffff, .pvr_value = 0x00440100, .cpu_name = "PPC970MP", .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, .mmu_features = MMU_FTRS_PPC970, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_ppc970, .cpu_restore = __restore_cpu_ppc970, .oprofile_cpu_type = "ppc64/970MP", .oprofile_type = PPC_OPROFILE_POWER4, .platform = "ppc970", }, { /* PPC970MP */ .pvr_mask = 0xffff0000, .pvr_value = 0x00440000, .cpu_name = "PPC970MP", .cpu_features = CPU_FTRS_PPC970, 
.cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, .mmu_features = MMU_FTRS_PPC970, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_ppc970MP, .cpu_restore = __restore_cpu_ppc970, .oprofile_cpu_type = "ppc64/970MP", .oprofile_type = PPC_OPROFILE_POWER4, .platform = "ppc970", }, { /* PPC970GX */ .pvr_mask = 0xffff0000, .pvr_value = 0x00450000, .cpu_name = "PPC970GX", .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, .mmu_features = MMU_FTRS_PPC970, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_ppc970, .oprofile_cpu_type = "ppc64/970", .oprofile_type = PPC_OPROFILE_POWER4, .platform = "ppc970", }, { /* Power5 GR */ .pvr_mask = 0xffff0000, .pvr_value = 0x003a0000, .cpu_name = "POWER5 (gr)", .cpu_features = CPU_FTRS_POWER5, .cpu_user_features = COMMON_USER_POWER5, .mmu_features = MMU_FTRS_POWER5, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/power5", .oprofile_type = PPC_OPROFILE_POWER4, /* SIHV / SIPR bits are implemented on POWER4+ (GQ) * and above but only works on POWER5 and above */ .oprofile_mmcra_sihv = MMCRA_SIHV, .oprofile_mmcra_sipr = MMCRA_SIPR, .platform = "power5", }, { /* Power5++ */ .pvr_mask = 0xffffff00, .pvr_value = 0x003b0300, .cpu_name = "POWER5+ (gs)", .cpu_features = CPU_FTRS_POWER5, .cpu_user_features = COMMON_USER_POWER5_PLUS, .mmu_features = MMU_FTRS_POWER5, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, .oprofile_cpu_type = "ppc64/power5++", .oprofile_type = PPC_OPROFILE_POWER4, .oprofile_mmcra_sihv = MMCRA_SIHV, .oprofile_mmcra_sipr = MMCRA_SIPR, .platform = "power5+", }, { /* Power5 GS */ .pvr_mask = 0xffff0000, .pvr_value = 0x003b0000, .cpu_name = "POWER5+ (gs)", .cpu_features = CPU_FTRS_POWER5, .cpu_user_features = COMMON_USER_POWER5_PLUS, .mmu_features = MMU_FTRS_POWER5, 
.icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/power5+", .oprofile_type = PPC_OPROFILE_POWER4, .oprofile_mmcra_sihv = MMCRA_SIHV, .oprofile_mmcra_sipr = MMCRA_SIPR, .platform = "power5+", }, { /* POWER6 in P5+ mode; 2.04-compliant processor */ .pvr_mask = 0xffffffff, .pvr_value = 0x0f000001, .cpu_name = "POWER5+", .cpu_features = CPU_FTRS_POWER5, .cpu_user_features = COMMON_USER_POWER5_PLUS, .mmu_features = MMU_FTRS_POWER5, .icache_bsize = 128, .dcache_bsize = 128, .oprofile_cpu_type = "ppc64/ibm-compat-v1", .oprofile_type = PPC_OPROFILE_POWER4, .platform = "power5+", }, { /* Power6 */ .pvr_mask = 0xffff0000, .pvr_value = 0x003e0000, .cpu_name = "POWER6 (raw)", .cpu_features = CPU_FTRS_POWER6, .cpu_user_features = COMMON_USER_POWER6 | PPC_FEATURE_POWER6_EXT, .mmu_features = MMU_FTRS_POWER6, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/power6", .oprofile_type = PPC_OPROFILE_POWER4, .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV, .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR, .oprofile_mmcra_clear = POWER6_MMCRA_THRM | POWER6_MMCRA_OTHER, .platform = "power6x", }, { /* 2.05-compliant processor, i.e. Power6 "architected" mode */ .pvr_mask = 0xffffffff, .pvr_value = 0x0f000002, .cpu_name = "POWER6 (architected)", .cpu_features = CPU_FTRS_POWER6, .cpu_user_features = COMMON_USER_POWER6, .mmu_features = MMU_FTRS_POWER6, .icache_bsize = 128, .dcache_bsize = 128, .oprofile_cpu_type = "ppc64/ibm-compat-v1", .oprofile_type = PPC_OPROFILE_POWER4, .platform = "power6", }, { /* 2.06-compliant processor, i.e. 
Power7 "architected" mode */ .pvr_mask = 0xffffffff, .pvr_value = 0x0f000003, .cpu_name = "POWER7 (architected)", .cpu_features = CPU_FTRS_POWER7, .cpu_user_features = COMMON_USER_POWER7, .cpu_user_features2 = COMMON_USER2_POWER7, .mmu_features = MMU_FTRS_POWER7, .icache_bsize = 128, .dcache_bsize = 128, .oprofile_type = PPC_OPROFILE_POWER4, .oprofile_cpu_type = "ppc64/ibm-compat-v1", .cpu_setup = __setup_cpu_power7, .cpu_restore = __restore_cpu_power7, .flush_tlb = __flush_tlb_power7, .machine_check_early = __machine_check_early_realmode_p7, .platform = "power7", }, { /* 2.07-compliant processor, i.e. Power8 "architected" mode */ .pvr_mask = 0xffffffff, .pvr_value = 0x0f000004, .cpu_name = "POWER8 (architected)", .cpu_features = CPU_FTRS_POWER8, .cpu_user_features = COMMON_USER_POWER8, .cpu_user_features2 = COMMON_USER2_POWER8, .mmu_features = MMU_FTRS_POWER8, .icache_bsize = 128, .dcache_bsize = 128, .oprofile_type = PPC_OPROFILE_INVALID, .oprofile_cpu_type = "ppc64/ibm-compat-v1", .cpu_setup = __setup_cpu_power8, .cpu_restore = __restore_cpu_power8, .flush_tlb = __flush_tlb_power8, .machine_check_early = __machine_check_early_realmode_p8, .platform = "power8", }, { /* Power7 */ .pvr_mask = 0xffff0000, .pvr_value = 0x003f0000, .cpu_name = "POWER7 (raw)", .cpu_features = CPU_FTRS_POWER7, .cpu_user_features = COMMON_USER_POWER7, .cpu_user_features2 = COMMON_USER2_POWER7, .mmu_features = MMU_FTRS_POWER7, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/power7", .oprofile_type = PPC_OPROFILE_POWER4, .cpu_setup = __setup_cpu_power7, .cpu_restore = __restore_cpu_power7, .flush_tlb = __flush_tlb_power7, .machine_check_early = __machine_check_early_realmode_p7, .platform = "power7", }, { /* Power7+ */ .pvr_mask = 0xffff0000, .pvr_value = 0x004A0000, .cpu_name = "POWER7+ (raw)", .cpu_features = CPU_FTRS_POWER7, .cpu_user_features = COMMON_USER_POWER7, .cpu_user_features2 = COMMON_USER2_POWER7, .mmu_features = 
MMU_FTRS_POWER7, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/power7", .oprofile_type = PPC_OPROFILE_POWER4, .cpu_setup = __setup_cpu_power7, .cpu_restore = __restore_cpu_power7, .flush_tlb = __flush_tlb_power7, .machine_check_early = __machine_check_early_realmode_p7, .platform = "power7+", }, { /* Power8E */ .pvr_mask = 0xffff0000, .pvr_value = 0x004b0000, .cpu_name = "POWER8E (raw)", .cpu_features = CPU_FTRS_POWER8E, .cpu_user_features = COMMON_USER_POWER8, .cpu_user_features2 = COMMON_USER2_POWER8, .mmu_features = MMU_FTRS_POWER8, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/power8", .oprofile_type = PPC_OPROFILE_INVALID, .cpu_setup = __setup_cpu_power8, .cpu_restore = __restore_cpu_power8, .flush_tlb = __flush_tlb_power8, .machine_check_early = __machine_check_early_realmode_p8, .platform = "power8", }, { /* Power8NVL */ .pvr_mask = 0xffff0000, .pvr_value = 0x004c0000, .cpu_name = "POWER8NVL (raw)", .cpu_features = CPU_FTRS_POWER8, .cpu_user_features = COMMON_USER_POWER8, .cpu_user_features2 = COMMON_USER2_POWER8, .mmu_features = MMU_FTRS_POWER8, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/power8", .oprofile_type = PPC_OPROFILE_INVALID, .cpu_setup = __setup_cpu_power8, .cpu_restore = __restore_cpu_power8, .flush_tlb = __flush_tlb_power8, .machine_check_early = __machine_check_early_realmode_p8, .platform = "power8", }, { /* Power8 DD1: Does not support doorbell IPIs */ .pvr_mask = 0xffffff00, .pvr_value = 0x004d0100, .cpu_name = "POWER8 (raw)", .cpu_features = CPU_FTRS_POWER8_DD1, .cpu_user_features = COMMON_USER_POWER8, .cpu_user_features2 = COMMON_USER2_POWER8, .mmu_features = MMU_FTRS_POWER8, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/power8", .oprofile_type = PPC_OPROFILE_INVALID, .cpu_setup = 
__setup_cpu_power8, .cpu_restore = __restore_cpu_power8, .flush_tlb = __flush_tlb_power8, .machine_check_early = __machine_check_early_realmode_p8, .platform = "power8", }, { /* Power8 */ .pvr_mask = 0xffff0000, .pvr_value = 0x004d0000, .cpu_name = "POWER8 (raw)", .cpu_features = CPU_FTRS_POWER8, .cpu_user_features = COMMON_USER_POWER8, .cpu_user_features2 = COMMON_USER2_POWER8, .mmu_features = MMU_FTRS_POWER8, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/power8", .oprofile_type = PPC_OPROFILE_INVALID, .cpu_setup = __setup_cpu_power8, .cpu_restore = __restore_cpu_power8, .flush_tlb = __flush_tlb_power8, .machine_check_early = __machine_check_early_realmode_p8, .platform = "power8", }, { /* Cell Broadband Engine */ .pvr_mask = 0xffff0000, .pvr_value = 0x00700000, .cpu_name = "Cell Broadband Engine", .cpu_features = CPU_FTRS_CELL, .cpu_user_features = COMMON_USER_PPC64 | PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_SMT, .mmu_features = MMU_FTRS_CELL, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 4, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/cell-be", .oprofile_type = PPC_OPROFILE_CELL, .platform = "ppc-cell-be", }, { /* PA Semi PA6T */ .pvr_mask = 0x7fff0000, .pvr_value = 0x00900000, .cpu_name = "PA6T", .cpu_features = CPU_FTRS_PA6T, .cpu_user_features = COMMON_USER_PA6T, .mmu_features = MMU_FTRS_PA6T, .icache_bsize = 64, .dcache_bsize = 64, .num_pmcs = 6, .pmc_type = PPC_PMC_PA6T, .cpu_setup = __setup_cpu_pa6t, .cpu_restore = __restore_cpu_pa6t, .oprofile_cpu_type = "ppc64/pa6t", .oprofile_type = PPC_OPROFILE_PA6T, .platform = "pa6t", }, { /* default match */ .pvr_mask = 0x00000000, .pvr_value = 0x00000000, .cpu_name = "POWER4 (compatible)", .cpu_features = CPU_FTRS_COMPATIBLE, .cpu_user_features = COMMON_USER_PPC64, .mmu_features = MMU_FTRS_DEFAULT_HPTE_ARCH_V2, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, .pmc_type = PPC_PMC_IBM, .platform = "power4", } 
#endif /* CONFIG_PPC_BOOK3S_64 */ #ifdef CONFIG_PPC32 #ifdef CONFIG_PPC_BOOK3S_32 { /* 601 */ .pvr_mask = 0xffff0000, .pvr_value = 0x00010000, .cpu_name = "601", .cpu_features = CPU_FTRS_PPC601, .cpu_user_features = COMMON_USER | PPC_FEATURE_601_INSTR | PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_generic, .platform = "ppc601", }, { /* 603 */ .pvr_mask = 0xffff0000, .pvr_value = 0x00030000, .cpu_name = "603", .cpu_features = CPU_FTRS_603, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = 0, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, .machine_check = machine_check_generic, .platform = "ppc603", }, { /* 603e */ .pvr_mask = 0xffff0000, .pvr_value = 0x00060000, .cpu_name = "603e", .cpu_features = CPU_FTRS_603, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = 0, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, .machine_check = machine_check_generic, .platform = "ppc603", }, { /* 603ev */ .pvr_mask = 0xffff0000, .pvr_value = 0x00070000, .cpu_name = "603ev", .cpu_features = CPU_FTRS_603, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = 0, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, .machine_check = machine_check_generic, .platform = "ppc603", }, { /* 604 */ .pvr_mask = 0xffff0000, .pvr_value = 0x00040000, .cpu_name = "604", .cpu_features = CPU_FTRS_604, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 2, .cpu_setup = __setup_cpu_604, .machine_check = machine_check_generic, .platform = "ppc604", }, { /* 604e */ .pvr_mask = 0xfffff000, .pvr_value = 0x00090000, .cpu_name = "604e", .cpu_features = CPU_FTRS_604, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, 
.num_pmcs = 4, .cpu_setup = __setup_cpu_604, .machine_check = machine_check_generic, .platform = "ppc604", }, { /* 604r */ .pvr_mask = 0xffff0000, .pvr_value = 0x00090000, .cpu_name = "604r", .cpu_features = CPU_FTRS_604, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .cpu_setup = __setup_cpu_604, .machine_check = machine_check_generic, .platform = "ppc604", }, { /* 604ev */ .pvr_mask = 0xffff0000, .pvr_value = 0x000a0000, .cpu_name = "604ev", .cpu_features = CPU_FTRS_604, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .cpu_setup = __setup_cpu_604, .machine_check = machine_check_generic, .platform = "ppc604", }, { /* 740/750 (0x4202, don't support TAU ?) */ .pvr_mask = 0xffffffff, .pvr_value = 0x00084202, .cpu_name = "740/750", .cpu_features = CPU_FTRS_740_NOTAU, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .cpu_setup = __setup_cpu_750, .machine_check = machine_check_generic, .platform = "ppc750", }, { /* 750CX (80100 and 8010x?) 
*/ .pvr_mask = 0xfffffff0, .pvr_value = 0x00080100, .cpu_name = "750CX", .cpu_features = CPU_FTRS_750, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .cpu_setup = __setup_cpu_750cx, .machine_check = machine_check_generic, .platform = "ppc750", }, { /* 750CX (82201 and 82202) */ .pvr_mask = 0xfffffff0, .pvr_value = 0x00082200, .cpu_name = "750CX", .cpu_features = CPU_FTRS_750, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_750cx, .machine_check = machine_check_generic, .platform = "ppc750", }, { /* 750CXe (82214) */ .pvr_mask = 0xfffffff0, .pvr_value = 0x00082210, .cpu_name = "750CXe", .cpu_features = CPU_FTRS_750, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_750cx, .machine_check = machine_check_generic, .platform = "ppc750", }, { /* 750CXe "Gekko" (83214) */ .pvr_mask = 0xffffffff, .pvr_value = 0x00083214, .cpu_name = "750CXe", .cpu_features = CPU_FTRS_750, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_750cx, .machine_check = machine_check_generic, .platform = "ppc750", }, { /* 750CL (and "Broadway") */ .pvr_mask = 0xfffff0e0, .pvr_value = 0x00087000, .cpu_name = "750CL", .cpu_features = CPU_FTRS_750CL, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_750, .machine_check = machine_check_generic, .platform = "ppc750", .oprofile_cpu_type = "ppc/750", 
.oprofile_type = PPC_OPROFILE_G4, }, { /* 745/755 */ .pvr_mask = 0xfffff000, .pvr_value = 0x00083000, .cpu_name = "745/755", .cpu_features = CPU_FTRS_750, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_750, .machine_check = machine_check_generic, .platform = "ppc750", }, { /* 750FX rev 1.x */ .pvr_mask = 0xffffff00, .pvr_value = 0x70000100, .cpu_name = "750FX", .cpu_features = CPU_FTRS_750FX1, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_750, .machine_check = machine_check_generic, .platform = "ppc750", .oprofile_cpu_type = "ppc/750", .oprofile_type = PPC_OPROFILE_G4, }, { /* 750FX rev 2.0 must disable HID0[DPM] */ .pvr_mask = 0xffffffff, .pvr_value = 0x70000200, .cpu_name = "750FX", .cpu_features = CPU_FTRS_750FX2, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_750, .machine_check = machine_check_generic, .platform = "ppc750", .oprofile_cpu_type = "ppc/750", .oprofile_type = PPC_OPROFILE_G4, }, { /* 750FX (All revs except 2.0) */ .pvr_mask = 0xffff0000, .pvr_value = 0x70000000, .cpu_name = "750FX", .cpu_features = CPU_FTRS_750FX, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_750fx, .machine_check = machine_check_generic, .platform = "ppc750", .oprofile_cpu_type = "ppc/750", .oprofile_type = PPC_OPROFILE_G4, }, { /* 750GX */ .pvr_mask = 0xffff0000, .pvr_value = 0x70020000, .cpu_name = "750GX", .cpu_features = CPU_FTRS_750GX, .cpu_user_features = 
COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_750fx, .machine_check = machine_check_generic, .platform = "ppc750", .oprofile_cpu_type = "ppc/750", .oprofile_type = PPC_OPROFILE_G4, }, { /* 740/750 (L2CR bit need fixup for 740) */ .pvr_mask = 0xffff0000, .pvr_value = 0x00080000, .cpu_name = "740/750", .cpu_features = CPU_FTRS_740, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_750, .machine_check = machine_check_generic, .platform = "ppc750", }, { /* 7400 rev 1.1 ? (no TAU) */ .pvr_mask = 0xffffffff, .pvr_value = 0x000c1101, .cpu_name = "7400 (1.1)", .cpu_features = CPU_FTRS_7400_NOTAU, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_7400, .machine_check = machine_check_generic, .platform = "ppc7400", }, { /* 7400 */ .pvr_mask = 0xffff0000, .pvr_value = 0x000c0000, .cpu_name = "7400", .cpu_features = CPU_FTRS_7400, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_7400, .machine_check = machine_check_generic, .platform = "ppc7400", }, { /* 7410 */ .pvr_mask = 0xffff0000, .pvr_value = 0x800c0000, .cpu_name = "7410", .cpu_features = CPU_FTRS_7400, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_7410, .machine_check = machine_check_generic, .platform 
= "ppc7400", }, { /* 7450 2.0 - no doze/nap */ .pvr_mask = 0xffffffff, .pvr_value = 0x80000200, .cpu_name = "7450", .cpu_features = CPU_FTRS_7450_20, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = PPC_OPROFILE_G4, .machine_check = machine_check_generic, .platform = "ppc7450", }, { /* 7450 2.1 */ .pvr_mask = 0xffffffff, .pvr_value = 0x80000201, .cpu_name = "7450", .cpu_features = CPU_FTRS_7450_21, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = PPC_OPROFILE_G4, .machine_check = machine_check_generic, .platform = "ppc7450", }, { /* 7450 2.3 and newer */ .pvr_mask = 0xffff0000, .pvr_value = 0x80000000, .cpu_name = "7450", .cpu_features = CPU_FTRS_7450_23, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = PPC_OPROFILE_G4, .machine_check = machine_check_generic, .platform = "ppc7450", }, { /* 7455 rev 1.x */ .pvr_mask = 0xffffff00, .pvr_value = 0x80010100, .cpu_name = "7455", .cpu_features = CPU_FTRS_7455_1, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = PPC_OPROFILE_G4, .machine_check = machine_check_generic, .platform = 
"ppc7450", }, { /* 7455 rev 2.0 */ .pvr_mask = 0xffffffff, .pvr_value = 0x80010200, .cpu_name = "7455", .cpu_features = CPU_FTRS_7455_20, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = PPC_OPROFILE_G4, .machine_check = machine_check_generic, .platform = "ppc7450", }, { /* 7455 others */ .pvr_mask = 0xffff0000, .pvr_value = 0x80010000, .cpu_name = "7455", .cpu_features = CPU_FTRS_7455, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = PPC_OPROFILE_G4, .machine_check = machine_check_generic, .platform = "ppc7450", }, { /* 7447/7457 Rev 1.0 */ .pvr_mask = 0xffffffff, .pvr_value = 0x80020100, .cpu_name = "7447/7457", .cpu_features = CPU_FTRS_7447_10, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = PPC_OPROFILE_G4, .machine_check = machine_check_generic, .platform = "ppc7450", }, { /* 7447/7457 Rev 1.1 */ .pvr_mask = 0xffffffff, .pvr_value = 0x80020101, .cpu_name = "7447/7457", .cpu_features = CPU_FTRS_7447_10, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = 
PPC_OPROFILE_G4, .machine_check = machine_check_generic, .platform = "ppc7450", }, { /* 7447/7457 Rev 1.2 and later */ .pvr_mask = 0xffff0000, .pvr_value = 0x80020000, .cpu_name = "7447/7457", .cpu_features = CPU_FTRS_7447, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = PPC_OPROFILE_G4, .machine_check = machine_check_generic, .platform = "ppc7450", }, { /* 7447A */ .pvr_mask = 0xffff0000, .pvr_value = 0x80030000, .cpu_name = "7447A", .cpu_features = CPU_FTRS_7447A, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = PPC_OPROFILE_G4, .machine_check = machine_check_generic, .platform = "ppc7450", }, { /* 7448 */ .pvr_mask = 0xffff0000, .pvr_value = 0x80040000, .cpu_name = "7448", .cpu_features = CPU_FTRS_7448, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = PPC_OPROFILE_G4, .machine_check = machine_check_generic, .platform = "ppc7450", }, { /* 82xx (8240, 8245, 8260 are all 603e cores) */ .pvr_mask = 0x7fff0000, .pvr_value = 0x00810000, .cpu_name = "82xx", .cpu_features = CPU_FTRS_82XX, .cpu_user_features = COMMON_USER, .mmu_features = 0, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, .machine_check = machine_check_generic, .platform = "ppc603", }, { /* All G2_LE (603e core, plus some) 
have the same pvr */ .pvr_mask = 0x7fff0000, .pvr_value = 0x00820000, .cpu_name = "G2_LE", .cpu_features = CPU_FTRS_G2_LE, .cpu_user_features = COMMON_USER, .mmu_features = MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, .machine_check = machine_check_generic, .platform = "ppc603", }, { /* e300c1 (a 603e core, plus some) on 83xx */ .pvr_mask = 0x7fff0000, .pvr_value = 0x00830000, .cpu_name = "e300c1", .cpu_features = CPU_FTRS_E300, .cpu_user_features = COMMON_USER, .mmu_features = MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, .machine_check = machine_check_generic, .platform = "ppc603", }, { /* e300c2 (an e300c1 core, plus some, minus FPU) on 83xx */ .pvr_mask = 0x7fff0000, .pvr_value = 0x00840000, .cpu_name = "e300c2", .cpu_features = CPU_FTRS_E300C2, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, .mmu_features = MMU_FTR_USE_HIGH_BATS | MMU_FTR_NEED_DTLB_SW_LRU, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, .machine_check = machine_check_generic, .platform = "ppc603", }, { /* e300c3 (e300c1, plus one IU, half cache size) on 83xx */ .pvr_mask = 0x7fff0000, .pvr_value = 0x00850000, .cpu_name = "e300c3", .cpu_features = CPU_FTRS_E300, .cpu_user_features = COMMON_USER, .mmu_features = MMU_FTR_USE_HIGH_BATS | MMU_FTR_NEED_DTLB_SW_LRU, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, .machine_check = machine_check_generic, .num_pmcs = 4, .oprofile_cpu_type = "ppc/e300", .oprofile_type = PPC_OPROFILE_FSL_EMB, .platform = "ppc603", }, { /* e300c4 (e300c1, plus one IU) */ .pvr_mask = 0x7fff0000, .pvr_value = 0x00860000, .cpu_name = "e300c4", .cpu_features = CPU_FTRS_E300, .cpu_user_features = COMMON_USER, .mmu_features = MMU_FTR_USE_HIGH_BATS | MMU_FTR_NEED_DTLB_SW_LRU, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, .machine_check = machine_check_generic, .num_pmcs = 4, .oprofile_cpu_type = "ppc/e300", 
.oprofile_type = PPC_OPROFILE_FSL_EMB, .platform = "ppc603", }, { /* default match, we assume split I/D cache & TB (non-601)... */ .pvr_mask = 0x00000000, .pvr_value = 0x00000000, .cpu_name = "(generic PPC)", .cpu_features = CPU_FTRS_CLASSIC32, .cpu_user_features = COMMON_USER, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_generic, .platform = "ppc603", }, #endif /* CONFIG_PPC_BOOK3S_32 */ #ifdef CONFIG_8xx { /* 8xx */ .pvr_mask = 0xffff0000, .pvr_value = 0x00500000, .cpu_name = "8xx", /* CPU_FTR_MAYBE_CAN_DOZE is possible, * if the 8xx code is there.... */ .cpu_features = CPU_FTRS_8XX, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, .mmu_features = MMU_FTR_TYPE_8xx, .icache_bsize = 16, .dcache_bsize = 16, .platform = "ppc823", }, #endif /* CONFIG_8xx */ #ifdef CONFIG_40x { /* 403GC */ .pvr_mask = 0xffffff00, .pvr_value = 0x00200200, .cpu_name = "403GC", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 16, .dcache_bsize = 16, .machine_check = machine_check_4xx, .platform = "ppc403", }, { /* 403GCX */ .pvr_mask = 0xffffff00, .pvr_value = 0x00201400, .cpu_name = "403GCX", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 16, .dcache_bsize = 16, .machine_check = machine_check_4xx, .platform = "ppc403", }, { /* 403G ?? 
*/ .pvr_mask = 0xffff0000, .pvr_value = 0x00200000, .cpu_name = "403G ??", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 16, .dcache_bsize = 16, .machine_check = machine_check_4xx, .platform = "ppc403", }, { /* 405GP */ .pvr_mask = 0xffff0000, .pvr_value = 0x40110000, .cpu_name = "405GP", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* STB 03xxx */ .pvr_mask = 0xffff0000, .pvr_value = 0x40130000, .cpu_name = "STB03xxx", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* STB 04xxx */ .pvr_mask = 0xffff0000, .pvr_value = 0x41810000, .cpu_name = "STB04xxx", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* NP405L */ .pvr_mask = 0xffff0000, .pvr_value = 0x41610000, .cpu_name = "NP405L", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* NP4GS3 */ .pvr_mask = 0xffff0000, .pvr_value = 0x40B10000, .cpu_name = "NP4GS3", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* NP405H */ 
.pvr_mask = 0xffff0000, .pvr_value = 0x41410000, .cpu_name = "NP405H", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405GPr */ .pvr_mask = 0xffff0000, .pvr_value = 0x50910000, .cpu_name = "405GPr", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* STBx25xx */ .pvr_mask = 0xffff0000, .pvr_value = 0x51510000, .cpu_name = "STBx25xx", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405LP */ .pvr_mask = 0xffff0000, .pvr_value = 0x41F10000, .cpu_name = "405LP", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* Xilinx Virtex-II Pro */ .pvr_mask = 0xfffff000, .pvr_value = 0x20010000, .cpu_name = "Virtex-II Pro", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* Xilinx Virtex-4 FX */ .pvr_mask = 0xfffff000, .pvr_value = 0x20011000, .cpu_name = "Virtex-4 FX", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = 
"ppc405", }, { /* 405EP */ .pvr_mask = 0xffff0000, .pvr_value = 0x51210000, .cpu_name = "405EP", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405EX Rev. A/B with Security */ .pvr_mask = 0xffff000f, .pvr_value = 0x12910007, .cpu_name = "405EX Rev. A/B", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405EX Rev. C without Security */ .pvr_mask = 0xffff000f, .pvr_value = 0x1291000d, .cpu_name = "405EX Rev. C", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405EX Rev. C with Security */ .pvr_mask = 0xffff000f, .pvr_value = 0x1291000f, .cpu_name = "405EX Rev. C", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405EX Rev. D without Security */ .pvr_mask = 0xffff000f, .pvr_value = 0x12910003, .cpu_name = "405EX Rev. D", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405EX Rev. D with Security */ .pvr_mask = 0xffff000f, .pvr_value = 0x12910005, .cpu_name = "405EX Rev. 
D", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405EXr Rev. A/B without Security */ .pvr_mask = 0xffff000f, .pvr_value = 0x12910001, .cpu_name = "405EXr Rev. A/B", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405EXr Rev. C without Security */ .pvr_mask = 0xffff000f, .pvr_value = 0x12910009, .cpu_name = "405EXr Rev. C", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405EXr Rev. C with Security */ .pvr_mask = 0xffff000f, .pvr_value = 0x1291000b, .cpu_name = "405EXr Rev. C", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405EXr Rev. D without Security */ .pvr_mask = 0xffff000f, .pvr_value = 0x12910000, .cpu_name = "405EXr Rev. D", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405EXr Rev. D with Security */ .pvr_mask = 0xffff000f, .pvr_value = 0x12910002, .cpu_name = "405EXr Rev. 
D", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405EZ */ .pvr_mask = 0xffff0000, .pvr_value = 0x41510000, .cpu_name = "405EZ", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* APM8018X */ .pvr_mask = 0xffff0000, .pvr_value = 0x7ff11432, .cpu_name = "APM8018X", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* default match */ .pvr_mask = 0x00000000, .pvr_value = 0x00000000, .cpu_name = "(generic 40x PPC)", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", } #endif /* CONFIG_40x */ #ifdef CONFIG_44x { .pvr_mask = 0xf0000fff, .pvr_value = 0x40000850, .cpu_name = "440GR Rev. A", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc440", }, { /* Use logical PVR for 440EP (logical pvr = pvr | 0x8) */ .pvr_mask = 0xf0000fff, .pvr_value = 0x40000858, .cpu_name = "440EP Rev. 
A", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440ep, .machine_check = machine_check_4xx, .platform = "ppc440", }, { .pvr_mask = 0xf0000fff, .pvr_value = 0x400008d3, .cpu_name = "440GR Rev. B", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc440", }, { /* Matches both physical and logical PVR for 440EP (logical pvr = pvr | 0x8) */ .pvr_mask = 0xf0000ff7, .pvr_value = 0x400008d4, .cpu_name = "440EP Rev. C", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440ep, .machine_check = machine_check_4xx, .platform = "ppc440", }, { /* Use logical PVR for 440EP (logical pvr = pvr | 0x8) */ .pvr_mask = 0xf0000fff, .pvr_value = 0x400008db, .cpu_name = "440EP Rev. 
B", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440ep, .machine_check = machine_check_4xx, .platform = "ppc440", }, { /* 440GRX */ .pvr_mask = 0xf0000ffb, .pvr_value = 0x200008D0, .cpu_name = "440GRX", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440grx, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* Use logical PVR for 440EPx (logical pvr = pvr | 0x8) */ .pvr_mask = 0xf0000ffb, .pvr_value = 0x200008D8, .cpu_name = "440EPX", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440epx, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 440GP Rev. B */ .pvr_mask = 0xf0000fff, .pvr_value = 0x40000440, .cpu_name = "440GP Rev. B", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc440gp", }, { /* 440GP Rev. C */ .pvr_mask = 0xf0000fff, .pvr_value = 0x40000481, .cpu_name = "440GP Rev. C", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc440gp", }, { /* 440GX Rev. A */ .pvr_mask = 0xf0000fff, .pvr_value = 0x50000850, .cpu_name = "440GX Rev. A", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440gx, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 440GX Rev. 
B */ .pvr_mask = 0xf0000fff, .pvr_value = 0x50000851, .cpu_name = "440GX Rev. B", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440gx, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 440GX Rev. C */ .pvr_mask = 0xf0000fff, .pvr_value = 0x50000892, .cpu_name = "440GX Rev. C", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440gx, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 440GX Rev. F */ .pvr_mask = 0xf0000fff, .pvr_value = 0x50000894, .cpu_name = "440GX Rev. F", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440gx, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 440SP Rev. A */ .pvr_mask = 0xfff00fff, .pvr_value = 0x53200891, .cpu_name = "440SP Rev. A", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc440", }, { /* 440SPe Rev. A */ .pvr_mask = 0xfff00fff, .pvr_value = 0x53400890, .cpu_name = "440SPe Rev. A", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440spe, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 440SPe Rev. B */ .pvr_mask = 0xfff00fff, .pvr_value = 0x53400891, .cpu_name = "440SPe Rev. 
B", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440spe, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 440 in Xilinx Virtex-5 FXT */ .pvr_mask = 0xfffffff0, .pvr_value = 0x7ff21910, .cpu_name = "440 in Virtex-5 FXT", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440x5, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 460EX */ .pvr_mask = 0xffff0006, .pvr_value = 0x13020002, .cpu_name = "460EX", .cpu_features = CPU_FTRS_440x6, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_460ex, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 460EX Rev B */ .pvr_mask = 0xffff0007, .pvr_value = 0x13020004, .cpu_name = "460EX Rev. B", .cpu_features = CPU_FTRS_440x6, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_460ex, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 460GT */ .pvr_mask = 0xffff0006, .pvr_value = 0x13020000, .cpu_name = "460GT", .cpu_features = CPU_FTRS_440x6, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_460gt, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 460GT Rev B */ .pvr_mask = 0xffff0007, .pvr_value = 0x13020005, .cpu_name = "460GT Rev. 
B", .cpu_features = CPU_FTRS_440x6, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_460gt, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 460SX */ .pvr_mask = 0xffffff00, .pvr_value = 0x13541800, .cpu_name = "460SX", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_460sx, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 464 in APM821xx */ .pvr_mask = 0xfffffff0, .pvr_value = 0x12C41C80, .cpu_name = "APM821XX", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_apm821xx, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 476 DD2 core */ .pvr_mask = 0xffffffff, .pvr_value = 0x11a52080, .cpu_name = "476", .cpu_features = CPU_FTRS_47X | CPU_FTR_476_DD2, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL, .icache_bsize = 32, .dcache_bsize = 128, .machine_check = machine_check_47x, .platform = "ppc470", }, { /* 476fpe */ .pvr_mask = 0xffff0000, .pvr_value = 0x7ff50000, .cpu_name = "476fpe", .cpu_features = CPU_FTRS_47X | CPU_FTR_476_DD2, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL, .icache_bsize = 32, .dcache_bsize = 128, .machine_check = machine_check_47x, .platform = "ppc470", }, { /* 476 iss */ .pvr_mask = 0xffff0000, .pvr_value = 0x00050000, .cpu_name = "476", .cpu_features = CPU_FTRS_47X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL, 
.icache_bsize = 32, .dcache_bsize = 128, .machine_check = machine_check_47x, .platform = "ppc470", }, { /* 476 others */ .pvr_mask = 0xffff0000, .pvr_value = 0x11a50000, .cpu_name = "476", .cpu_features = CPU_FTRS_47X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL, .icache_bsize = 32, .dcache_bsize = 128, .machine_check = machine_check_47x, .platform = "ppc470", }, { /* default match */ .pvr_mask = 0x00000000, .pvr_value = 0x00000000, .cpu_name = "(generic 44x PPC)", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc440", } #endif /* CONFIG_44x */ #ifdef CONFIG_E200 { /* e200z5 */ .pvr_mask = 0xfff00000, .pvr_value = 0x81000000, .cpu_name = "e200z5", /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ .cpu_features = CPU_FTRS_E200, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_UNIFIED_CACHE, .mmu_features = MMU_FTR_TYPE_FSL_E, .dcache_bsize = 32, .machine_check = machine_check_e200, .platform = "ppc5554", }, { /* e200z6 */ .pvr_mask = 0xfff00000, .pvr_value = 0x81100000, .cpu_name = "e200z6", /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ .cpu_features = CPU_FTRS_E200, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_SPE_COMP | PPC_FEATURE_HAS_EFP_SINGLE_COMP | PPC_FEATURE_UNIFIED_CACHE, .mmu_features = MMU_FTR_TYPE_FSL_E, .dcache_bsize = 32, .machine_check = machine_check_e200, .platform = "ppc5554", }, { /* default match */ .pvr_mask = 0x00000000, .pvr_value = 0x00000000, .cpu_name = "(generic E200 PPC)", .cpu_features = CPU_FTRS_E200, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_UNIFIED_CACHE, .mmu_features = MMU_FTR_TYPE_FSL_E, .dcache_bsize = 32, .cpu_setup = __setup_cpu_e200, .machine_check = machine_check_e200, .platform = "ppc5554", } 
#endif /* CONFIG_E200 */ #endif /* CONFIG_PPC32 */ #ifdef CONFIG_E500 #ifdef CONFIG_PPC32 #ifndef CONFIG_PPC_E500MC { /* e500 */ .pvr_mask = 0xffff0000, .pvr_value = 0x80200000, .cpu_name = "e500", .cpu_features = CPU_FTRS_E500, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_SPE_COMP | PPC_FEATURE_HAS_EFP_SINGLE_COMP, .cpu_user_features2 = PPC_FEATURE2_ISEL, .mmu_features = MMU_FTR_TYPE_FSL_E, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .oprofile_cpu_type = "ppc/e500", .oprofile_type = PPC_OPROFILE_FSL_EMB, .cpu_setup = __setup_cpu_e500v1, .machine_check = machine_check_e500, .platform = "ppc8540", }, { /* e500v2 */ .pvr_mask = 0xffff0000, .pvr_value = 0x80210000, .cpu_name = "e500v2", .cpu_features = CPU_FTRS_E500_2, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_SPE_COMP | PPC_FEATURE_HAS_EFP_SINGLE_COMP | PPC_FEATURE_HAS_EFP_DOUBLE_COMP, .cpu_user_features2 = PPC_FEATURE2_ISEL, .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .oprofile_cpu_type = "ppc/e500", .oprofile_type = PPC_OPROFILE_FSL_EMB, .cpu_setup = __setup_cpu_e500v2, .machine_check = machine_check_e500, .platform = "ppc8548", }, #else { /* e500mc */ .pvr_mask = 0xffff0000, .pvr_value = 0x80230000, .cpu_name = "e500mc", .cpu_features = CPU_FTRS_E500MC, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .cpu_user_features2 = PPC_FEATURE2_ISEL, .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX, .icache_bsize = 64, .dcache_bsize = 64, .num_pmcs = 4, .oprofile_cpu_type = "ppc/e500mc", .oprofile_type = PPC_OPROFILE_FSL_EMB, .cpu_setup = __setup_cpu_e500mc, .machine_check = machine_check_e500mc, .platform = "ppce500mc", }, #endif /* CONFIG_PPC_E500MC */ #endif /* CONFIG_PPC32 */ #ifdef CONFIG_PPC_E500MC { /* e5500 */ .pvr_mask = 0xffff0000, .pvr_value = 0x80240000, .cpu_name = "e5500", .cpu_features = CPU_FTRS_E5500, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, 
.cpu_user_features2 = PPC_FEATURE2_ISEL, .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX, .icache_bsize = 64, .dcache_bsize = 64, .num_pmcs = 4, .oprofile_cpu_type = "ppc/e500mc", .oprofile_type = PPC_OPROFILE_FSL_EMB, .cpu_setup = __setup_cpu_e5500, #ifndef CONFIG_PPC32 .cpu_restore = __restore_cpu_e5500, #endif .machine_check = machine_check_e500mc, .platform = "ppce5500", }, { /* e6500 */ .pvr_mask = 0xffff0000, .pvr_value = 0x80400000, .cpu_name = "e6500", .cpu_features = CPU_FTRS_E6500, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU | PPC_FEATURE_HAS_ALTIVEC_COMP, .cpu_user_features2 = PPC_FEATURE2_ISEL, .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX, .icache_bsize = 64, .dcache_bsize = 64, .num_pmcs = 6, .oprofile_cpu_type = "ppc/e6500", .oprofile_type = PPC_OPROFILE_FSL_EMB, .cpu_setup = __setup_cpu_e6500, #ifndef CONFIG_PPC32 .cpu_restore = __restore_cpu_e6500, #endif .machine_check = machine_check_e500mc, .platform = "ppce6500", }, #endif /* CONFIG_PPC_E500MC */ #ifdef CONFIG_PPC32 { /* default match */ .pvr_mask = 0x00000000, .pvr_value = 0x00000000, .cpu_name = "(generic E500 PPC)", .cpu_features = CPU_FTRS_E500, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_SPE_COMP | PPC_FEATURE_HAS_EFP_SINGLE_COMP, .mmu_features = MMU_FTR_TYPE_FSL_E, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_e500, .platform = "powerpc", } #endif /* CONFIG_PPC32 */ #endif /* CONFIG_E500 */ }; static struct cpu_spec the_cpu_spec; static struct cpu_spec * __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s) { struct cpu_spec *t = &the_cpu_spec; struct cpu_spec old; t = PTRRELOC(t); old = *t; /* Copy everything, then do fixups */ *t = *s; /* * If we are overriding a previous value derived from the real * PVR with a new value obtained using a logical PVR value, * don't modify the performance monitor fields. 
*/ if (old.num_pmcs && !s->num_pmcs) { t->num_pmcs = old.num_pmcs; t->pmc_type = old.pmc_type; t->oprofile_type = old.oprofile_type; t->oprofile_mmcra_sihv = old.oprofile_mmcra_sihv; t->oprofile_mmcra_sipr = old.oprofile_mmcra_sipr; t->oprofile_mmcra_clear = old.oprofile_mmcra_clear; /* * If we have passed through this logic once before and * have pulled the default case because the real PVR was * not found inside cpu_specs[], then we are possibly * running in compatibility mode. In that case, let the * oprofiler know which set of compatibility counters to * pull from by making sure the oprofile_cpu_type string * is set to that of compatibility mode. If the * oprofile_cpu_type already has a value, then we are * possibly overriding a real PVR with a logical one, * and, in that case, keep the current value for * oprofile_cpu_type. */ if (old.oprofile_cpu_type != NULL) { t->oprofile_cpu_type = old.oprofile_cpu_type; t->oprofile_type = old.oprofile_type; } } *PTRRELOC(&cur_cpu_spec) = &the_cpu_spec; /* * Set the base platform string once; assumes * we're called with real pvr first. */ if (*PTRRELOC(&powerpc_base_platform) == NULL) *PTRRELOC(&powerpc_base_platform) = t->platform; #if defined(CONFIG_PPC64) || defined(CONFIG_BOOKE) /* ppc64 and booke expect identify_cpu to also call setup_cpu for * that processor. I will consolidate that at a later time, for now, * just use #ifdef. We also don't need to PTRRELOC the function * pointer on ppc64 and booke as we are running at 0 in real mode * on ppc64 and reloc_offset is always 0 on booke. */ if (t->cpu_setup) { t->cpu_setup(offset, t); } #endif /* CONFIG_PPC64 || CONFIG_BOOKE */ return t; } struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr) { struct cpu_spec *s = cpu_specs; int i; s = PTRRELOC(s); for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) { if ((pvr & s->pvr_mask) == s->pvr_value) return setup_cpu_spec(offset, s); } BUG(); return NULL; }
gpl-2.0
deedwar/kernel_sony_msm8x60
drivers/video/msm/mhl/mhl_8334.c
587
33011
/* Copyright (c) 2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/types.h> #include <linux/bitops.h> #include <linux/clk.h> #include <linux/mutex.h> #include <mach/msm_hdmi_audio.h> #include <mach/clk.h> #include <mach/msm_iomap.h> #include <mach/socinfo.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/regulator/consumer.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/mhl_8334.h> #include "msm_fb.h" #include "external_common.h" #include "hdmi_msm.h" #include "mhl_i2c_utils.h" static struct i2c_device_id mhl_sii_i2c_id[] = { { MHL_DRIVER_NAME, 0 }, { } }; struct mhl_msm_state_t *mhl_msm_state; spinlock_t mhl_state_lock; static int mhl_i2c_probe(struct i2c_client *client,\ const struct i2c_device_id *id); static int mhl_i2c_remove(struct i2c_client *client); static void force_usb_switch_open(void); static void release_usb_switch_open(void); static void switch_mode(enum mhl_st_type to_mode); static irqreturn_t mhl_tx_isr(int irq, void *dev_id); void (*notify_usb_online)(int online); static void mhl_drive_hpd(uint8_t to_state); static struct i2c_driver mhl_sii_i2c_driver = { .driver = { .name = MHL_DRIVER_NAME, .owner = THIS_MODULE, }, .probe = mhl_i2c_probe, /*.remove = __exit_p(mhl_i2c_remove),*/ .remove = mhl_i2c_remove, .id_table = mhl_sii_i2c_id, }; static void mhl_sii_reset_pin(int on) { 
gpio_set_value(mhl_msm_state->mhl_data->gpio_mhl_reset, on); return; } static int mhl_sii_reg_enable(void) { static struct regulator *reg_8038_l20; static struct regulator *reg_8038_l11; int rc; pr_debug("Inside %s\n", __func__); if (!reg_8038_l20) { reg_8038_l20 = regulator_get(&mhl_msm_state->i2c_client->dev, "mhl_avcc12"); if (IS_ERR(reg_8038_l20)) { pr_err("could not get reg_8038_l20, rc = %ld\n", PTR_ERR(reg_8038_l20)); return -ENODEV; } rc = regulator_enable(reg_8038_l20); if (rc) { pr_err("'%s' regulator enable failed, rc=%d\n", "mhl_l20", rc); return rc; } else pr_debug("REGULATOR L20 ENABLED\n"); } if (!reg_8038_l11) { reg_8038_l11 = regulator_get(&mhl_msm_state->i2c_client->dev, "mhl_iovcc18"); if (IS_ERR(reg_8038_l11)) { pr_err("could not get reg_8038_l11, rc = %ld\n", PTR_ERR(reg_8038_l11)); return -ENODEV; } rc = regulator_enable(reg_8038_l11); if (rc) { pr_err("'%s' regulator enable failed, rc=%d\n", "mhl_l11", rc); return rc; } else pr_debug("REGULATOR L11 ENABLED\n"); } return rc; } static void mhl_sii_power_on(void) { int ret; pr_debug("MHL SII POWER ON\n"); if (!mhl_msm_state->mhl_data->gpio_mhl_power) { pr_warn("%s: no power reqd for this platform\n", __func__); return; } ret = gpio_request(mhl_msm_state->mhl_data->gpio_mhl_power, "W_PWR"); if (ret < 0) { pr_err("MHL_POWER_GPIO req failed: %d\n", ret); return; } ret = gpio_direction_output(mhl_msm_state->mhl_data->gpio_mhl_power, 1); if (ret < 0) { pr_err( "SET GPIO MHL_POWER_GPIO direction failed: %d\n", ret); gpio_free(mhl_msm_state->mhl_data->gpio_mhl_power); return; } gpio_set_value(mhl_msm_state->mhl_data->gpio_mhl_power, 1); if (mhl_sii_reg_enable()) pr_err("Regulator enable failed\n"); pr_debug("MHL SII POWER ON Successful\n"); return; } /* * Request for GPIO allocations * Set appropriate GPIO directions */ static int mhl_sii_gpio_setup(int on) { int ret; if (on) { if (mhl_msm_state->mhl_data->gpio_hdmi_mhl_mux) { ret = gpio_request(mhl_msm_state->\ mhl_data->gpio_hdmi_mhl_mux, "W_MUX"); 
if (ret < 0) { pr_err("GPIO HDMI_MHL MUX req failed:%d\n", ret); return -EBUSY; } ret = gpio_direction_output( mhl_msm_state->mhl_data->gpio_hdmi_mhl_mux, 0); if (ret < 0) { pr_err("SET GPIO HDMI_MHL dir failed:%d\n", ret); gpio_free(mhl_msm_state->\ mhl_data->gpio_hdmi_mhl_mux); return -EBUSY; } msleep(50); gpio_set_value(mhl_msm_state->\ mhl_data->gpio_hdmi_mhl_mux, 0); pr_debug("SET GPIO HDMI MHL MUX %d to 0\n", mhl_msm_state->mhl_data->gpio_hdmi_mhl_mux); } ret = gpio_request(mhl_msm_state->mhl_data->gpio_mhl_reset, "W_RST#"); if (ret < 0) { pr_err("GPIO RESET request failed: %d\n", ret); return -EBUSY; } ret = gpio_direction_output(mhl_msm_state->\ mhl_data->gpio_mhl_reset, 1); if (ret < 0) { pr_err("SET GPIO RESET direction failed: %d\n", ret); gpio_free(mhl_msm_state->mhl_data->gpio_mhl_reset); gpio_free(mhl_msm_state->mhl_data->gpio_hdmi_mhl_mux); return -EBUSY; } ret = gpio_request(mhl_msm_state->mhl_data->gpio_mhl_int, "W_INT"); if (ret < 0) { pr_err("GPIO INT request failed: %d\n", ret); gpio_free(mhl_msm_state->mhl_data->gpio_mhl_reset); gpio_free(mhl_msm_state->mhl_data->gpio_hdmi_mhl_mux); return -EBUSY; } ret = gpio_direction_input(mhl_msm_state->\ mhl_data->gpio_mhl_int); if (ret < 0) { pr_err("SET GPIO INTR direction failed: %d\n", ret); gpio_free(mhl_msm_state->mhl_data->gpio_mhl_reset); gpio_free(mhl_msm_state->mhl_data->gpio_mhl_int); gpio_free(mhl_msm_state->mhl_data->gpio_hdmi_mhl_mux); return -EBUSY; } } else { gpio_free(mhl_msm_state->mhl_data->gpio_mhl_reset); gpio_free(mhl_msm_state->mhl_data->gpio_mhl_int); gpio_free(mhl_msm_state->mhl_data->gpio_hdmi_mhl_mux); gpio_free(mhl_msm_state->mhl_data->gpio_mhl_power); } return 0; } bool mhl_is_connected(void) { return true; } /* USB_HANDSHAKING FUNCTIONS */ int mhl_device_discovery(const char *name, int *result) { int timeout ; mhl_i2c_reg_write(TX_PAGE_3, 0x0010, 0x27); msleep(50); if (mhl_msm_state->cur_state == POWER_STATE_D3) { /* give MHL driver chance to handle RGND interrupt */ 
INIT_COMPLETION(mhl_msm_state->rgnd_done); timeout = wait_for_completion_interruptible_timeout (&mhl_msm_state->rgnd_done, HZ/2); if (!timeout) { /* most likely nothing plugged in USB */ /* USB HOST connected or already in USB mode */ pr_debug("Timedout Returning from discovery mode\n"); *result = MHL_DISCOVERY_RESULT_USB; return 0; } *result = mhl_msm_state->mhl_mode ? MHL_DISCOVERY_RESULT_MHL : MHL_DISCOVERY_RESULT_USB; } else /* not in D3. already in MHL mode */ *result = MHL_DISCOVERY_RESULT_MHL; return 0; } EXPORT_SYMBOL(mhl_device_discovery); int mhl_register_callback(const char *name, void (*callback)(int online)) { pr_debug("%s\n", __func__); if (!callback) return -EINVAL; if (!notify_usb_online) notify_usb_online = callback; return 0; } EXPORT_SYMBOL(mhl_register_callback); int mhl_unregister_callback(const char *name) { pr_debug("%s\n", __func__); if (notify_usb_online) notify_usb_online = NULL; return 0; } EXPORT_SYMBOL(mhl_unregister_callback); static void cbus_reset(void) { uint8_t i; /* * REG_SRST */ mhl_i2c_reg_modify(TX_PAGE_3, 0x0000, BIT3, BIT3); msleep(20); mhl_i2c_reg_modify(TX_PAGE_3, 0x0000, BIT3, 0x00); /* * REG_INTR1 and REG_INTR4 */ mhl_i2c_reg_write(TX_PAGE_L0, 0x0075, BIT6); mhl_i2c_reg_write(TX_PAGE_3, 0x0022, BIT0 | BIT2 | BIT3 | BIT4 | BIT5 | BIT6); /* REG5 */ if (mhl_msm_state->chip_rev_id < 1) mhl_i2c_reg_write(TX_PAGE_3, 0x0024, BIT3 | BIT4); else /*REG5 Mask disabled due to auto FIFO reset ??*/ mhl_i2c_reg_write(TX_PAGE_3, 0x0024, 0x00); /* Unmask CBUS1 Intrs */ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0009, BIT2 | BIT3 | BIT4 | BIT5 | BIT6); /* Unmask CBUS2 Intrs */ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x001F, BIT2 | BIT3); for (i = 0; i < 4; i++) { /* * Enable WRITE_STAT interrupt for writes to * all 4 MSC Status registers. */ mhl_i2c_reg_write(TX_PAGE_CBUS, (0xE0 + i), 0xFF); /* * Enable SET_INT interrupt for writes to * all 4 MSC Interrupt registers. 
*/ mhl_i2c_reg_write(TX_PAGE_CBUS, (0xF0 + i), 0xFF); } } static void init_cbus_regs(void) { uint8_t regval; /* Increase DDC translation layer timer*/ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0007, 0xF2); /* Drive High Time */ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0036, 0x03); /* Use programmed timing */ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0039, 0x30); /* CBUS Drive Strength */ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0040, 0x03); /* * Write initial default settings * to devcap regs: default settings */ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0080 | DEVCAP_OFFSET_DEV_STATE, DEVCAP_VAL_DEV_STATE); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0080 | DEVCAP_OFFSET_MHL_VERSION, DEVCAP_VAL_MHL_VERSION); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0080 | DEVCAP_OFFSET_DEV_CAT, DEVCAP_VAL_DEV_CAT); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0080 | DEVCAP_OFFSET_ADOPTER_ID_H, DEVCAP_VAL_ADOPTER_ID_H); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0080 | DEVCAP_OFFSET_ADOPTER_ID_L, DEVCAP_VAL_ADOPTER_ID_L); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0080 | DEVCAP_OFFSET_VID_LINK_MODE, DEVCAP_VAL_VID_LINK_MODE); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0080 | DEVCAP_OFFSET_AUD_LINK_MODE, DEVCAP_VAL_AUD_LINK_MODE); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0080 | DEVCAP_OFFSET_VIDEO_TYPE, DEVCAP_VAL_VIDEO_TYPE); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0080 | DEVCAP_OFFSET_LOG_DEV_MAP, DEVCAP_VAL_LOG_DEV_MAP); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0080 | DEVCAP_OFFSET_BANDWIDTH, DEVCAP_VAL_BANDWIDTH); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0080 | DEVCAP_OFFSET_FEATURE_FLAG, DEVCAP_VAL_FEATURE_FLAG); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0080 | DEVCAP_OFFSET_DEVICE_ID_H, DEVCAP_VAL_DEVICE_ID_H); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0080 | DEVCAP_OFFSET_DEVICE_ID_L, DEVCAP_VAL_DEVICE_ID_L); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0080 | DEVCAP_OFFSET_SCRATCHPAD_SIZE, DEVCAP_VAL_SCRATCHPAD_SIZE); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0080 | DEVCAP_OFFSET_INT_STAT_SIZE, DEVCAP_VAL_INT_STAT_SIZE); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0080 | DEVCAP_OFFSET_RESERVED, DEVCAP_VAL_RESERVED); /* Make bits 2,3 
(initiator timeout) to 1,1 * for register CBUS_LINK_CONTROL_2 * REG_CBUS_LINK_CONTROL_2 */ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x0031); regval = (regval | 0x0C); /* REG_CBUS_LINK_CONTROL_2 */ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0031, regval); /* REG_MSC_TIMEOUT_LIMIT */ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0022, 0x0F); /* REG_CBUS_LINK_CONTROL_1 */ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0030, 0x01); /* disallow vendor specific commands */ mhl_i2c_reg_modify(TX_PAGE_CBUS, 0x002E, BIT4, BIT4); } /* * Configure the initial reg settings */ static void mhl_init_reg_settings(bool mhl_disc_en) { /* * ============================================ * POWER UP * ============================================ */ /* Power up 1.2V core */ mhl_i2c_reg_write(TX_PAGE_L1, 0x003D, 0x3F); /* * Wait for the source power to be enabled * before enabling pll clocks. */ msleep(50); /* Enable Tx PLL Clock */ mhl_i2c_reg_write(TX_PAGE_2, 0x0011, 0x01); /* Enable Tx Clock Path and Equalizer */ mhl_i2c_reg_write(TX_PAGE_2, 0x0012, 0x11); /* Tx Source Termination ON */ mhl_i2c_reg_write(TX_PAGE_3, 0x0030, 0x10); /* Enable 1X MHL Clock output */ mhl_i2c_reg_write(TX_PAGE_3, 0x0035, 0xAC); /* Tx Differential Driver Config */ mhl_i2c_reg_write(TX_PAGE_3, 0x0031, 0x3C); mhl_i2c_reg_write(TX_PAGE_3, 0x0033, 0xD9); /* PLL Bandwidth Control */ mhl_i2c_reg_write(TX_PAGE_3, 0x0037, 0x02); /* * ============================================ * Analog PLL Control * ============================================ */ /* Enable Rx PLL clock */ mhl_i2c_reg_write(TX_PAGE_L0, 0x0080, 0x00); mhl_i2c_reg_write(TX_PAGE_L0, 0x00F8, 0x0C); mhl_i2c_reg_write(TX_PAGE_L0, 0x0085, 0x02); mhl_i2c_reg_write(TX_PAGE_2, 0x0000, 0x00); mhl_i2c_reg_write(TX_PAGE_2, 0x0013, 0x60); /* PLL Cal ref sel */ mhl_i2c_reg_write(TX_PAGE_2, 0x0017, 0x03); /* VCO Cal */ mhl_i2c_reg_write(TX_PAGE_2, 0x001A, 0x20); /* Auto EQ */ mhl_i2c_reg_write(TX_PAGE_2, 0x0022, 0xE0); mhl_i2c_reg_write(TX_PAGE_2, 0x0023, 0xC0); mhl_i2c_reg_write(TX_PAGE_2, 0x0024, 
0xA0); mhl_i2c_reg_write(TX_PAGE_2, 0x0025, 0x80); mhl_i2c_reg_write(TX_PAGE_2, 0x0026, 0x60); mhl_i2c_reg_write(TX_PAGE_2, 0x0027, 0x40); mhl_i2c_reg_write(TX_PAGE_2, 0x0028, 0x20); mhl_i2c_reg_write(TX_PAGE_2, 0x0029, 0x00); /* Rx PLL Bandwidth 4MHz */ mhl_i2c_reg_write(TX_PAGE_2, 0x0031, 0x0A); /* Rx PLL Bandwidth value from I2C */ mhl_i2c_reg_write(TX_PAGE_2, 0x0045, 0x06); mhl_i2c_reg_write(TX_PAGE_2, 0x004B, 0x06); /* Manual zone control */ mhl_i2c_reg_write(TX_PAGE_2, 0x004C, 0xE0); /* PLL Mode value */ mhl_i2c_reg_write(TX_PAGE_2, 0x004D, 0x00); mhl_i2c_reg_write(TX_PAGE_L0, 0x0008, 0x35); /* * Discovery Control and Status regs * Setting De-glitch time to 50 ms (default) * Switch Control Disabled */ mhl_i2c_reg_write(TX_PAGE_3, 0x0011, 0xAD); /* 1.8V CBUS VTH */ mhl_i2c_reg_write(TX_PAGE_3, 0x0014, 0x55); /* RGND and single Discovery attempt */ mhl_i2c_reg_write(TX_PAGE_3, 0x0015, 0x11); /* Ignore VBUS */ mhl_i2c_reg_write(TX_PAGE_3, 0x0017, 0x82); mhl_i2c_reg_write(TX_PAGE_3, 0x0018, 0x24); /* Pull-up resistance off for IDLE state */ mhl_i2c_reg_write(TX_PAGE_3, 0x0013, 0x8C); /* Enable CBUS Discovery */ if (mhl_disc_en) /* Enable MHL Discovery */ mhl_i2c_reg_write(TX_PAGE_3, 0x0010, 0x27); else /* Disable MHL Discovery */ mhl_i2c_reg_write(TX_PAGE_3, 0x0010, 0x26); mhl_i2c_reg_write(TX_PAGE_3, 0x0016, 0x20); /* MHL CBUS Discovery - immediate comm. 
*/ mhl_i2c_reg_write(TX_PAGE_3, 0x0012, 0x86); /* Do not force HPD to 0 during wake-up from D3 */ if (mhl_msm_state->cur_state != POWER_STATE_D0_MHL) mhl_drive_hpd(HPD_DOWN); /* Enable Auto Soft RESET */ mhl_i2c_reg_write(TX_PAGE_3, 0x0000, 0x084); /* HDMI Transcode mode enable */ mhl_i2c_reg_write(TX_PAGE_L0, 0x000D, 0x1C); cbus_reset(); init_cbus_regs(); } static int mhl_chip_init(void) { /* Read the chip rev ID */ mhl_msm_state->chip_rev_id = mhl_i2c_reg_read(TX_PAGE_L0, 0x04); pr_debug("MHL: chip rev ID read=[%x]\n", mhl_msm_state->chip_rev_id); /* Reset the TX chip */ mhl_sii_reset_pin(1); msleep(20); mhl_sii_reset_pin(0); msleep(20); mhl_sii_reset_pin(1); /* MHL spec requires a 100 ms wait here. */ msleep(100); /* * Need to disable MHL discovery */ mhl_init_reg_settings(true); /* * Power down the chip to the * D3 - a low power standby mode * cable impedance measurement logic is operational */ switch_mode(POWER_STATE_D3); return 0; } /* * I2C probe */ static int mhl_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { int ret = -ENODEV; mhl_msm_state->mhl_data = kzalloc(sizeof(struct msm_mhl_platform_data), GFP_KERNEL); if (!(mhl_msm_state->mhl_data)) { ret = -ENOMEM; pr_err("MHL I2C Probe failed - no mem\n"); goto probe_exit; } mhl_msm_state->i2c_client = client; spin_lock_init(&mhl_state_lock); i2c_set_clientdata(client, mhl_msm_state); mhl_msm_state->mhl_data = client->dev.platform_data; pr_debug("MHL: mhl_msm_state->mhl_data->irq=[%d]\n", mhl_msm_state->mhl_data->irq); /* Init GPIO stuff here */ ret = mhl_sii_gpio_setup(1); if (ret == -1) { pr_err("MHL: mhl_gpio_init has failed\n"); ret = -ENODEV; goto probe_exit; } mhl_sii_power_on(); pr_debug("I2C PROBE successful\n"); return 0; probe_exit: if (mhl_msm_state->mhl_data) { /* free the gpios */ mhl_sii_gpio_setup(0); kfree(mhl_msm_state->mhl_data); mhl_msm_state->mhl_data = NULL; } return ret; } static int mhl_i2c_remove(struct i2c_client *client) { pr_debug("%s\n", __func__); 
mhl_sii_gpio_setup(0); kfree(mhl_msm_state->mhl_data); return 0; } static int __init mhl_msm_init(void) { int32_t ret; pr_debug("%s\n", __func__); mhl_msm_state = kzalloc(sizeof(struct mhl_msm_state_t), GFP_KERNEL); if (!mhl_msm_state) { pr_err("mhl_msm_init FAILED: out of memory\n"); ret = -ENOMEM; goto init_exit; } mhl_msm_state->i2c_client = NULL; ret = i2c_add_driver(&mhl_sii_i2c_driver); if (ret) { pr_err("MHL: I2C driver add failed: %d\n", ret); ret = -ENODEV; goto init_exit; } else { if (mhl_msm_state->i2c_client == NULL) { pr_err("MHL: I2C driver add failed\n"); ret = -ENODEV; goto init_exit; } pr_info("MHL: I2C driver added\n"); } /* Request IRQ stuff here */ pr_debug("MHL: mhl_msm_state->mhl_data->irq=[%d]\n", mhl_msm_state->mhl_data->irq); ret = request_threaded_irq(mhl_msm_state->mhl_data->irq, NULL, &mhl_tx_isr, IRQF_TRIGGER_LOW | IRQF_ONESHOT, "mhl_tx_isr", mhl_msm_state); if (ret != 0) { pr_err("request_threaded_irq failed, status: %d\n", ret); ret = -EACCES; /* Error code???? */ goto init_exit; } else pr_debug("request_threaded_irq succeeded\n"); mhl_msm_state->cur_state = POWER_STATE_D0_MHL; /* MHL SII 8334 chip specific init */ mhl_chip_init(); init_completion(&mhl_msm_state->rgnd_done); return 0; init_exit: pr_err("Exiting from the init with err\n"); i2c_del_driver(&mhl_sii_i2c_driver); if (!mhl_msm_state) { kfree(mhl_msm_state); mhl_msm_state = NULL; } return ret; } static void switch_mode(enum mhl_st_type to_mode) { unsigned long flags; switch (to_mode) { case POWER_STATE_D0_NO_MHL: break; case POWER_STATE_D0_MHL: mhl_init_reg_settings(true); /* REG_DISC_CTRL1 */ mhl_i2c_reg_modify(TX_PAGE_3, 0x0010, BIT1 | BIT0, BIT0); /* * TPI_DEVICE_POWER_STATE_CTRL_REG * TX_POWER_STATE_MASK = BIT1 | BIT0 */ mhl_i2c_reg_modify(TX_PAGE_TPI, 0x001E, BIT1 | BIT0, 0x00); break; case POWER_STATE_D3: if (mhl_msm_state->cur_state != POWER_STATE_D3) { /* Force HPD to 0 when not in MHL mode. 
*/ mhl_drive_hpd(HPD_DOWN); /* * Change TMDS termination to high impedance * on disconnection. */ mhl_i2c_reg_write(TX_PAGE_3, 0x0030, 0xD0); msleep(50); mhl_i2c_reg_modify(TX_PAGE_3, 0x0010, BIT1 | BIT0, BIT1); spin_lock_irqsave(&mhl_state_lock, flags); mhl_msm_state->cur_state = POWER_STATE_D3; spin_unlock_irqrestore(&mhl_state_lock, flags); } break; default: break; } } static void mhl_drive_hpd(uint8_t to_state) { if (mhl_msm_state->cur_state != POWER_STATE_D0_MHL) { pr_err("MHL: invalid state to ctrl HPD\n"); return; } pr_debug("%s: To state=[0x%x]\n", __func__, to_state); if (to_state == HPD_UP) { /* * Drive HPD to UP state * * The below two reg configs combined * enable TMDS output. */ /* Enable TMDS on TMDS_CCTRL */ mhl_i2c_reg_modify(TX_PAGE_L0, 0x0080, BIT4, BIT4); /* * Set HPD_OUT_OVR_EN = HPD State * EDID read and Un-force HPD (from low) * propogate to src let HPD float by clearing * HPD OUT OVRRD EN */ mhl_i2c_reg_modify(TX_PAGE_3, 0x0020, BIT4, 0x00); } else { /* * Drive HPD to DOWN state * Disable TMDS Output on REG_TMDS_CCTRL * Enable/Disable TMDS output (MHL TMDS output only) */ mhl_i2c_reg_modify(TX_PAGE_3, 0x20, BIT4 | BIT5, BIT4); mhl_i2c_reg_modify(TX_PAGE_L0, 0x0080, BIT4, 0x00); } return; } static void mhl_msm_connection(void) { uint8_t val; unsigned long flags; pr_debug("%s: cur state = [0x%x]\n", __func__, mhl_msm_state->cur_state); if (mhl_msm_state->cur_state == POWER_STATE_D0_MHL) { /* Already in D0 - MHL power state */ return; } spin_lock_irqsave(&mhl_state_lock, flags); mhl_msm_state->cur_state = POWER_STATE_D0_MHL; spin_unlock_irqrestore(&mhl_state_lock, flags); mhl_i2c_reg_write(TX_PAGE_3, 0x30, 0x10); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x07, 0xF2); /* * Keep the discovery enabled. Need RGND interrupt * Possibly chip disables discovery after MHL_EST?? 
* Need to re-enable here */ val = mhl_i2c_reg_read(TX_PAGE_3, 0x10); mhl_i2c_reg_write(TX_PAGE_3, 0x10, val | BIT(0)); return; } static void mhl_msm_disconnection(void) { /* * MHL TX CTL1 * Disabling Tx termination */ mhl_i2c_reg_write(TX_PAGE_3, 0x30, 0xD0); /* Change HPD line to drive it low */ mhl_drive_hpd(HPD_DOWN); /* switch power state to D3 */ switch_mode(POWER_STATE_D3); return; } /* * If hardware detected a change in impedance and raised an INTR * We check the range of this impedance to infer if the connected * device is MHL or USB and take appropriate actions. */ static int mhl_msm_read_rgnd_int(void) { uint8_t rgnd_imp; /* * DISC STATUS REG 2 * 1:0 RGND * 00 - open (USB) * 01 - 2 kOHM (USB) * 10 - 1 kOHM ***(MHL)**** It's range 800 - 1200 OHM from MHL spec * 11 - short (USB) */ rgnd_imp = (mhl_i2c_reg_read(TX_PAGE_3, 0x001C) & (BIT1 | BIT0)); pr_debug("Imp Range read = %02X\n", (int)rgnd_imp); if (0x02 == rgnd_imp) { pr_debug("MHL: MHL DEVICE!!!\n"); mhl_i2c_reg_modify(TX_PAGE_3, 0x0018, BIT0, BIT0); /* * Handling the MHL event in driver */ mhl_msm_state->mhl_mode = TRUE; if (notify_usb_online) notify_usb_online(1); } else { pr_debug("MHL: NON-MHL DEVICE!!!\n"); mhl_msm_state->mhl_mode = FALSE; mhl_i2c_reg_modify(TX_PAGE_3, 0x0018, BIT3, BIT3); switch_mode(POWER_STATE_D3); } complete(&mhl_msm_state->rgnd_done); return mhl_msm_state->mhl_mode ? MHL_DISCOVERY_RESULT_MHL : MHL_DISCOVERY_RESULT_USB; } static void force_usb_switch_open(void) { /*DISABLE_DISCOVERY*/ mhl_i2c_reg_modify(TX_PAGE_3, 0x0010, BIT0, 0); /* Force USB ID switch to open*/ mhl_i2c_reg_modify(TX_PAGE_3, 0x0015, BIT6, BIT6); mhl_i2c_reg_write(TX_PAGE_3, 0x0012, 0x86); /* Force HPD to 0 when not in Mobile HD mode. 
*/ mhl_i2c_reg_modify(TX_PAGE_3, 0x0020, BIT5 | BIT4, BIT4); } static void release_usb_switch_open(void) { msleep(50); mhl_i2c_reg_modify(TX_PAGE_3, 0x0015, BIT6, 0x00); mhl_i2c_reg_modify(TX_PAGE_3, 0x0010, BIT0, BIT0); } static void int_4_isr(void) { uint8_t status, reg ; /* INTR_STATUS4 */ status = mhl_i2c_reg_read(TX_PAGE_3, 0x0021); /* * When I2C is inoperational (D3) and * a previous interrupt brought us here, * do nothing. */ if ((0x00 == status) && (mhl_msm_state->cur_state == POWER_STATE_D3)) { pr_debug("MHL: spurious interrupt\n"); return; } if (0xFF != status) { if ((status & BIT0) && (mhl_msm_state->chip_rev_id < 1)) { uint8_t tmds_cstat; uint8_t mhl_fifo_status; /* TMDS CSTAT */ tmds_cstat = mhl_i2c_reg_read(TX_PAGE_3, 0x0040); pr_debug("TMDS CSTAT: 0x%02x\n", tmds_cstat); if (tmds_cstat & 0x02) { mhl_fifo_status = mhl_i2c_reg_read(TX_PAGE_3, 0x0023); pr_debug("MHL FIFO status: 0x%02x\n", mhl_fifo_status); if (mhl_fifo_status & 0x0C) { mhl_i2c_reg_write(TX_PAGE_3, 0x0023, 0x0C); pr_debug("Apply MHL FIFO Reset\n"); mhl_i2c_reg_write(TX_PAGE_3, 0x0000, 0x94); mhl_i2c_reg_write(TX_PAGE_3, 0x0000, 0x84); } } } if (status & BIT1) pr_debug("MHL: INT4 BIT1 is set\n"); /* MHL_EST interrupt */ if (status & BIT2) { pr_debug("mhl_msm_connection() from ISR\n"); mhl_connect_api(true); mhl_msm_connection(); pr_debug("MHL Connect Drv: INT4 Status = %02X\n", (int) status); } else if (status & BIT3) { pr_debug("MHL: uUSB-A type device detected.\n"); mhl_i2c_reg_write(TX_PAGE_3, 0x001C, 0x80); switch_mode(POWER_STATE_D3); } if (status & BIT5) { mhl_connect_api(false); /* Clear interrupts - REG INTR4 */ reg = mhl_i2c_reg_read(TX_PAGE_3, 0x0021); mhl_i2c_reg_write(TX_PAGE_3, 0x0021, reg); mhl_msm_disconnection(); if (notify_usb_online) notify_usb_online(0); pr_debug("MHL Disconnect Drv: INT4 Status = %02X\n", (int)status); } if ((mhl_msm_state->cur_state != POWER_STATE_D0_MHL) &&\ (status & BIT6)) { /* RGND READY Intr */ switch_mode(POWER_STATE_D0_MHL); 
mhl_msm_read_rgnd_int(); } /* Can't succeed at these in D3 */ if (mhl_msm_state->cur_state != POWER_STATE_D3) { /* CBUS Lockout interrupt? */ /* * Hardware detection mechanism figures that * CBUS line is latched and raises this intr * where we force usb switch open and release */ if (status & BIT4) { force_usb_switch_open(); release_usb_switch_open(); } } } pr_debug("MHL END Drv: INT4 Status = %02X\n", (int) status); mhl_i2c_reg_write(TX_PAGE_3, 0x0021, status); return; } static void int_5_isr(void) { uint8_t intr_5_stat; /* * Clear INT 5 ?? * Probably need to revisit this later * INTR5 is related to FIFO underflow/overflow reset * which is handled in 8334 by auto FIFO reset */ intr_5_stat = mhl_i2c_reg_read(TX_PAGE_3, 0x0023); mhl_i2c_reg_write(TX_PAGE_3, 0x0023, intr_5_stat); } static void int_1_isr(void) { /* This ISR mainly handles the HPD status changes */ uint8_t intr_1_stat; uint8_t cbus_stat; /* INTR STATUS 1 */ intr_1_stat = mhl_i2c_reg_read(TX_PAGE_L0, 0x0071); if (intr_1_stat) { /* Clear interrupts */ mhl_i2c_reg_write(TX_PAGE_L0, 0x0071, intr_1_stat); if (BIT6 & intr_1_stat) { /* * HPD status change event is pending * Read CBUS HPD status for this info */ /* MSC REQ ABRT REASON */ cbus_stat = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x0D); if (BIT6 & cbus_stat) mhl_drive_hpd(HPD_UP); } } return; } /* * RCP, RAP messages - mandatory for compliance * */ static void mhl_cbus_isr(void) { uint8_t regval; int req_done = FALSE; uint8_t sub_cmd; uint8_t cmd_data; int msc_msg_recved = FALSE; int rc = -1; regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x08); if (regval == 0xff) return; /* clear all interrupts that were raised even if we did not process */ if (regval) mhl_i2c_reg_write(TX_PAGE_CBUS, 0x08, regval); pr_debug("%s: CBUS_INT = %02x\n", __func__, regval); /* MSC_MSG (RCP/RAP) */ if (regval & BIT(3)) { sub_cmd = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x18); cmd_data = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x19); msc_msg_recved = TRUE; } /* MSC_REQ_DONE */ if (regval & BIT(4)) req_done 
= TRUE; /* Now look for interrupts on CBUS_MSC_INT2 */ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x1E); /* clear all interrupts that were raised */ /* even if we did not process */ if (regval) mhl_i2c_reg_write(TX_PAGE_CBUS, 0x1E, regval); pr_debug("%s: CBUS_MSC_INT2 = %02x\n", __func__, regval); /* received SET_INT */ if (regval & BIT(2)) { uint8_t intr; intr = mhl_i2c_reg_read(TX_PAGE_CBUS, 0xA0); pr_debug("%s: MHL_INT_0 = %02x\n", __func__, intr); intr = mhl_i2c_reg_read(TX_PAGE_CBUS, 0xA1); pr_debug("%s: MHL_INT_1 = %02x\n", __func__, intr); mhl_i2c_reg_write(TX_PAGE_CBUS, 0xA0, 0xFF); mhl_i2c_reg_write(TX_PAGE_CBUS, 0xA1, 0xFF); mhl_i2c_reg_write(TX_PAGE_CBUS, 0xA2, 0xFF); mhl_i2c_reg_write(TX_PAGE_CBUS, 0xA3, 0xFF); } /* received WRITE_STAT */ if (regval & BIT(3)) { uint8_t stat; stat = mhl_i2c_reg_read(TX_PAGE_CBUS, 0xB0); pr_debug("%s: MHL_STATUS_0 = %02x\n", __func__, stat); stat = mhl_i2c_reg_read(TX_PAGE_CBUS, 0xB1); pr_debug("%s: MHL_STATUS_1 = %02x\n", __func__, stat); mhl_i2c_reg_write(TX_PAGE_CBUS, 0xB0, 0xFF); mhl_i2c_reg_write(TX_PAGE_CBUS, 0xB1, 0xFF); mhl_i2c_reg_write(TX_PAGE_CBUS, 0xB2, 0xFF); mhl_i2c_reg_write(TX_PAGE_CBUS, 0xB3, 0xFF); } /* received MSC_MSG */ if (msc_msg_recved) { /*mhl msc recv msc msg*/ if (rc) pr_err("MHL: mhl msc recv msc msg failed(%d)!\n", rc); } return; } static void clear_all_intrs(void) { uint8_t regval = 0x00; /* * intr status debug */ pr_debug("********* EXITING ISR MASK CHECK ?? 
*************\n"); pr_debug("Drv: INT1 MASK = %02X\n", (int) mhl_i2c_reg_read(TX_PAGE_L0, 0x0071)); pr_debug("Drv: INT3 MASK = %02X\n", (int) mhl_i2c_reg_read(TX_PAGE_L0, 0x0077)); pr_debug("Drv: INT4 MASK = %02X\n", (int) mhl_i2c_reg_read(TX_PAGE_3, 0x0021)); pr_debug("Drv: INT5 MASK = %02X\n", (int) mhl_i2c_reg_read(TX_PAGE_3, 0x0023)); pr_debug("Drv: CBUS1 MASK = %02X\n", (int) mhl_i2c_reg_read(TX_PAGE_CBUS, 0x0009)); pr_debug("Drv: CBUS2 MASK = %02X\n", (int) mhl_i2c_reg_read(TX_PAGE_CBUS, 0x001F)); pr_debug("********* END OF ISR MASK CHECK *************\n"); pr_debug("********* EXITING IN ISR ?? *************\n"); regval = mhl_i2c_reg_read(TX_PAGE_L0, 0x0071); pr_debug("Drv: INT1 Status = %02X\n", (int)regval); mhl_i2c_reg_write(TX_PAGE_L0, 0x0071, regval); regval = mhl_i2c_reg_read(TX_PAGE_L0, 0x0072); pr_debug("Drv: INT2 Status = %02X\n", (int)regval); mhl_i2c_reg_write(TX_PAGE_L0, 0x0072, regval); regval = mhl_i2c_reg_read(TX_PAGE_L0, 0x0073); pr_debug("Drv: INT3 Status = %02X\n", (int)regval); mhl_i2c_reg_write(TX_PAGE_L0, 0x0073, regval); regval = mhl_i2c_reg_read(TX_PAGE_3, 0x0021); pr_debug("Drv: INT4 Status = %02X\n", (int)regval); mhl_i2c_reg_write(TX_PAGE_3, 0x0021, regval); regval = mhl_i2c_reg_read(TX_PAGE_3, 0x0023); pr_debug("Drv: INT5 Status = %02X\n", (int)regval); mhl_i2c_reg_write(TX_PAGE_3, 0x0023, regval); regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x0008); pr_debug("Drv: cbusInt Status = %02X\n", (int)regval); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0008, regval); regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x001E); pr_debug("Drv: CBUS INTR_2: %d\n", (int)regval); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x001E, regval); regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00A0); pr_debug("Drv: A0 INT Set = %02X\n", (int)regval); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00A0, regval); regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00A1); pr_debug("Drv: A1 INT Set = %02X\n", (int)regval); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00A1, regval); regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00A2); 
pr_debug("Drv: A2 INT Set = %02X\n", (int)regval); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00A2, regval); regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00A3); pr_debug("Drv: A3 INT Set = %02X\n", (int)regval); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00A3, regval); regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00B0); pr_debug("Drv: B0 STATUS Set = %02X\n", (int)regval); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00B0, regval); regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00B1); pr_debug("Drv: B1 STATUS Set = %02X\n", (int)regval); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00B1, regval); regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00B2); pr_debug("Drv: B2 STATUS Set = %02X\n", (int)regval); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00B2, regval); regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00B3); pr_debug("Drv: B3 STATUS Set = %02X\n", (int)regval); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00B3, regval); regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00E0); pr_debug("Drv: E0 STATUS Set = %02X\n", (int)regval); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00E0, regval); regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00E1); pr_debug("Drv: E1 STATUS Set = %02X\n", (int)regval); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00E1, regval); regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00E2); pr_debug("Drv: E2 STATUS Set = %02X\n", (int)regval); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00E2, regval); regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00E3); pr_debug("Drv: E3 STATUS Set = %02X\n", (int)regval); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00E3, regval); regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00F0); pr_debug("Drv: F0 INT Set = %02X\n", (int)regval); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00F0, regval); regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00F1); pr_debug("Drv: F1 INT Set = %02X\n", (int)regval); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00F1, regval); regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00F2); pr_debug("Drv: F2 INT Set = %02X\n", (int)regval); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00F2, regval); regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00F3); pr_debug("Drv: F3 INT Set = %02X\n", 
(int)regval); mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00F3, regval); pr_debug("********* END OF EXITING IN ISR *************\n"); } static irqreturn_t mhl_tx_isr(int irq, void *dev_id) { /* * Check RGND, MHL_EST, CBUS_LOCKOUT, SCDT * interrupts. In D3, we get only RGND */ int_4_isr(); pr_debug("MHL: Current POWER state is [0x%x]\n", mhl_msm_state->cur_state); if (mhl_msm_state->cur_state == POWER_STATE_D0_MHL) { /* * If int_4_isr() didn't move the tx to D3 * on disconnect, continue to check other * interrupt sources. */ int_5_isr(); /* * Check for any peer messages for DCAP_CHG etc * Dispatch to have the CBUS module working only * once connected. */ mhl_cbus_isr(); int_1_isr(); } clear_all_intrs(); return IRQ_HANDLED; } static void __exit mhl_msm_exit(void) { pr_warn("MHL: Exiting, Bye\n"); /* * Delete driver if i2c client structure is NULL */ i2c_del_driver(&mhl_sii_i2c_driver); if (!mhl_msm_state) { kfree(mhl_msm_state); mhl_msm_state = NULL; } } module_init(mhl_msm_init); module_exit(mhl_msm_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("MHL SII 8334 TX driver");
gpl-2.0
schqiushui/kernel_kk444_sense_a31
drivers/staging/zcache/ramster/debug.c
2123
1772
/*
 * RAMster debugfs statistics.
 *
 * Defines the global counters bumped by the ramster core and, when
 * CONFIG_DEBUG_FS is enabled, exposes each one as a read-only file
 * under /sys/kernel/debug/ramster/.
 */
#include <linux/atomic.h>
#include "debug.h"

/* Pages this node currently holds on behalf of remote nodes. */
ssize_t ramster_foreign_eph_pages;
ssize_t ramster_foreign_pers_pages;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

/* Remote put/get/flush statistics, updated elsewhere in ramster. */
ssize_t ramster_eph_pages_remoted;
ssize_t ramster_pers_pages_remoted;
ssize_t ramster_eph_pages_remote_failed;
ssize_t ramster_pers_pages_remote_failed;
ssize_t ramster_remote_eph_pages_succ_get;
ssize_t ramster_remote_pers_pages_succ_get;
ssize_t ramster_remote_eph_pages_unsucc_get;
ssize_t ramster_remote_pers_pages_unsucc_get;
ssize_t ramster_pers_pages_remote_nomem;
ssize_t ramster_remote_objects_flushed;
ssize_t ramster_remote_object_flushes_failed;
ssize_t ramster_remote_pages_flushed;
ssize_t ramster_remote_page_flushes_failed;

/* Map a debugfs file name "x" to the counter ramster_x defined above. */
#define ATTR(x) { .name = #x, .val = &ramster_##x, }
static struct debug_entry {
	const char *name;
	ssize_t *val;
} attrs[] = {
	ATTR(eph_pages_remoted),
	ATTR(pers_pages_remoted),
	ATTR(eph_pages_remote_failed),
	ATTR(pers_pages_remote_failed),
	ATTR(remote_eph_pages_succ_get),
	ATTR(remote_pers_pages_succ_get),
	ATTR(remote_eph_pages_unsucc_get),
	ATTR(remote_pers_pages_unsucc_get),
	ATTR(pers_pages_remote_nomem),
	ATTR(remote_objects_flushed),
	ATTR(remote_pages_flushed),
	ATTR(remote_object_flushes_failed),
	ATTR(remote_page_flushes_failed),
	ATTR(foreign_eph_pages),
	/*
	 * NOTE(review): ramster_foreign_eph_pages_max and
	 * ramster_foreign_pers_pages_max are referenced here but not
	 * defined in this file — presumably they live behind "debug.h";
	 * confirm they are defined somewhere or this will fail to link.
	 */
	ATTR(foreign_eph_pages_max),
	ATTR(foreign_pers_pages),
	ATTR(foreign_pers_pages_max),
};
#undef ATTR

/*
 * Create the "ramster" debugfs directory and one size_t file per entry
 * in attrs[]. Returns 0 on success, -ENXIO if the directory cannot be
 * created, -ENODEV if any individual file creation fails.
 *
 * NOTE(review): on the -ENODEV path, files already created (and the
 * directory itself) are not removed — a debugfs_remove_recursive(root)
 * would clean up; verify whether callers tolerate the leak.
 * NOTE(review): the counters are ssize_t but are exported via
 * debugfs_create_size_t (size_t) — signedness mismatch; confirm this
 * matches the intended display semantics.
 */
int ramster_debugfs_init(void)
{
	int i;
	struct dentry *root = debugfs_create_dir("ramster", NULL);
	if (root == NULL)
		return -ENXIO;

	for (i = 0; i < ARRAY_SIZE(attrs); i++)
		if (!debugfs_create_size_t(attrs[i].name, S_IRUGO,
				root, attrs[i].val))
			goto out;
	return 0;
out:
	return -ENODEV;
}
#else
/* Without CONFIG_DEBUG_FS there is nothing to expose; report success. */
static inline int ramster_debugfs_init(void)
{
	return 0;
}
#endif
gpl-2.0
imnuts/android_kernel_samsung_smdk4412
drivers/ata/pata_pcmcia.c
2379
13741
/* * pata_pcmcia.c - PCMCIA PATA controller driver. * Copyright 2005-2006 Red Hat Inc, all rights reserved. * PCMCIA ident update Copyright 2006 Marcin Juszkiewicz * <openembedded@hrw.one.pl> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * * Heavily based upon ide-cs.c * The initial developer of the original code is David A. Hinds * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/slab.h> #include <scsi/scsi_host.h> #include <linux/ata.h> #include <linux/libata.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <pcmcia/cisreg.h> #include <pcmcia/ciscode.h> #define DRV_NAME "pata_pcmcia" #define DRV_VERSION "0.3.5" /** * pcmcia_set_mode - PCMCIA specific mode setup * @link: link * @r_failed_dev: Return pointer for failed device * * Perform the tuning and setup of the devices and timings, which * for PCMCIA is the same as any other controller. 
We wrap it however * as we need to spot hardware with incorrect or missing master/slave * decode, which alas is embarrassingly common in the PC world */ static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) { struct ata_device *master = &link->device[0]; struct ata_device *slave = &link->device[1]; if (!ata_dev_enabled(master) || !ata_dev_enabled(slave)) return ata_do_set_mode(link, r_failed_dev); if (memcmp(master->id + ATA_ID_FW_REV, slave->id + ATA_ID_FW_REV, ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0) { /* Suspicious match, but could be two cards from the same vendor - check serial */ if (memcmp(master->id + ATA_ID_SERNO, slave->id + ATA_ID_SERNO, ATA_ID_SERNO_LEN) == 0 && master->id[ATA_ID_SERNO] >> 8) { ata_dev_printk(slave, KERN_WARNING, "is a ghost device, ignoring.\n"); ata_dev_disable(slave); } } return ata_do_set_mode(link, r_failed_dev); } /** * pcmcia_set_mode_8bit - PCMCIA specific mode setup * @link: link * @r_failed_dev: Return pointer for failed device * * For the simple emulated 8bit stuff the less we do the better. */ static int pcmcia_set_mode_8bit(struct ata_link *link, struct ata_device **r_failed_dev) { return 0; } /** * ata_data_xfer_8bit - Transfer data by 8bit PIO * @dev: device to target * @buf: data buffer * @buflen: buffer length * @rw: read/write * * Transfer data from/to the device data register by 8 bit PIO. * * LOCKING: * Inherited from caller. */ static unsigned int ata_data_xfer_8bit(struct ata_device *dev, unsigned char *buf, unsigned int buflen, int rw) { struct ata_port *ap = dev->link->ap; if (rw == READ) ioread8_rep(ap->ioaddr.data_addr, buf, buflen); else iowrite8_rep(ap->ioaddr.data_addr, buf, buflen); return buflen; } /** * pcmcia_8bit_drain_fifo - Stock FIFO drain logic for SFF controllers * @qc: command * * Drain the FIFO and device of any stuck data following a command * failing to complete. In some cases this is necessary before a * reset will recover the device. 
 *
 */
static void pcmcia_8bit_drain_fifo(struct ata_queued_cmd *qc)
{
	int count;
	struct ata_port *ap;

	/* We only need to flush incoming data when a command was running */
	if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
		return;

	ap = qc->ap;
	/* Drain up to 64K of data before we give up this recovery method */
	for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
						&& count++ < 65536;)
		ioread8(ap->ioaddr.data_addr);

	if (count)
		ata_port_printk(ap, KERN_WARNING, "drained %d bytes to clear DRQ.\n",
								count);
}

/* PIO-only SCSI host template shared by all card variants. */
static struct scsi_host_template pcmcia_sht = {
	ATA_PIO_SHT(DRV_NAME),
};

/* Standard 16-bit card: stock SFF ops plus ghost-slave detection. */
static struct ata_port_operations pcmcia_port_ops = {
	.inherits = &ata_sff_port_ops,
	.sff_data_xfer = ata_sff_data_xfer_noirq,
	.cable_detect = ata_cable_40wire,
	.set_mode = pcmcia_set_mode,
};

/* 8-bit emulated card: byte-wide PIO and explicit FIFO draining. */
static struct ata_port_operations pcmcia_8bit_port_ops = {
	.inherits = &ata_sff_port_ops,
	.sff_data_xfer = ata_data_xfer_8bit,
	.cable_detect = ata_cable_40wire,
	.set_mode = pcmcia_set_mode_8bit,
	.sff_drain_fifo = pcmcia_8bit_drain_fifo,
};

/*
 * pcmcia_loop_config() callback: adjust the candidate configuration's
 * I/O window widths/sizes (with a KME vendor quirk passed via
 * priv_data) and try to claim the I/O resources. Non-zero return
 * rejects this configuration.
 */
static int pcmcia_check_one_config(struct pcmcia_device *pdev, void *priv_data)
{
	int *is_kme = priv_data;

	if (!(pdev->resource[0]->flags & IO_DATA_PATH_WIDTH_8)) {
		pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
		pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
	}
	pdev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH;
	pdev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;

	if (pdev->resource[1]->end) {
		/* Two windows: data + control; KME cards need 2 ctl ports. */
		pdev->resource[0]->end = 8;
		pdev->resource[1]->end = (*is_kme) ? 2 : 1;
	} else {
		/* Single window must cover both data and control blocks. */
		if (pdev->resource[0]->end < 16)
			return -ENODEV;
	}

	return pcmcia_request_io(pdev);
}

/**
 * pcmcia_init_one - attach a PCMCIA interface
 * @pdev: pcmcia device
 *
 * Register a PCMCIA IDE interface. Such interfaces are PIO 0 and
 * shared IRQ.
*/ static int pcmcia_init_one(struct pcmcia_device *pdev) { struct ata_host *host; struct ata_port *ap; int is_kme = 0, ret = -ENOMEM, p; unsigned long io_base, ctl_base; void __iomem *io_addr, *ctl_addr; int n_ports = 1; struct ata_port_operations *ops = &pcmcia_port_ops; /* Set up attributes in order to probe card and get resources */ pdev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO | CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC; /* See if we have a manufacturer identifier. Use it to set is_kme for vendor quirks */ is_kme = ((pdev->manf_id == MANFID_KME) && ((pdev->card_id == PRODID_KME_KXLC005_A) || (pdev->card_id == PRODID_KME_KXLC005_B))); if (pcmcia_loop_config(pdev, pcmcia_check_one_config, &is_kme)) { pdev->config_flags &= ~CONF_AUTO_CHECK_VCC; if (pcmcia_loop_config(pdev, pcmcia_check_one_config, &is_kme)) goto failed; /* No suitable config found */ } io_base = pdev->resource[0]->start; if (pdev->resource[1]->end) ctl_base = pdev->resource[1]->start; else ctl_base = pdev->resource[0]->start + 0x0e; if (!pdev->irq) goto failed; ret = pcmcia_enable_device(pdev); if (ret) goto failed; /* iomap */ ret = -ENOMEM; io_addr = devm_ioport_map(&pdev->dev, io_base, 8); ctl_addr = devm_ioport_map(&pdev->dev, ctl_base, 1); if (!io_addr || !ctl_addr) goto failed; /* Success. Disable the IRQ nIEN line, do quirks */ iowrite8(0x02, ctl_addr); if (is_kme) iowrite8(0x81, ctl_addr + 0x01); /* FIXME: Could be more ports at base + 0x10 but we only deal with one right now */ if (resource_size(pdev->resource[0]) >= 0x20) n_ports = 2; if (pdev->manf_id == 0x0097 && pdev->card_id == 0x1620) ops = &pcmcia_8bit_port_ops; /* * Having done the PCMCIA plumbing the ATA side is relatively * sane. 
*/ ret = -ENOMEM; host = ata_host_alloc(&pdev->dev, n_ports); if (!host) goto failed; for (p = 0; p < n_ports; p++) { ap = host->ports[p]; ap->ops = ops; ap->pio_mask = ATA_PIO0; /* ISA so PIO 0 cycles */ ap->flags |= ATA_FLAG_SLAVE_POSS; ap->ioaddr.cmd_addr = io_addr + 0x10 * p; ap->ioaddr.altstatus_addr = ctl_addr + 0x10 * p; ap->ioaddr.ctl_addr = ctl_addr + 0x10 * p; ata_sff_std_ports(&ap->ioaddr); ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io_base, ctl_base); } /* activate */ ret = ata_host_activate(host, pdev->irq, ata_sff_interrupt, IRQF_SHARED, &pcmcia_sht); if (ret) goto failed; pdev->priv = host; return 0; failed: pcmcia_disable_device(pdev); return ret; } /** * pcmcia_remove_one - unplug an pcmcia interface * @pdev: pcmcia device * * A PCMCIA ATA device has been unplugged. Perform the needed * cleanup. Also called on module unload for any active devices. */ static void pcmcia_remove_one(struct pcmcia_device *pdev) { struct ata_host *host = pdev->priv; if (host) ata_host_detach(host); pcmcia_disable_device(pdev); } static const struct pcmcia_device_id pcmcia_devices[] = { PCMCIA_DEVICE_FUNC_ID(4), PCMCIA_DEVICE_MANF_CARD(0x0000, 0x0000), /* Corsair */ PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */ PCMCIA_DEVICE_MANF_CARD(0x000a, 0x0000), /* I-O Data CFA */ PCMCIA_DEVICE_MANF_CARD(0x001c, 0x0001), /* Mitsubishi CFA */ PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704), PCMCIA_DEVICE_MANF_CARD(0x0032, 0x2904), PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401), /* SanDisk CFA */ PCMCIA_DEVICE_MANF_CARD(0x004f, 0x0000), /* Kingston */ PCMCIA_DEVICE_MANF_CARD(0x0097, 0x1620), /* TI emulated */ PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */ PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d), PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */ PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */ PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001), PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0100), /* Viking CFA */ PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar, Viking CFA */ 
PCMCIA_DEVICE_PROD_ID123("Caravelle", "PSC-IDE ", "PSC000", 0x8c36137c, 0xd0693ab8, 0x2768a9f0), PCMCIA_DEVICE_PROD_ID123("CDROM", "IDE", "MCD-601p", 0x1b9179ca, 0xede88951, 0x0d902f74), PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9), PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591), PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728), PCMCIA_DEVICE_PROD_ID12("CNF ", "CD-ROM", 0x46d7db81, 0x66536591), PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591), PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4), PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde), PCMCIA_DEVICE_PROD_ID12("EXP", "CD+GAME", 0x6f58c983, 0x63c13aaf), PCMCIA_DEVICE_PROD_ID12("EXP ", "CD-ROM", 0x0a5c52fd, 0x66536591), PCMCIA_DEVICE_PROD_ID12("EXP ", "PnPIDE", 0x0a5c52fd, 0x0c694728), PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e), PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae), PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178), PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420), PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178), PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x55d5bffb), PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10), PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e), PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674), PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2 ", 0xe37be2b5, 0x8671043b), PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF300", 0x7ed2ad87, 0x7e9e78ee), 
PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF500", 0x7ed2ad87, 0x7a13045c), PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79), PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591), PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728), PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1), PCMCIA_DEVICE_PROD_ID12("SEAGATE", "ST1", 0x87c1b330, 0xe1f30883), PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "04/05/06", 0x43d74cb4, 0x6a22777d), PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6), PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003), PCMCIA_DEVICE_PROD_ID1("TRANSCEND 512M ", 0xd0909443), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF45", 0x709b1bf1, 0xf68b6f32), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x7558f133), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47), PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852), PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918), PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e), PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6), PCMCIA_DEVICE_PROD_ID2("Flash Card", 0x5a362506), PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, pcmcia_devices); static struct pcmcia_driver pcmcia_driver = { .owner = THIS_MODULE, .name = DRV_NAME, .id_table = pcmcia_devices, .probe = pcmcia_init_one, .remove = pcmcia_remove_one, }; static int __init pcmcia_init(void) { return pcmcia_register_driver(&pcmcia_driver); } static void __exit pcmcia_exit(void) { pcmcia_unregister_driver(&pcmcia_driver); } MODULE_AUTHOR("Alan Cox"); 
MODULE_DESCRIPTION("low-level driver for PCMCIA ATA"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); module_init(pcmcia_init); module_exit(pcmcia_exit);
gpl-2.0
Hani-K/H-Vitamin_trlte
drivers/iommu/shmobile-ipmmu.c
2379
3280
/* * IPMMU/IPMMUI * Copyright (C) 2012 Hideki EIRAKU * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. */ #include <linux/err.h> #include <linux/export.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/platform_data/sh_ipmmu.h> #include "shmobile-ipmmu.h" #define IMCTR1 0x000 #define IMCTR2 0x004 #define IMASID 0x010 #define IMTTBR 0x014 #define IMTTBCR 0x018 #define IMCTR1_TLBEN (1 << 0) #define IMCTR1_FLUSH (1 << 1) static void ipmmu_reg_write(struct shmobile_ipmmu *ipmmu, unsigned long reg_off, unsigned long data) { iowrite32(data, ipmmu->ipmmu_base + reg_off); } void ipmmu_tlb_flush(struct shmobile_ipmmu *ipmmu) { if (!ipmmu) return; mutex_lock(&ipmmu->flush_lock); if (ipmmu->tlb_enabled) ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH | IMCTR1_TLBEN); else ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH); mutex_unlock(&ipmmu->flush_lock); } void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size, int asid) { if (!ipmmu) return; mutex_lock(&ipmmu->flush_lock); switch (size) { default: ipmmu->tlb_enabled = 0; break; case 0x2000: ipmmu_reg_write(ipmmu, IMTTBCR, 1); ipmmu->tlb_enabled = 1; break; case 0x1000: ipmmu_reg_write(ipmmu, IMTTBCR, 2); ipmmu->tlb_enabled = 1; break; case 0x800: ipmmu_reg_write(ipmmu, IMTTBCR, 3); ipmmu->tlb_enabled = 1; break; case 0x400: ipmmu_reg_write(ipmmu, IMTTBCR, 4); ipmmu->tlb_enabled = 1; break; case 0x200: ipmmu_reg_write(ipmmu, IMTTBCR, 5); ipmmu->tlb_enabled = 1; break; case 0x100: ipmmu_reg_write(ipmmu, IMTTBCR, 6); ipmmu->tlb_enabled = 1; break; case 0x80: ipmmu_reg_write(ipmmu, IMTTBCR, 7); ipmmu->tlb_enabled = 1; break; } ipmmu_reg_write(ipmmu, IMTTBR, phys); ipmmu_reg_write(ipmmu, IMASID, asid); mutex_unlock(&ipmmu->flush_lock); } static int ipmmu_probe(struct platform_device *pdev) { struct shmobile_ipmmu 
*ipmmu; struct resource *res; struct shmobile_ipmmu_platform_data *pdata = pdev->dev.platform_data; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "cannot get platform resources\n"); return -ENOENT; } ipmmu = devm_kzalloc(&pdev->dev, sizeof(*ipmmu), GFP_KERNEL); if (!ipmmu) { dev_err(&pdev->dev, "cannot allocate device data\n"); return -ENOMEM; } mutex_init(&ipmmu->flush_lock); ipmmu->dev = &pdev->dev; ipmmu->ipmmu_base = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res)); if (!ipmmu->ipmmu_base) { dev_err(&pdev->dev, "ioremap_nocache failed\n"); return -ENOMEM; } ipmmu->dev_names = pdata->dev_names; ipmmu->num_dev_names = pdata->num_dev_names; platform_set_drvdata(pdev, ipmmu); ipmmu_reg_write(ipmmu, IMCTR1, 0x0); /* disable TLB */ ipmmu_reg_write(ipmmu, IMCTR2, 0x0); /* disable PMB */ ipmmu_iommu_init(ipmmu); return 0; } static struct platform_driver ipmmu_driver = { .probe = ipmmu_probe, .driver = { .owner = THIS_MODULE, .name = "ipmmu", }, }; static int __init ipmmu_init(void) { return platform_driver_register(&ipmmu_driver); } subsys_initcall(ipmmu_init);
gpl-2.0
garwynn/L900_LJC_Kernel
drivers/net/bonding/bond_procfs.c
2379
7119
#include <linux/proc_fs.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include "bonding.h" static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) __acquires(&bond->lock) { struct bonding *bond = seq->private; loff_t off = 0; struct slave *slave; int i; /* make sure the bond won't be taken away */ rcu_read_lock(); read_lock(&bond->lock); if (*pos == 0) return SEQ_START_TOKEN; bond_for_each_slave(bond, slave, i) { if (++off == *pos) return slave; } return NULL; } static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct bonding *bond = seq->private; struct slave *slave = v; ++*pos; if (v == SEQ_START_TOKEN) return bond->first_slave; slave = slave->next; return (slave == bond->first_slave) ? NULL : slave; } static void bond_info_seq_stop(struct seq_file *seq, void *v) __releases(&bond->lock) __releases(RCU) { struct bonding *bond = seq->private; read_unlock(&bond->lock); rcu_read_unlock(); } static void bond_info_show_master(struct seq_file *seq) { struct bonding *bond = seq->private; struct slave *curr; int i; read_lock(&bond->curr_slave_lock); curr = bond->curr_active_slave; read_unlock(&bond->curr_slave_lock); seq_printf(seq, "Bonding Mode: %s", bond_mode_name(bond->params.mode)); if (bond->params.mode == BOND_MODE_ACTIVEBACKUP && bond->params.fail_over_mac) seq_printf(seq, " (fail_over_mac %s)", fail_over_mac_tbl[bond->params.fail_over_mac].modename); seq_printf(seq, "\n"); if (bond->params.mode == BOND_MODE_XOR || bond->params.mode == BOND_MODE_8023AD) { seq_printf(seq, "Transmit Hash Policy: %s (%d)\n", xmit_hashtype_tbl[bond->params.xmit_policy].modename, bond->params.xmit_policy); } if (USES_PRIMARY(bond->params.mode)) { seq_printf(seq, "Primary Slave: %s", (bond->primary_slave) ? 
bond->primary_slave->dev->name : "None"); if (bond->primary_slave) seq_printf(seq, " (primary_reselect %s)", pri_reselect_tbl[bond->params.primary_reselect].modename); seq_printf(seq, "\nCurrently Active Slave: %s\n", (curr) ? curr->dev->name : "None"); } seq_printf(seq, "MII Status: %s\n", netif_carrier_ok(bond->dev) ? "up" : "down"); seq_printf(seq, "MII Polling Interval (ms): %d\n", bond->params.miimon); seq_printf(seq, "Up Delay (ms): %d\n", bond->params.updelay * bond->params.miimon); seq_printf(seq, "Down Delay (ms): %d\n", bond->params.downdelay * bond->params.miimon); /* ARP information */ if (bond->params.arp_interval > 0) { int printed = 0; seq_printf(seq, "ARP Polling Interval (ms): %d\n", bond->params.arp_interval); seq_printf(seq, "ARP IP target/s (n.n.n.n form):"); for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { if (!bond->params.arp_targets[i]) break; if (printed) seq_printf(seq, ","); seq_printf(seq, " %pI4", &bond->params.arp_targets[i]); printed = 1; } seq_printf(seq, "\n"); } if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; seq_puts(seq, "\n802.3ad info\n"); seq_printf(seq, "LACP rate: %s\n", (bond->params.lacp_fast) ? 
"fast" : "slow"); seq_printf(seq, "Aggregator selection policy (ad_select): %s\n", ad_select_tbl[bond->params.ad_select].modename); if (bond_3ad_get_active_agg_info(bond, &ad_info)) { seq_printf(seq, "bond %s has no active aggregator\n", bond->dev->name); } else { seq_printf(seq, "Active Aggregator Info:\n"); seq_printf(seq, "\tAggregator ID: %d\n", ad_info.aggregator_id); seq_printf(seq, "\tNumber of ports: %d\n", ad_info.ports); seq_printf(seq, "\tActor Key: %d\n", ad_info.actor_key); seq_printf(seq, "\tPartner Key: %d\n", ad_info.partner_key); seq_printf(seq, "\tPartner Mac Address: %pM\n", ad_info.partner_system); } } } static void bond_info_show_slave(struct seq_file *seq, const struct slave *slave) { struct bonding *bond = seq->private; seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name); seq_printf(seq, "MII Status: %s\n", (slave->link == BOND_LINK_UP) ? "up" : "down"); seq_printf(seq, "Speed: %d Mbps\n", slave->speed); seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half"); seq_printf(seq, "Link Failure Count: %u\n", slave->link_failure_count); seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr); if (bond->params.mode == BOND_MODE_8023AD) { const struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator; if (agg) seq_printf(seq, "Aggregator ID: %d\n", agg->aggregator_identifier); else seq_puts(seq, "Aggregator ID: N/A\n"); } seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id); } static int bond_info_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) { seq_printf(seq, "%s\n", bond_version); bond_info_show_master(seq); } else bond_info_show_slave(seq, v); return 0; } static const struct seq_operations bond_info_seq_ops = { .start = bond_info_seq_start, .next = bond_info_seq_next, .stop = bond_info_seq_stop, .show = bond_info_seq_show, }; static int bond_info_open(struct inode *inode, struct file *file) { struct seq_file *seq; struct proc_dir_entry *proc; int res; res = seq_open(file, 
&bond_info_seq_ops); if (!res) { /* recover the pointer buried in proc_dir_entry data */ seq = file->private_data; proc = PDE(inode); seq->private = proc->data; } return res; } static const struct file_operations bond_info_fops = { .owner = THIS_MODULE, .open = bond_info_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; void bond_create_proc_entry(struct bonding *bond) { struct net_device *bond_dev = bond->dev; struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); if (bn->proc_dir) { bond->proc_entry = proc_create_data(bond_dev->name, S_IRUGO, bn->proc_dir, &bond_info_fops, bond); if (bond->proc_entry == NULL) pr_warning("Warning: Cannot create /proc/net/%s/%s\n", DRV_NAME, bond_dev->name); else memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ); } } void bond_remove_proc_entry(struct bonding *bond) { struct net_device *bond_dev = bond->dev; struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); if (bn->proc_dir && bond->proc_entry) { remove_proc_entry(bond->proc_file_name, bn->proc_dir); memset(bond->proc_file_name, 0, IFNAMSIZ); bond->proc_entry = NULL; } } /* Create the bonding directory under /proc/net, if doesn't exist yet. * Caller must hold rtnl_lock. */ void __net_init bond_create_proc_dir(struct bond_net *bn) { if (!bn->proc_dir) { bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net); if (!bn->proc_dir) pr_warning("Warning: cannot create /proc/net/%s\n", DRV_NAME); } } /* Destroy the bonding directory under /proc/net, if empty. * Caller must hold rtnl_lock. */ void __net_exit bond_destroy_proc_dir(struct bond_net *bn) { if (bn->proc_dir) { remove_proc_entry(DRV_NAME, bn->net->proc_net); bn->proc_dir = NULL; } }
gpl-2.0
ollie27/android_kernel_samsung_aries
fs/ceph/ioctl.c
2379
6984
#include <linux/in.h> #include "super.h" #include "mds_client.h" #include <linux/ceph/ceph_debug.h> #include "ioctl.h" /* * ioctls */ /* * get and set the file layout */ static long ceph_ioctl_get_layout(struct file *file, void __user *arg) { struct ceph_inode_info *ci = ceph_inode(file->f_dentry->d_inode); struct ceph_ioctl_layout l; int err; err = ceph_do_getattr(file->f_dentry->d_inode, CEPH_STAT_CAP_LAYOUT); if (!err) { l.stripe_unit = ceph_file_layout_su(ci->i_layout); l.stripe_count = ceph_file_layout_stripe_count(ci->i_layout); l.object_size = ceph_file_layout_object_size(ci->i_layout); l.data_pool = le32_to_cpu(ci->i_layout.fl_pg_pool); l.preferred_osd = (s32)le32_to_cpu(ci->i_layout.fl_pg_preferred); if (copy_to_user(arg, &l, sizeof(l))) return -EFAULT; } return err; } static long ceph_ioctl_set_layout(struct file *file, void __user *arg) { struct inode *inode = file->f_dentry->d_inode; struct inode *parent_inode = file->f_dentry->d_parent->d_inode; struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; struct ceph_mds_request *req; struct ceph_ioctl_layout l; int err, i; /* copy and validate */ if (copy_from_user(&l, arg, sizeof(l))) return -EFAULT; if ((l.object_size & ~PAGE_MASK) || (l.stripe_unit & ~PAGE_MASK) || !l.stripe_unit || (l.object_size && (unsigned)l.object_size % (unsigned)l.stripe_unit)) return -EINVAL; /* make sure it's a valid data pool */ if (l.data_pool > 0) { mutex_lock(&mdsc->mutex); err = -EINVAL; for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++) if (mdsc->mdsmap->m_data_pg_pools[i] == l.data_pool) { err = 0; break; } mutex_unlock(&mdsc->mutex); if (err) return err; } req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETLAYOUT, USE_AUTH_MDS); if (IS_ERR(req)) return PTR_ERR(req); req->r_inode = inode; ihold(inode); req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL; req->r_args.setlayout.layout.fl_stripe_unit = cpu_to_le32(l.stripe_unit); req->r_args.setlayout.layout.fl_stripe_count = 
cpu_to_le32(l.stripe_count); req->r_args.setlayout.layout.fl_object_size = cpu_to_le32(l.object_size); req->r_args.setlayout.layout.fl_pg_pool = cpu_to_le32(l.data_pool); req->r_args.setlayout.layout.fl_pg_preferred = cpu_to_le32(l.preferred_osd); err = ceph_mdsc_do_request(mdsc, parent_inode, req); ceph_mdsc_put_request(req); return err; } /* * Set a layout policy on a directory inode. All items in the tree * rooted at this inode will inherit this layout on creation, * (It doesn't apply retroactively ) * unless a subdirectory has its own layout policy. */ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg) { struct inode *inode = file->f_dentry->d_inode; struct ceph_mds_request *req; struct ceph_ioctl_layout l; int err, i; struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; /* copy and validate */ if (copy_from_user(&l, arg, sizeof(l))) return -EFAULT; if ((l.object_size & ~PAGE_MASK) || (l.stripe_unit & ~PAGE_MASK) || !l.stripe_unit || (l.object_size && (unsigned)l.object_size % (unsigned)l.stripe_unit)) return -EINVAL; /* make sure it's a valid data pool */ if (l.data_pool > 0) { mutex_lock(&mdsc->mutex); err = -EINVAL; for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++) if (mdsc->mdsmap->m_data_pg_pools[i] == l.data_pool) { err = 0; break; } mutex_unlock(&mdsc->mutex); if (err) return err; } req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETDIRLAYOUT, USE_AUTH_MDS); if (IS_ERR(req)) return PTR_ERR(req); req->r_inode = inode; ihold(inode); req->r_args.setlayout.layout.fl_stripe_unit = cpu_to_le32(l.stripe_unit); req->r_args.setlayout.layout.fl_stripe_count = cpu_to_le32(l.stripe_count); req->r_args.setlayout.layout.fl_object_size = cpu_to_le32(l.object_size); req->r_args.setlayout.layout.fl_pg_pool = cpu_to_le32(l.data_pool); req->r_args.setlayout.layout.fl_pg_preferred = cpu_to_le32(l.preferred_osd); err = ceph_mdsc_do_request(mdsc, inode, req); ceph_mdsc_put_request(req); return err; } /* * Return object name, 
size/offset information, and location (OSD * number, network address) for a given file offset. */ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg) { struct ceph_ioctl_dataloc dl; struct inode *inode = file->f_dentry->d_inode; struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_osd_client *osdc = &ceph_sb_to_client(inode->i_sb)->client->osdc; u64 len = 1, olen; u64 tmp; struct ceph_object_layout ol; struct ceph_pg pgid; /* copy and validate */ if (copy_from_user(&dl, arg, sizeof(dl))) return -EFAULT; down_read(&osdc->map_sem); ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, &len, &dl.object_no, &dl.object_offset, &olen); dl.file_offset -= dl.object_offset; dl.object_size = ceph_file_layout_object_size(ci->i_layout); dl.block_size = ceph_file_layout_su(ci->i_layout); /* block_offset = object_offset % block_size */ tmp = dl.object_offset; dl.block_offset = do_div(tmp, dl.block_size); snprintf(dl.object_name, sizeof(dl.object_name), "%llx.%08llx", ceph_ino(inode), dl.object_no); ceph_calc_object_layout(&ol, dl.object_name, &ci->i_layout, osdc->osdmap); pgid = ol.ol_pgid; dl.osd = ceph_calc_pg_primary(osdc->osdmap, pgid); if (dl.osd >= 0) { struct ceph_entity_addr *a = ceph_osd_addr(osdc->osdmap, dl.osd); if (a) memcpy(&dl.osd_addr, &a->in_addr, sizeof(dl.osd_addr)); } else { memset(&dl.osd_addr, 0, sizeof(dl.osd_addr)); } up_read(&osdc->map_sem); /* send result back to user */ if (copy_to_user(arg, &dl, sizeof(dl))) return -EFAULT; return 0; } static long ceph_ioctl_lazyio(struct file *file) { struct ceph_file_info *fi = file->private_data; struct inode *inode = file->f_dentry->d_inode; struct ceph_inode_info *ci = ceph_inode(inode); if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) { spin_lock(&inode->i_lock); ci->i_nr_by_mode[fi->fmode]--; fi->fmode |= CEPH_FILE_MODE_LAZY; ci->i_nr_by_mode[fi->fmode]++; spin_unlock(&inode->i_lock); dout("ioctl_layzio: file %p marked lazy\n", file); ceph_check_caps(ci, 0, NULL); } else { 
dout("ioctl_layzio: file %p already lazy\n", file); } return 0; } long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { dout("ioctl file %p cmd %u arg %lu\n", file, cmd, arg); switch (cmd) { case CEPH_IOC_GET_LAYOUT: return ceph_ioctl_get_layout(file, (void __user *)arg); case CEPH_IOC_SET_LAYOUT: return ceph_ioctl_set_layout(file, (void __user *)arg); case CEPH_IOC_SET_LAYOUT_POLICY: return ceph_ioctl_set_layout_policy(file, (void __user *)arg); case CEPH_IOC_GET_DATALOC: return ceph_ioctl_get_dataloc(file, (void __user *)arg); case CEPH_IOC_LAZYIO: return ceph_ioctl_lazyio(file); } return -ENOTTY; }
gpl-2.0
AndroidOpenSourceXperia/android_kernel_sony_u8500
fs/9p/vfs_super.c
2891
8805
/* * linux/fs/9p/vfs_super.c * * This file contians superblock ops for 9P2000. It is intended that * you mount this file system on directories. * * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com> * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to: * Free Software Foundation * 51 Franklin Street, Fifth Floor * Boston, MA 02111-1301 USA * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/inet.h> #include <linux/pagemap.h> #include <linux/seq_file.h> #include <linux/mount.h> #include <linux/idr.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/statfs.h> #include <linux/magic.h> #include <net/9p/9p.h> #include <net/9p/client.h> #include "v9fs.h" #include "v9fs_vfs.h" #include "fid.h" #include "xattr.h" #include "acl.h" static const struct super_operations v9fs_super_ops, v9fs_super_ops_dotl; /** * v9fs_set_super - set the superblock * @s: super block * @data: file system specific data * */ static int v9fs_set_super(struct super_block *s, void *data) { s->s_fs_info = data; return set_anon_super(s, data); } /** * v9fs_fill_super - populate superblock with info * @sb: superblock * @v9ses: session information * @flags: flags propagated from v9fs_mount() * */ static void v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses, int flags, void *data) { 
sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_blocksize_bits = fls(v9ses->maxdata - 1); sb->s_blocksize = 1 << sb->s_blocksize_bits; sb->s_magic = V9FS_MAGIC; if (v9fs_proto_dotl(v9ses)) { sb->s_op = &v9fs_super_ops_dotl; sb->s_xattr = v9fs_xattr_handlers; } else sb->s_op = &v9fs_super_ops; sb->s_bdi = &v9ses->bdi; if (v9ses->cache) sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_CACHE_SIZE; sb->s_flags = flags | MS_ACTIVE | MS_DIRSYNC | MS_NOATIME; if (!v9ses->cache) sb->s_flags |= MS_SYNCHRONOUS; #ifdef CONFIG_9P_FS_POSIX_ACL if ((v9ses->flags & V9FS_ACL_MASK) == V9FS_POSIX_ACL) sb->s_flags |= MS_POSIXACL; #endif save_mount_options(sb, data); } /** * v9fs_mount - mount a superblock * @fs_type: file system type * @flags: mount flags * @dev_name: device name that was mounted * @data: mount options * */ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { struct super_block *sb = NULL; struct inode *inode = NULL; struct dentry *root = NULL; struct v9fs_session_info *v9ses = NULL; int mode = S_IRWXUGO | S_ISVTX; struct p9_fid *fid; int retval = 0; P9_DPRINTK(P9_DEBUG_VFS, " \n"); v9ses = kzalloc(sizeof(struct v9fs_session_info), GFP_KERNEL); if (!v9ses) return ERR_PTR(-ENOMEM); fid = v9fs_session_init(v9ses, dev_name, data); if (IS_ERR(fid)) { retval = PTR_ERR(fid); /* * we need to call session_close to tear down some * of the data structure setup by session_init */ goto close_session; } sb = sget(fs_type, NULL, v9fs_set_super, v9ses); if (IS_ERR(sb)) { retval = PTR_ERR(sb); goto clunk_fid; } v9fs_fill_super(sb, v9ses, flags, data); if (v9ses->cache) sb->s_d_op = &v9fs_cached_dentry_operations; else sb->s_d_op = &v9fs_dentry_operations; inode = v9fs_get_inode(sb, S_IFDIR | mode, 0); if (IS_ERR(inode)) { retval = PTR_ERR(inode); goto release_sb; } root = d_alloc_root(inode); if (!root) { iput(inode); retval = -ENOMEM; goto release_sb; } sb->s_root = root; if (v9fs_proto_dotl(v9ses)) { struct p9_stat_dotl *st 
= NULL; st = p9_client_getattr_dotl(fid, P9_STATS_BASIC); if (IS_ERR(st)) { retval = PTR_ERR(st); goto release_sb; } root->d_inode->i_ino = v9fs_qid2ino(&st->qid); v9fs_stat2inode_dotl(st, root->d_inode); kfree(st); } else { struct p9_wstat *st = NULL; st = p9_client_stat(fid); if (IS_ERR(st)) { retval = PTR_ERR(st); goto release_sb; } root->d_inode->i_ino = v9fs_qid2ino(&st->qid); v9fs_stat2inode(st, root->d_inode, sb); p9stat_free(st); kfree(st); } retval = v9fs_get_acl(inode, fid); if (retval) goto release_sb; v9fs_fid_add(root, fid); P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n"); return dget(sb->s_root); clunk_fid: p9_client_clunk(fid); close_session: v9fs_session_close(v9ses); kfree(v9ses); return ERR_PTR(retval); release_sb: /* * we will do the session_close and root dentry release * in the below call. But we need to clunk fid, because we haven't * attached the fid to dentry so it won't get clunked * automatically. */ p9_client_clunk(fid); deactivate_locked_super(sb); return ERR_PTR(retval); } /** * v9fs_kill_super - Kill Superblock * @s: superblock * */ static void v9fs_kill_super(struct super_block *s) { struct v9fs_session_info *v9ses = s->s_fs_info; P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s); kill_anon_super(s); v9fs_session_cancel(v9ses); v9fs_session_close(v9ses); kfree(v9ses); s->s_fs_info = NULL; P9_DPRINTK(P9_DEBUG_VFS, "exiting kill_super\n"); } static void v9fs_umount_begin(struct super_block *sb) { struct v9fs_session_info *v9ses; v9ses = sb->s_fs_info; v9fs_session_begin_cancel(v9ses); } static int v9fs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct v9fs_session_info *v9ses; struct p9_fid *fid; struct p9_rstatfs rs; int res; fid = v9fs_fid_lookup(dentry); if (IS_ERR(fid)) { res = PTR_ERR(fid); goto done; } v9ses = v9fs_dentry2v9ses(dentry); if (v9fs_proto_dotl(v9ses)) { res = p9_client_statfs(fid, &rs); if (res == 0) { buf->f_type = V9FS_MAGIC; buf->f_bsize = rs.bsize; buf->f_blocks = rs.blocks; buf->f_bfree = rs.bfree; 
buf->f_bavail = rs.bavail; buf->f_files = rs.files; buf->f_ffree = rs.ffree; buf->f_fsid.val[0] = rs.fsid & 0xFFFFFFFFUL; buf->f_fsid.val[1] = (rs.fsid >> 32) & 0xFFFFFFFFUL; buf->f_namelen = rs.namelen; } if (res != -ENOSYS) goto done; } res = simple_statfs(dentry, buf); done: return res; } static int v9fs_drop_inode(struct inode *inode) { struct v9fs_session_info *v9ses; v9ses = v9fs_inode2v9ses(inode); if (v9ses->cache) return generic_drop_inode(inode); /* * in case of non cached mode always drop the * the inode because we want the inode attribute * to always match that on the server. */ return 1; } static int v9fs_write_inode(struct inode *inode, struct writeback_control *wbc) { int ret; struct p9_wstat wstat; struct v9fs_inode *v9inode; /* * send an fsync request to server irrespective of * wbc->sync_mode. */ P9_DPRINTK(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode); v9inode = V9FS_I(inode); if (!v9inode->writeback_fid) return 0; v9fs_blank_wstat(&wstat); ret = p9_client_wstat(v9inode->writeback_fid, &wstat); if (ret < 0) { __mark_inode_dirty(inode, I_DIRTY_DATASYNC); return ret; } return 0; } static int v9fs_write_inode_dotl(struct inode *inode, struct writeback_control *wbc) { int ret; struct v9fs_inode *v9inode; /* * send an fsync request to server irrespective of * wbc->sync_mode. 
*/ P9_DPRINTK(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode); v9inode = V9FS_I(inode); if (!v9inode->writeback_fid) return 0; ret = p9_client_fsync(v9inode->writeback_fid, 0); if (ret < 0) { __mark_inode_dirty(inode, I_DIRTY_DATASYNC); return ret; } return 0; } static const struct super_operations v9fs_super_ops = { .alloc_inode = v9fs_alloc_inode, .destroy_inode = v9fs_destroy_inode, .statfs = simple_statfs, .evict_inode = v9fs_evict_inode, .show_options = generic_show_options, .umount_begin = v9fs_umount_begin, .write_inode = v9fs_write_inode, }; static const struct super_operations v9fs_super_ops_dotl = { .alloc_inode = v9fs_alloc_inode, .destroy_inode = v9fs_destroy_inode, .statfs = v9fs_statfs, .drop_inode = v9fs_drop_inode, .evict_inode = v9fs_evict_inode, .show_options = generic_show_options, .umount_begin = v9fs_umount_begin, .write_inode = v9fs_write_inode_dotl, }; struct file_system_type v9fs_fs_type = { .name = "9p", .mount = v9fs_mount, .kill_sb = v9fs_kill_super, .owner = THIS_MODULE, .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT, };
gpl-2.0
trupeace/arm10c-tp
arch/mips/mti-sead3/sead3-serial.c
3147
1189
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. */ #include <linux/module.h> #include <linux/init.h> #include <linux/serial_8250.h> #define UART(base, int) \ { \ .mapbase = base, \ .irq = int, \ .uartclk = 14745600, \ .iotype = UPIO_MEM32, \ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, \ .regshift = 2, \ } static struct plat_serial8250_port uart8250_data[] = { UART(0x1f000900, MIPS_CPU_IRQ_BASE + 4), /* ttyS0 = USB */ UART(0x1f000800, MIPS_CPU_IRQ_BASE + 4), /* ttyS1 = RS232 */ { }, }; static struct platform_device uart8250_device = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev = { .platform_data = uart8250_data, }, }; static int __init uart8250_init(void) { return platform_device_register(&uart8250_device); } module_init(uart8250_init); MODULE_AUTHOR("Chris Dearman <chris@mips.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("8250 UART probe driver for the SEAD-3 platform");
gpl-2.0
cphelps76/kernel_google_msm
drivers/char/msm_smd_pkt.c
3403
10666
/* Copyright (c) 2008-2010, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /* * SMD Packet Driver -- Provides userspace interface to SMD packet ports. */ #include <linux/slab.h> #include <linux/cdev.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/device.h> #include <linux/sched.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/uaccess.h> #include <linux/workqueue.h> #include <linux/poll.h> #include <mach/msm_smd.h> #define NUM_SMD_PKT_PORTS 9 #define DEVICE_NAME "smdpkt" #define MAX_BUF_SIZE 2048 struct smd_pkt_dev { struct cdev cdev; struct device *devicep; struct smd_channel *ch; int open_count; struct mutex ch_lock; struct mutex rx_lock; struct mutex tx_lock; wait_queue_head_t ch_read_wait_queue; wait_queue_head_t ch_opened_wait_queue; int i; unsigned char tx_buf[MAX_BUF_SIZE]; unsigned char rx_buf[MAX_BUF_SIZE]; int remote_open; } *smd_pkt_devp[NUM_SMD_PKT_PORTS]; struct class *smd_pkt_classp; static dev_t smd_pkt_number; static int msm_smd_pkt_debug_enable; module_param_named(debug_enable, msm_smd_pkt_debug_enable, int, S_IRUGO | S_IWUSR | S_IWGRP); #ifdef DEBUG #define D_DUMP_BUFFER(prestr, cnt, buf) do { \ int i; \ if (msm_smd_pkt_debug_enable) { \ pr_debug("%s", prestr); \ for (i = 0; i < cnt; i++) \ pr_debug("%.2x", buf[i]); \ pr_debug("\n"); \ } \ } while (0) #else #define D_DUMP_BUFFER(prestr, cnt, buf) do {} while (0) #endif #ifdef DEBUG #define DBG(x...) do { \ if (msm_smd_pkt_debug_enable) \ pr_debug(x); \ } while (0) #else #define DBG(x...) 
do {} while (0) #endif static void check_and_wakeup_reader(struct smd_pkt_dev *smd_pkt_devp) { int sz; if (!smd_pkt_devp || !smd_pkt_devp->ch) return; sz = smd_cur_packet_size(smd_pkt_devp->ch); if (sz == 0) { DBG("no packet\n"); return; } if (sz > smd_read_avail(smd_pkt_devp->ch)) { DBG("incomplete packet\n"); return; } DBG("waking up reader\n"); wake_up_interruptible(&smd_pkt_devp->ch_read_wait_queue); } static int smd_pkt_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { int r, bytes_read; struct smd_pkt_dev *smd_pkt_devp; struct smd_channel *chl; DBG("read %d bytes\n", count); if (count > MAX_BUF_SIZE) return -EINVAL; smd_pkt_devp = file->private_data; if (!smd_pkt_devp || !smd_pkt_devp->ch) return -EINVAL; chl = smd_pkt_devp->ch; wait_for_packet: r = wait_event_interruptible(smd_pkt_devp->ch_read_wait_queue, (smd_cur_packet_size(chl) > 0 && smd_read_avail(chl) >= smd_cur_packet_size(chl))); if (r < 0) { if (r != -ERESTARTSYS) pr_err("wait returned %d\n", r); return r; } mutex_lock(&smd_pkt_devp->rx_lock); bytes_read = smd_cur_packet_size(smd_pkt_devp->ch); if (bytes_read == 0 || bytes_read < smd_read_avail(smd_pkt_devp->ch)) { mutex_unlock(&smd_pkt_devp->rx_lock); DBG("Nothing to read\n"); goto wait_for_packet; } if (bytes_read > count) { mutex_unlock(&smd_pkt_devp->rx_lock); pr_info("packet size %d > buffer size %d", bytes_read, count); return -EINVAL; } r = smd_read(smd_pkt_devp->ch, smd_pkt_devp->rx_buf, bytes_read); if (r != bytes_read) { mutex_unlock(&smd_pkt_devp->rx_lock); pr_err("smd_read failed to read %d bytes: %d\n", bytes_read, r); return -EIO; } D_DUMP_BUFFER("read: ", bytes_read, smd_pkt_devp->rx_buf); r = copy_to_user(buf, smd_pkt_devp->rx_buf, bytes_read); mutex_unlock(&smd_pkt_devp->rx_lock); if (r) { pr_err("copy_to_user failed %d\n", r); return -EFAULT; } DBG("read complete %d bytes\n", bytes_read); check_and_wakeup_reader(smd_pkt_devp); return bytes_read; } static int smd_pkt_write(struct file *file, const char __user 
*buf, size_t count, loff_t *ppos) { int r; struct smd_pkt_dev *smd_pkt_devp; if (count > MAX_BUF_SIZE) return -EINVAL; DBG("writting %d bytes\n", count); smd_pkt_devp = file->private_data; if (!smd_pkt_devp || !smd_pkt_devp->ch) return -EINVAL; mutex_lock(&smd_pkt_devp->tx_lock); if (smd_write_avail(smd_pkt_devp->ch) < count) { mutex_unlock(&smd_pkt_devp->tx_lock); DBG("Not enough space to write\n"); return -ENOMEM; } D_DUMP_BUFFER("write: ", count, buf); r = copy_from_user(smd_pkt_devp->tx_buf, buf, count); if (r) { mutex_unlock(&smd_pkt_devp->tx_lock); pr_err("copy_from_user failed %d\n", r); return -EFAULT; } r = smd_write(smd_pkt_devp->ch, smd_pkt_devp->tx_buf, count); if (r != count) { mutex_unlock(&smd_pkt_devp->tx_lock); pr_err("smd_write failed to write %d bytes: %d.\n", count, r); return -EIO; } mutex_unlock(&smd_pkt_devp->tx_lock); DBG("wrote %d bytes\n", count); return count; } static unsigned int smd_pkt_poll(struct file *file, poll_table *wait) { struct smd_pkt_dev *smd_pkt_devp; unsigned int mask = 0; smd_pkt_devp = file->private_data; if (!smd_pkt_devp) return POLLERR; DBG("poll waiting\n"); poll_wait(file, &smd_pkt_devp->ch_read_wait_queue, wait); if (smd_read_avail(smd_pkt_devp->ch)) mask |= POLLIN | POLLRDNORM; DBG("poll return\n"); return mask; } static void smd_pkt_ch_notify(void *priv, unsigned event) { struct smd_pkt_dev *smd_pkt_devp = priv; if (smd_pkt_devp->ch == 0) return; switch (event) { case SMD_EVENT_DATA: DBG("data\n"); check_and_wakeup_reader(smd_pkt_devp); break; case SMD_EVENT_OPEN: DBG("remote open\n"); smd_pkt_devp->remote_open = 1; wake_up_interruptible(&smd_pkt_devp->ch_opened_wait_queue); break; case SMD_EVENT_CLOSE: smd_pkt_devp->remote_open = 0; pr_info("remote closed\n"); break; default: pr_err("unknown event %d\n", event); break; } } static char *smd_pkt_dev_name[] = { "smdcntl0", "smdcntl1", "smdcntl2", "smdcntl3", "smdcntl4", "smdcntl5", "smdcntl6", "smdcntl7", "smd22", }; static char *smd_ch_name[] = { "DATA5_CNTL", 
"DATA6_CNTL", "DATA7_CNTL", "DATA8_CNTL", "DATA9_CNTL", "DATA12_CNTL", "DATA13_CNTL", "DATA14_CNTL", "DATA22", }; static int smd_pkt_open(struct inode *inode, struct file *file) { int r = 0; struct smd_pkt_dev *smd_pkt_devp; smd_pkt_devp = container_of(inode->i_cdev, struct smd_pkt_dev, cdev); if (!smd_pkt_devp) return -EINVAL; file->private_data = smd_pkt_devp; mutex_lock(&smd_pkt_devp->ch_lock); if (smd_pkt_devp->open_count == 0) { r = smd_open(smd_ch_name[smd_pkt_devp->i], &smd_pkt_devp->ch, smd_pkt_devp, smd_pkt_ch_notify); if (r < 0) { pr_err("smd_open failed for %s, %d\n", smd_ch_name[smd_pkt_devp->i], r); goto out; } r = wait_event_interruptible_timeout( smd_pkt_devp->ch_opened_wait_queue, smd_pkt_devp->remote_open, msecs_to_jiffies(2 * HZ)); if (r == 0) r = -ETIMEDOUT; if (r < 0) { pr_err("wait returned %d\n", r); smd_close(smd_pkt_devp->ch); smd_pkt_devp->ch = 0; } else { smd_pkt_devp->open_count++; r = 0; } } out: mutex_unlock(&smd_pkt_devp->ch_lock); return r; } static int smd_pkt_release(struct inode *inode, struct file *file) { int r = 0; struct smd_pkt_dev *smd_pkt_devp = file->private_data; if (!smd_pkt_devp) return -EINVAL; mutex_lock(&smd_pkt_devp->ch_lock); if (--smd_pkt_devp->open_count == 0) { r = smd_close(smd_pkt_devp->ch); smd_pkt_devp->ch = 0; } mutex_unlock(&smd_pkt_devp->ch_lock); return r; } static const struct file_operations smd_pkt_fops = { .owner = THIS_MODULE, .open = smd_pkt_open, .release = smd_pkt_release, .read = smd_pkt_read, .write = smd_pkt_write, .poll = smd_pkt_poll, }; static int __init smd_pkt_init(void) { int i; int r; r = alloc_chrdev_region(&smd_pkt_number, 0, NUM_SMD_PKT_PORTS, DEVICE_NAME); if (r) { pr_err("alloc_chrdev_region() failed %d\n", r); return r; } smd_pkt_classp = class_create(THIS_MODULE, DEVICE_NAME); if (IS_ERR(smd_pkt_classp)) { r = PTR_ERR(smd_pkt_classp); pr_err("class_create() failed %d\n", r); goto unreg_chardev; } for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) { smd_pkt_devp[i] = kzalloc(sizeof(struct 
smd_pkt_dev), GFP_KERNEL); if (!smd_pkt_devp[i]) { pr_err("kmalloc() failed\n"); goto clean_cdevs; } smd_pkt_devp[i]->i = i; init_waitqueue_head(&smd_pkt_devp[i]->ch_read_wait_queue); smd_pkt_devp[i]->remote_open = 0; init_waitqueue_head(&smd_pkt_devp[i]->ch_opened_wait_queue); mutex_init(&smd_pkt_devp[i]->ch_lock); mutex_init(&smd_pkt_devp[i]->rx_lock); mutex_init(&smd_pkt_devp[i]->tx_lock); cdev_init(&smd_pkt_devp[i]->cdev, &smd_pkt_fops); smd_pkt_devp[i]->cdev.owner = THIS_MODULE; r = cdev_add(&smd_pkt_devp[i]->cdev, (smd_pkt_number + i), 1); if (r) { pr_err("cdev_add() failed %d\n", r); kfree(smd_pkt_devp[i]); goto clean_cdevs; } smd_pkt_devp[i]->devicep = device_create(smd_pkt_classp, NULL, (smd_pkt_number + i), NULL, smd_pkt_dev_name[i]); if (IS_ERR(smd_pkt_devp[i]->devicep)) { r = PTR_ERR(smd_pkt_devp[i]->devicep); pr_err("device_create() failed %d\n", r); cdev_del(&smd_pkt_devp[i]->cdev); kfree(smd_pkt_devp[i]); goto clean_cdevs; } } pr_info("SMD Packet Port Driver Initialized.\n"); return 0; clean_cdevs: if (i > 0) { while (--i >= 0) { mutex_destroy(&smd_pkt_devp[i]->ch_lock); mutex_destroy(&smd_pkt_devp[i]->rx_lock); mutex_destroy(&smd_pkt_devp[i]->tx_lock); cdev_del(&smd_pkt_devp[i]->cdev); kfree(smd_pkt_devp[i]); device_destroy(smd_pkt_classp, MKDEV(MAJOR(smd_pkt_number), i)); } } class_destroy(smd_pkt_classp); unreg_chardev: unregister_chrdev_region(MAJOR(smd_pkt_number), NUM_SMD_PKT_PORTS); return r; } module_init(smd_pkt_init); static void __exit smd_pkt_cleanup(void) { int i; for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) { mutex_destroy(&smd_pkt_devp[i]->ch_lock); mutex_destroy(&smd_pkt_devp[i]->rx_lock); mutex_destroy(&smd_pkt_devp[i]->tx_lock); cdev_del(&smd_pkt_devp[i]->cdev); kfree(smd_pkt_devp[i]); device_destroy(smd_pkt_classp, MKDEV(MAJOR(smd_pkt_number), i)); } class_destroy(smd_pkt_classp); unregister_chrdev_region(MAJOR(smd_pkt_number), NUM_SMD_PKT_PORTS); } module_exit(smd_pkt_cleanup); MODULE_DESCRIPTION("MSM Shared Memory Packet Port"); 
MODULE_LICENSE("GPL v2");
gpl-2.0
gianogli/boeffla-kernel-cm-s3-7
lib/argv_split.c
4427
1826
/* * Helper function for splitting a string into an argv-like array. */ #include <linux/kernel.h> #include <linux/ctype.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/module.h> static const char *skip_arg(const char *cp) { while (*cp && !isspace(*cp)) cp++; return cp; } static int count_argc(const char *str) { int count = 0; while (*str) { str = skip_spaces(str); if (*str) { count++; str = skip_arg(str); } } return count; } /** * argv_free - free an argv * @argv - the argument vector to be freed * * Frees an argv and the strings it points to. */ void argv_free(char **argv) { char **p; for (p = argv; *p; p++) kfree(*p); kfree(argv); } EXPORT_SYMBOL(argv_free); /** * argv_split - split a string at whitespace, returning an argv * @gfp: the GFP mask used to allocate memory * @str: the string to be split * @argcp: returned argument count * * Returns an array of pointers to strings which are split out from * @str. This is performed by strictly splitting on white-space; no * quote processing is performed. Multiple whitespace characters are * considered to be a single argument separator. The returned array * is always NULL-terminated. Returns NULL on memory allocation * failure. */ char **argv_split(gfp_t gfp, const char *str, int *argcp) { int argc = count_argc(str); char **argv = kzalloc(sizeof(*argv) * (argc+1), gfp); char **argvp; if (argv == NULL) goto out; if (argcp) *argcp = argc; argvp = argv; while (*str) { str = skip_spaces(str); if (*str) { const char *p = str; char *t; str = skip_arg(str); t = kstrndup(p, str-p, gfp); if (t == NULL) goto fail; *argvp++ = t; } } *argvp = NULL; out: return argv; fail: argv_free(argv); return NULL; } EXPORT_SYMBOL(argv_split);
gpl-2.0
CurtisMJ/g800f_custom_kernel
arch/powerpc/platforms/cell/spufs/syscalls.c
4427
2137
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#include "spufs.h"

/**
 * sys_spu_run - run code loaded into an SPU
 *
 * @unpc:    next program counter for the SPU
 * @ustatus: status of the SPU
 *
 * This system call transfers the control of execution of a
 * user space thread to an SPU. It will return when the
 * SPU has finished executing or when it hits an error
 * condition and it will be interrupted if a signal needs
 * to be delivered to a handler in user space.
 *
 * The next program counter is set to the passed value
 * before the SPU starts fetching code and the user space
 * pointer gets updated with the new value when returning
 * from kernel space.
 *
 * The status value returned from spu_run reflects the
 * value of the spu_status register after the SPU has stopped.
 *
 */
static long do_spu_run(struct file *filp,
			__u32 __user *unpc,
			__u32 __user *ustatus)
{
	struct spufs_inode_info *info;
	u32 npc, status;
	long rc;

	if (get_user(npc, unpc))
		return -EFAULT;

	/* only files created by spu_create may be run */
	if (filp->f_op != &spufs_context_fops)
		return -EINVAL;

	info = SPUFS_I(filp->f_path.dentry->d_inode);
	rc = spufs_run_spu(info->i_ctx, &npc, &status);

	/* copy the updated PC (and, if requested, the status) back */
	if (put_user(npc, unpc))
		rc = -EFAULT;
	if (ustatus && put_user(status, ustatus))
		rc = -EFAULT;

	return rc;
}

/* Create an spufs context/gang at @pathname; returns 0 or -errno. */
static long do_spu_create(const char __user *pathname, unsigned int flags,
			  umode_t mode, struct file *neighbor)
{
	struct path path;
	struct dentry *dentry;
	int rc;

	dentry = user_path_create(AT_FDCWD, pathname, &path, 1);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	rc = spufs_create(&path, dentry, flags, mode, neighbor);
	path_put(&path);
	return rc;
}

struct spufs_calls spufs_calls = {
	.create_thread = do_spu_create,
	.spu_run = do_spu_run,
	.coredump_extra_notes_size = spufs_coredump_extra_notes_size,
	.coredump_extra_notes_write = spufs_coredump_extra_notes_write,
	.notify_spus_active = do_notify_spus_active,
	.owner = THIS_MODULE,
};
gpl-2.0